/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
                                                         struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
                                    struct lpfc_cqe *);
static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
                                       int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
                        uint32_t);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
        return &iocbq->iocb;
}

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
        union lpfc_wqe *temp_wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
        uint32_t idx;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_wqe = q->qe[q->host_index].wqe;

        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->host_index + 1) % q->entry_count);
        if (idx == q->hba_index) {
                q->WQ_overflow++;
                return -ENOMEM;
        }
        q->WQ_posted++;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % q->entry_repost))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
        else
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
        if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
                bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
        lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
        /* ensure WQE bcopy flushed before doorbell write */
        wmb();

        /* Update the host index before invoking device */
        host_index = q->host_index;

        q->host_index = idx;

        /* Ring Doorbell */
        doorbell.word0 = 0;
        if (q->db_format == LPFC_DB_LIST_FORMAT) {
                bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
                bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
        } else if (q->db_format == LPFC_DB_RING_FORMAT) {
                bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
        } else {
                return -EINVAL;
        }
        writel(doorbell.word0, q->db_regaddr);

        return 0;
}

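/*
 * Example (illustrative sketch, not driver code): a caller posts a WQE
 * under the hbalock and treats -ENOMEM as "work queue full"; "wq", "wqe",
 * "rc" and "iflags" below are hypothetical locals. When -ENOMEM comes
 * back the ring is full and the iocb is typically left on the txq to be
 * retried later.
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(wq, &wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc == -ENOMEM)
 *		rc = IOCB_BUSY;
 */
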
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
        uint32_t released = 0;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        if (q->hba_index == index)
                return 0;
        do {
                q->hba_index = ((q->hba_index + 1) % q->entry_count);
                released++;
        } while (q->hba_index != index);
        return released;
}

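/*
 * Worked example (hypothetical numbers): with entry_count = 256, a
 * current hba_index of 250 and a completion reporting index = 2, the
 * loop above wraps through 251..255 and 0..2, so this routine returns
 * (2 - 250) mod 256 = 8 consumed entries.
 */
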
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
        struct lpfc_mqe *temp_mqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_mqe = q->qe[q->host_index].mqe;

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
        /* Save off the mailbox pointer for completion */
        q->phba->mbox = (MAILBOX_t *)temp_mqe;

        /* Update the host index before invoking device */
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
        return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed but not popped back to the HBA, then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
        struct lpfc_eqe *eqe;
        uint32_t idx;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        eqe = q->qe[q->hba_index].eqe;

        /* If the next EQE is not valid then we are done */
        if (!bf_get_le32(lpfc_eqe_valid, eqe))
                return NULL;
        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->hba_index + 1) % q->entry_count);
        if (idx == q->host_index)
                return NULL;

        q->hba_index = idx;

        /*
         * insert barrier for instruction interlock: data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
         * instructions allowing action on content before valid bit checked,
         * add barrier here as well. May not be needed as "content" is a
         * single 32-bit entity here (vs multi word structure for cq's).
         */
        mb();
        return eqe;
}

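/*
 * Example (illustrative): the interrupt paths in this file drain an EQ
 * with a loop of this shape; lpfc_sli4_eq_get() returns NULL once the
 * next EQE's valid bit is clear, and lpfc_sli4_eq_release() then pops
 * the processed entries back to the HBA and optionally re-arms the
 * queue. "eq" and "qidx" are hypothetical here.
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq)))
 *		lpfc_sli4_hba_handle_eqe(phba, eqe, qidx);
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
 */
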
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue on which to disable interrupts
 *
 **/
static inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
                (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
        uint32_t released = 0;
        struct lpfc_eqe *temp_eqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* while there are valid entries */
        while (q->hba_index != q->host_index) {
                temp_eqe = q->qe[q->host_index].eqe;
                bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
                released++;
                q->host_index = ((q->host_index + 1) % q->entry_count);
        }
        if (unlikely(released == 0 && !arm))
                return 0;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
                        (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQCQDBregaddr);
        return released;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed but not popped back to the HBA, then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
        struct lpfc_cqe *cqe;
        uint32_t idx;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;

        /* If the next CQE is not valid then we are done */
        if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
                return NULL;
        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->hba_index + 1) % q->entry_count);
        if (idx == q->host_index)
                return NULL;

        cqe = q->qe[q->hba_index].cqe;
        q->hba_index = idx;

        /*
         * insert barrier for instruction interlock: data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Speculative instructions were allowing a bcopy at the start
         * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
         * after our return, to copy data before the valid bit check above
         * was done. As such, some of the copied data was stale. The barrier
         * ensures the check is before any data is copied.
         */
        mb();
        return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
        uint32_t released = 0;
        struct lpfc_cqe *temp_qe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;
        /* while there are valid entries */
        while (q->hba_index != q->host_index) {
                temp_qe = q->qe[q->host_index].cqe;
                bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
                released++;
                q->host_index = ((q->host_index + 1) % q->entry_count);
        }
        if (unlikely(released == 0 && !arm))
                return 0;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
        bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
                        (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
        return released;
}

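/*
 * Worked example (hypothetical numbers): if host_index is 10 and
 * hba_index is 14, the loop above clears the valid bit on entries
 * 10 through 13 and the doorbell write reports num_released = 4; with
 * @arm set the CQ will also interrupt again when the next CQE arrives.
 */
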
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The header Receive Queue Entry to put on the header receive queue.
 * @drqe: The data Receive Queue Entry to put on the data receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. It returns -EBUSY if the queues are full, -EINVAL for
 * mismatched queues, and -ENOMEM if the queue memory is missing.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
        int put_index;

        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        put_index = hq->host_index;
        temp_hrqe = hq->qe[hq->host_index].rqe;
        temp_drqe = dq->qe[dq->host_index].rqe;

        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq->host_index != dq->host_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq->host_index + 1) % hq->entry_count);
        dq->host_index = ((dq->host_index + 1) % dq->entry_count);

        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % hq->entry_repost)) {
                doorbell.word0 = 0;
                if (hq->db_format == LPFC_DB_RING_FORMAT) {
                        bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
                               hq->entry_repost);
                        bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
                } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
                        bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
                               hq->entry_repost);
                        bf_set(lpfc_rq_db_list_fm_index, &doorbell,
                               hq->host_index);
                        bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
                } else {
                        return -EINVAL;
                }
                writel(doorbell.word0, hq->db_regaddr);
        }
        return put_index;
}

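/*
 * Example (illustrative sketch): receive buffers are posted as a
 * header/data pair and the returned put_index identifies the slot used
 * for buffer bookkeeping. The "hrq"/"drq" queues and the dma buffer
 * fields below are hypothetical.
 *
 *	hrqe.address_lo = putPaddrLow(hbuf->phys);
 *	hrqe.address_hi = putPaddrHigh(hbuf->phys);
 *	drqe.address_lo = putPaddrLow(dbuf->phys);
 *	drqe.address_hi = putPaddrHigh(dbuf->phys);
 *	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *
 * A negative rc means the pair was not posted (-EBUSY when the queue is
 * full, -EINVAL for mismatched queues).
 */
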
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return 0;

        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
        return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
                           pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
                           pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

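/*
 * Worked example (hypothetical sizes): with iocb_cmd_size = 32 and
 * cmdidx = 3, lpfc_cmd_iocb() resolves to cmdringaddr + 96, the fourth
 * fixed-size slot in the command ring; lpfc_resp_iocb() performs the
 * same arithmetic with iocb_rsp_size over the response ring.
 */
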
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        if (iocbq)
                phba->iocb_cnt++;
        if (phba->iocb_cnt > phba->iocb_max)
                phba->iocb_max = phba->iocb_cnt;
        return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
        return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
{
        struct lpfc_nodelist *ndlp = NULL;

        if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
                ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

        /* The target DID could have been swapped (cable swap)
         * we should use the ndlp from the findnode if it is
         * available.
         */
        if ((!ndlp) && rrq->ndlp)
                ndlp = rrq->ndlp;

        if (!ndlp)
                goto out;

        if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
                rrq->send_rrq = 0;
                rrq->xritag = 0;
                rrq->rrq_stop_time = 0;
        }
out:
        mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called without the hbalock held; it takes and
 * releases the lock itself. It checks whether stop_time (ratov from
 * setting the rrq active) has been reached; if it has and the send_rrq
 * flag is set then it calls lpfc_send_rrq. If the send_rrq flag is not
 * set then it just calls the routine to clear the rrq and free the rrq
 * resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;
        LIST_HEAD(send_rrq);

        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
                        list_move(&rrq->list, &send_rrq);
                else if (time_before(rrq->rrq_stop_time, next_time))
                        next_time = rrq->rrq_stop_time;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if ((!list_empty(&phba->active_rrq_list)) &&
            (!(phba->pport->load_flag & FC_UNLOADING)))
                mod_timer(&phba->rrq_tmr, next_time);
        list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
                list_del(&rrq->list);
                if (!rrq->send_rrq)
                        /* this call will free the rrq */
                        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                else if (lpfc_send_rrq(phba, rrq)) {
                        /* if we send the rrq then the completion handler
                         * will clear the bit in the xribitmap.
                         */
                        lpfc_clr_rrq_active(phba, rrq->xritag,
                                            rrq);
                }
        }
}

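/*
 * Example (illustrative): rough order of the RRQ lifecycle handled by
 * the routines in this block. lpfc_set_rrq_active() (below) sets the
 * xri bit, queues the rrq on active_rrq_list and arms rrq_tmr; when the
 * timer fires after RATOV, lpfc_handle_rrq_active() either sends the
 * RRQ ELS via lpfc_send_rrq() or clears the bit directly through
 * lpfc_clr_rrq_active().
 */
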
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return NULL;
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport == vport && rrq->xritag == xri &&
                    rrq->nlp_DID == did) {
                        list_del(&rrq->list);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        return rrq;
                }
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If @ndlp is not NULL, only remove the rrqs for this vport and this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;
        LIST_HEAD(rrq_list);

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        if (!ndlp) {
                lpfc_sli4_vport_delete_els_xri_aborted(vport);
                lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
        }
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
                if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
                        list_move(&rrq->list, &rrq_list);
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
                list_del(&rrq->list);
                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        }
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: The xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                        uint16_t xritag)
{
        lockdep_assert_held(&phba->hbalock);
        if (!ndlp)
                return 0;
        if (!ndlp->active_rrqs_xri_bitmap)
                return 0;
        if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                return 1;
        else
                return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 if the rrq was activated for this xri,
 *         < 0 on no memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
        unsigned long iflags;
        struct lpfc_node_rrq *rrq;
        int empty;

        if (!ndlp)
                return -EINVAL;

        if (!phba->cfg_enable_rrq)
                return -EINVAL;

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (phba->pport->load_flag & FC_UNLOADING) {
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
                goto out;
        }

        /*
         * set the active bit even if there is no mem available.
         */
        if (NLP_CHK_FREE_REQ(ndlp))
                goto out;

        if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
                goto out;

        if (!ndlp->active_rrqs_xri_bitmap)
                goto out;

        if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                goto out;

        spin_unlock_irqrestore(&phba->hbalock, iflags);
        rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
        if (!rrq) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
                                " DID:0x%x Send:%d\n",
                                xritag, rxid, ndlp->nlp_DID, send_rrq);
                return -EINVAL;
        }
        if (phba->cfg_enable_rrq == 1)
                rrq->send_rrq = send_rrq;
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
        rrq->rrq_stop_time = jiffies +
                                msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        rrq->ndlp = ndlp;
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
        rrq->rxid = rxid;
        spin_lock_irqsave(&phba->hbalock, iflags);
        empty = list_empty(&phba->active_rrq_list);
        list_add_tail(&rrq->list, &phba->active_rrq_list);
        phba->hba_flag |= HBA_RRQ_ACTIVE;
        if (empty)
                lpfc_worker_wake_up(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return 0;
out:
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "2921 Can't set rrq active xri:0x%x rxid:0x%x"
                        " DID:0x%x Send:%d\n",
                        xritag, rxid, ndlp->nlp_DID, send_rrq);
        return -EINVAL;
}

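/*
 * Example (illustrative sketch): a caller typically marks an RRQ active
 * when an exchange is aborted so the xri is not reused for this DID
 * until RATOV expires; "lxri" and "rxid" are hypothetical here.
 *
 *	if (lpfc_set_rrq_active(phba, ndlp, lxri, rxid, 1))
 *		return;
 */
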
/**
 * __lpfc_sli_get_sglq - Allocates a sglq object from the sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the ring lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty it is successful, and it returns a pointer to the
 * newly allocated sglq object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
        struct lpfc_sglq *sglq = NULL;
        struct lpfc_sglq *start_sglq = NULL;
        struct lpfc_scsi_buf *lpfc_cmd;
        struct lpfc_nodelist *ndlp;
        int found = 0;

        lockdep_assert_held(&phba->hbalock);

        if (piocbq->iocb_flag & LPFC_IO_FCP) {
                lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
                ndlp = lpfc_cmd->rdata->pnode;
        } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
                        !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
                ndlp = piocbq->context_un.ndlp;
        } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
                if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
                        ndlp = NULL;
                else
                        ndlp = piocbq->context_un.ndlp;
        } else {
                ndlp = piocbq->context1;
        }

        list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
        start_sglq = sglq;
        while (!found) {
                if (!sglq)
                        return NULL;
                if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) {
                        /* This xri has an rrq outstanding for this DID.
                         * put it back in the list and get another xri.
                         */
                        list_add_tail(&sglq->list, lpfc_sgl_list);
                        sglq = NULL;
                        list_remove_head(lpfc_sgl_list, sglq,
                                                struct lpfc_sglq, list);
                        if (sglq == start_sglq) {
                                sglq = NULL;
                                break;
                        } else
                                continue;
                }
                sglq->ndlp = ndlp;
                found = 1;
                phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
                sglq->state = SGL_ALLOCATED;
        }
        return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_sglq *sglq;
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
        unsigned long iflag = 0;
        struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

        lockdep_assert_held(&phba->hbalock);

        if (iocbq->sli4_xritag == NO_XRI)
                sglq = NULL;
        else
                sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

        if (sglq) {
                if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
                        (sglq->state != SGL_XRI_ABORTED)) {
                        spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
                                        iflag);
                        list_add(&sglq->list,
                                &phba->sli4_hba.lpfc_abts_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.abts_sgl_list_lock, iflag);
                } else {
                        spin_lock_irqsave(&pring->ring_lock, iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                &phba->sli4_hba.lpfc_sgl_list);
                        spin_unlock_irqrestore(&pring->ring_lock, iflag);

                        /* Check if TXQ queue needs to be serviced */
                        if (!list_empty(&pring->txq))
                                lpfc_worker_wake_up(phba);
                }
        }

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_lxritag = NO_XRI;
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

        lockdep_assert_held(&phba->hbalock);

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        lockdep_assert_held(&phba->hbalock);

        phba->__lpfc_sli_release_iocbq(phba, iocbq);
        phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        unsigned long iflags;

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_sli_release_iocbq(phba, iocbq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}

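/*
 * Example (illustrative): allocation and release pair up around an I/O.
 * lpfc_sli_release_iocbq() takes the hbalock itself, so callers already
 * holding that lock must use __lpfc_sli_release_iocbq() instead.
 *
 *	piocb = lpfc_sli_get_iocbq(phba);
 *	if (!piocb)
 *		return IOCB_ERROR;
 *	... build and issue the iocb, wait for completion ...
 *	lpfc_sli_release_iocbq(phba, piocb);
 */
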
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
                      uint32_t ulpstatus, uint32_t ulpWord4)
{
        struct lpfc_iocbq *piocb;

        while (!list_empty(iocblist)) {
                list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
                if (!piocb->iocb_cmpl) {
                        lpfc_sli_release_iocbq(phba, piocb);
                } else {
                        piocb->iocb.ulpStatus = ulpstatus;
                        piocb->iocb.un.ulpWord[4] = ulpWord4;
                        piocb->iocb_cmpl(phba, piocb, piocb);
                }
        }
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
        lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

        if (iocb_cmnd > CMD_MAX_IOCB_CMD)
                return LPFC_UNKNOWN_IOCB;

        switch (iocb_cmnd) {
        case CMD_XMIT_SEQUENCE_CR:
        case CMD_XMIT_SEQUENCE_CX:
        case CMD_XMIT_BCAST_CN:
        case CMD_XMIT_BCAST_CX:
        case CMD_ELS_REQUEST_CR:
        case CMD_ELS_REQUEST_CX:
        case CMD_CREATE_XRI_CR:
        case CMD_CREATE_XRI_CX:
        case CMD_GET_RPI_CN:
        case CMD_XMIT_ELS_RSP_CX:
        case CMD_GET_RPI_CR:
        case CMD_FCP_IWRITE_CR:
        case CMD_FCP_IWRITE_CX:
        case CMD_FCP_IREAD_CR:
        case CMD_FCP_IREAD_CX:
        case CMD_FCP_ICMND_CR:
        case CMD_FCP_ICMND_CX:
        case CMD_FCP_TSEND_CX:
        case CMD_FCP_TRSP_CX:
        case CMD_FCP_TRECEIVE_CX:
        case CMD_FCP_AUTO_TRSP_CX:
        case CMD_ADAPTER_MSG:
        case CMD_ADAPTER_DUMP:
        case CMD_XMIT_SEQUENCE64_CR:
        case CMD_XMIT_SEQUENCE64_CX:
        case CMD_XMIT_BCAST64_CN:
        case CMD_XMIT_BCAST64_CX:
        case CMD_ELS_REQUEST64_CR:
        case CMD_ELS_REQUEST64_CX:
        case CMD_FCP_IWRITE64_CR:
        case CMD_FCP_IWRITE64_CX:
        case CMD_FCP_IREAD64_CR:
        case CMD_FCP_IREAD64_CX:
        case CMD_FCP_ICMND64_CR:
        case CMD_FCP_ICMND64_CX:
        case CMD_FCP_TSEND64_CX:
        case CMD_FCP_TRSP64_CX:
        case CMD_FCP_TRECEIVE64_CX:
        case CMD_GEN_REQUEST64_CR:
        case CMD_GEN_REQUEST64_CX:
        case CMD_XMIT_ELS_RSP64_CX:
        case DSSCMD_IWRITE64_CR:
        case DSSCMD_IWRITE64_CX:
        case DSSCMD_IREAD64_CR:
        case DSSCMD_IREAD64_CX:
                type = LPFC_SOL_IOCB;
                break;
        case CMD_ABORT_XRI_CN:
        case CMD_ABORT_XRI_CX:
        case CMD_CLOSE_XRI_CN:
        case CMD_CLOSE_XRI_CX:
        case CMD_XRI_ABORTED_CX:
        case CMD_ABORT_MXRI64_CN:
        case CMD_XMIT_BLS_RSP64_CX:
                type = LPFC_ABORT_IOCB;
                break;
        case CMD_RCV_SEQUENCE_CX:
        case CMD_RCV_ELS_REQ_CX:
        case CMD_RCV_SEQUENCE64_CX:
        case CMD_RCV_ELS_REQ64_CX:
        case CMD_ASYNC_STATUS:
        case CMD_IOCB_RCV_SEQ64_CX:
        case CMD_IOCB_RCV_ELS64_CX:
        case CMD_IOCB_RCV_CONT64_CX:
        case CMD_IOCB_RET_XRI64_CX:
                type = LPFC_UNSOL_IOCB;
                break;
        case CMD_IOCB_XMIT_MSEQ64_CR:
        case CMD_IOCB_XMIT_MSEQ64_CX:
        case CMD_IOCB_RCV_SEQ_LIST64_CX:
        case CMD_IOCB_RCV_ELS_LIST64_CX:
        case CMD_IOCB_CLOSE_EXTENDED_CN:
        case CMD_IOCB_ABORT_EXTENDED_CN:
        case CMD_IOCB_RET_HBQE64_CN:
        case CMD_IOCB_FCP_IBIDIR64_CR:
        case CMD_IOCB_FCP_IBIDIR64_CX:
        case CMD_IOCB_FCP_ITASKMGT64_CX:
        case CMD_IOCB_LOGENTRY_CN:
        case CMD_IOCB_LOGENTRY_ASYNC_CN:
                printk(KERN_ERR "%s - Unhandled SLI-3 Command x%x\n",
                                __func__, iocb_cmnd);
                type = LPFC_UNKNOWN_IOCB;
                break;
        default:
                type = LPFC_UNKNOWN_IOCB;
                break;
        }

        return type;
}

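/*
 * Example (illustrative): the ring event handlers in this file dispatch
 * on the returned type, roughly:
 *
 *	type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
 *	switch (type) {
 *	case LPFC_SOL_IOCB:     match the response against the txcmplq
 *				and invoke the command's completion;
 *	case LPFC_UNSOL_IOCB:   hand the received sequence to the ULP;
 *	case LPFC_ABORT_IOCB:   complete the aborted command;
 *	default:                log and drop the entry;
 *	}
 */
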
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *pmbox;
        int i, rc, ret = 0;

        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;
        pmbox = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;
        for (i = 0; i < psli->num_rings; i++) {
                lpfc_config_ring(phba, i, pmb);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0446 Adapter failed to init (%d), "
                                        "mbxCmd x%x CFG_RING, mbxStatus x%x, "
                                        "ring %d\n",
                                        rc, pmbox->mbxCommand,
                                        pmbox->mbxStatus, i);
                        phba->link_state = LPFC_HBA_ERROR;
                        ret = -ENXIO;
                        break;
                }
        }
        mempool_free(pmb, phba->mbox_mem_pool);
        return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        struct lpfc_iocbq *piocb)
{
        lockdep_assert_held(&phba->hbalock);

        BUG_ON(!piocb);

        list_add_tail(&piocb->list, &pring->txcmplq);
        piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;

        if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
           (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
           (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
                BUG_ON(!piocb->vport);
                if (!(piocb->vport->load_flag & FC_UNLOADING))
                        mod_timer(&piocb->vport->els_tmofunc,
                                  jiffies +
                                  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
        }

        return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_iocbq *cmd_iocb;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
        return cmd_iocb;
}

1369 /**
1370  * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
1371  * @phba: Pointer to HBA context object.
1372  * @pring: Pointer to driver SLI ring object.
1373  *
1374  * This function is called with hbalock held and the caller must post the
1375  * iocb without releasing the lock. If the caller releases the lock, the
1376  * iocb slot returned by the function is not guaranteed to be available.
1377  * The function returns a pointer to the next available iocb slot if a
1378  * slot is available in the ring, else it returns NULL.
1379  * If the get index of the ring is ahead of the put index, the function
1380  * will post an error attention event to the worker thread to take the
1381  * HBA to an offline state.
1382  **/
1383 static IOCB_t *
1384 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1385 {
1386         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1387         uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
1388
1389         lockdep_assert_held(&phba->hbalock);
1390
1391         if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1392            (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1393                 pring->sli.sli3.next_cmdidx = 0;
1394
1395         if (unlikely(pring->sli.sli3.local_getidx ==
1396                 pring->sli.sli3.next_cmdidx)) {
1397
1398                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
1399
1400                 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
1401                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1402                                         "0315 Ring %d issue: portCmdGet %d "
1403                                         "is bigger than cmd ring size %d\n",
1404                                         pring->ringno,
1405                                         pring->sli.sli3.local_getidx,
1406                                         max_cmd_idx);
1407
1408                         phba->link_state = LPFC_HBA_ERROR;
1409                         /*
1410                          * All error attention handlers are posted to
1411                          * worker thread
1412                          */
1413                         phba->work_ha |= HA_ERATT;
1414                         phba->work_hs = HS_FFER3;
1415
1416                         lpfc_worker_wake_up(phba);
1417
1418                         return NULL;
1419                 }
1420
1421                 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
1422                         return NULL;
1423         }
1424
1425         return lpfc_cmd_iocb(phba, pring);
1426 }
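
/*
 * Illustrative sketch (not driver code) of the circular-ring convention
 * used above: one slot is always left empty so that "full" and "empty"
 * can be told apart.  With hypothetical helpers:
 *
 *        static inline bool ring_full(u32 putidx, u32 getidx, u32 entries)
 *        {
 *                return ((putidx + 1) % entries) == getidx;
 *        }
 *
 * lpfc_sli_next_iocb_slot() expresses the same test with an explicit wrap
 * of next_cmdidx and, on an apparent full condition, a re-read of the
 * port's cmdGetInx before giving up.
 */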
1427
1428 /**
1429  * lpfc_sli_next_iotag - Get an iotag for the iocb
1430  * @phba: Pointer to HBA context object.
1431  * @iocbq: Pointer to driver iocb object.
1432  *
1433  * This function gets an iotag for the iocb. If there is no unused iotag and
1434  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
1435  * array and assigns a new iotag.
1436  * The function returns the allocated iotag if successful, else returns zero.
1437  * Zero is not a valid iotag.
1438  * The caller is not required to hold any lock.
1439  **/
1440 uint16_t
1441 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1442 {
1443         struct lpfc_iocbq **new_arr;
1444         struct lpfc_iocbq **old_arr;
1445         size_t new_len;
1446         struct lpfc_sli *psli = &phba->sli;
1447         uint16_t iotag;
1448
1449         spin_lock_irq(&phba->hbalock);
1450         iotag = psli->last_iotag;
1451         if (++iotag < psli->iocbq_lookup_len) {
1452                 psli->last_iotag = iotag;
1453                 psli->iocbq_lookup[iotag] = iocbq;
1454                 spin_unlock_irq(&phba->hbalock);
1455                 iocbq->iotag = iotag;
1456                 return iotag;
1457         } else if (psli->iocbq_lookup_len < (0xffff
1458                                            - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1459                 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1460                 spin_unlock_irq(&phba->hbalock);
1461                 new_arr = kzalloc(new_len * sizeof(struct lpfc_iocbq *),
1462                                   GFP_KERNEL);
1463                 if (new_arr) {
1464                         spin_lock_irq(&phba->hbalock);
1465                         old_arr = psli->iocbq_lookup;
1466                         if (new_len <= psli->iocbq_lookup_len) {
1467                                 /* highly improbable case */
1468                                 kfree(new_arr);
1469                                 iotag = psli->last_iotag;
1470                                 if (++iotag < psli->iocbq_lookup_len) {
1471                                         psli->last_iotag = iotag;
1472                                         psli->iocbq_lookup[iotag] = iocbq;
1473                                         spin_unlock_irq(&phba->hbalock);
1474                                         iocbq->iotag = iotag;
1475                                         return iotag;
1476                                 }
1477                                 spin_unlock_irq(&phba->hbalock);
1478                                 return 0;
1479                         }
1480                         if (psli->iocbq_lookup)
1481                                 memcpy(new_arr, old_arr,
1482                                        ((psli->last_iotag + 1) *
1483                                         sizeof(struct lpfc_iocbq *)));
1484                         psli->iocbq_lookup = new_arr;
1485                         psli->iocbq_lookup_len = new_len;
1486                         psli->last_iotag = iotag;
1487                         psli->iocbq_lookup[iotag] = iocbq;
1488                         spin_unlock_irq(&phba->hbalock);
1489                         iocbq->iotag = iotag;
1490                         kfree(old_arr);
1491                         return iotag;
1492                 }
1493         } else
1494                 spin_unlock_irq(&phba->hbalock);
1495
1496         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1497                         "0318 Failed to allocate IOTAG. Last IOTAG is %d\n",
1498                         psli->last_iotag);
1499
1500         return 0;
1501 }
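
/*
 * Editorial sketch of the drop-lock/allocate/revalidate pattern used by
 * lpfc_sli_next_iotag() above (helper names are hypothetical):
 *
 *        spin_lock_irq(&lock);
 *        if (table_full()) {
 *                spin_unlock_irq(&lock);
 *                new = kzalloc(new_size, GFP_KERNEL);    -- may sleep
 *                spin_lock_irq(&lock);
 *                if (someone_else_grew_table())          -- lost the race
 *                        kfree(new);
 *                else
 *                        copy_and_publish(new);
 *        }
 *        spin_unlock_irq(&lock);
 *
 * GFP_KERNEL allocations can sleep, so hbalock must be dropped around
 * kzalloc(); the "highly improbable case" branch above handles the window
 * in which another thread extended iocbq_lookup first.
 */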
1502
1503 /**
1504  * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1505  * @phba: Pointer to HBA context object.
1506  * @pring: Pointer to driver SLI ring object.
1507  * @iocb: Pointer to iocb slot in the ring.
1508  * @nextiocb: Pointer to driver iocb object which needs to be
1509  *            posted to firmware.
1510  *
1511  * This function is called with hbalock held to post a new iocb to
1512  * the firmware. This function copies the new iocb to the ring iocb slot
1513  * and updates the ring pointers. It adds the new iocb to the txcmplq if
1514  * there is a completion callback for this iocb, else the function will
1515  * free the iocb object.
1516  **/
1517 static void
1518 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1519                 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1520 {
1521         lockdep_assert_held(&phba->hbalock);
1522         /*
1523          * Set up an iotag
1524          */
1525         nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1526
1527
1528         if (pring->ringno == LPFC_ELS_RING) {
1529                 lpfc_debugfs_slow_ring_trc(phba,
1530                         "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
1531                         *(((uint32_t *) &nextiocb->iocb) + 4),
1532                         *(((uint32_t *) &nextiocb->iocb) + 6),
1533                         *(((uint32_t *) &nextiocb->iocb) + 7));
1534         }
1535
1536         /*
1537          * Issue iocb command to adapter
1538          */
1539         lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1540         wmb();
1541         pring->stats.iocb_cmd++;
1542
1543         /*
1544          * If there is no completion routine to call, we can release the
1545          * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1546          * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1547          */
1548         if (nextiocb->iocb_cmpl)
1549                 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1550         else
1551                 __lpfc_sli_release_iocbq(phba, nextiocb);
1552
1553         /*
1554          * Let the HBA know what IOCB slot will be the next one the
1555          * driver will put a command into.
1556          */
1557         pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1558         writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1559 }
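
/*
 * Editorial note on the ordering above: the command must be visible to
 * the adapter before the put index that advertises it, i.e.
 *
 *        copy command into ring slot     -- lpfc_sli_pcimem_bcopy()
 *        wmb()                           -- order payload before index
 *        writel(putidx, cmdPutInx)       -- HBA may now fetch the slot
 *
 * Without the write barrier the adapter could observe the new put index
 * while the slot contents are still stale.
 */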
1560
1561 /**
1562  * lpfc_sli_update_full_ring - Update the chip attention register
1563  * @phba: Pointer to HBA context object.
1564  * @pring: Pointer to driver SLI ring object.
1565  *
1566  * The caller is not required to hold any lock for calling this function.
1567  * This function updates the chip attention bits for the ring to inform
1568  * firmware that there is pending work to be done for this ring and requests
1569  * an interrupt when there is space available in the ring. This function is
1570  * called when the driver is unable to post more iocbs to the ring due
1571  * to unavailability of space in the ring.
1572  **/
1573 static void
1574 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1575 {
1576         int ringno = pring->ringno;
1577
1578         pring->flag |= LPFC_CALL_RING_AVAILABLE;
1579
1580         wmb();
1581
1582         /*
1583          * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1584          * The HBA will tell us when an IOCB entry is available.
1585          */
1586         writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1587         readl(phba->CAregaddr); /* flush */
1588
1589         pring->stats.iocb_cmd_full++;
1590 }
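
/*
 * Editorial note: the Chip Attention register carves out a 4-bit field
 * per ring, which is why the bits are shifted by (ringno * 4).  For the
 * extra ring (ringno == 1), for example:
 *
 *        (CA_R0ATT | CA_R0CE_REQ) << (1 * 4)
 *
 * places the ring-0-relative attention and command-entry-request bits
 * into the ring-1 field of CAregaddr.
 */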
1591
1592 /**
1593  * lpfc_sli_update_ring - Update chip attention register
1594  * @phba: Pointer to HBA context object.
1595  * @pring: Pointer to driver SLI ring object.
1596  *
1597  * This function updates the chip attention register bit for the
1598  * given ring to inform HBA that there is more work to be done
1599  * in this ring. The caller is not required to hold any lock.
1600  **/
1601 static void
1602 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1603 {
1604         int ringno = pring->ringno;
1605
1606         /*
1607          * Tell the HBA that there is work to do in this ring.
1608          */
1609         if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1610                 wmb();
1611                 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1612                 readl(phba->CAregaddr); /* flush */
1613         }
1614 }
1615
1616 /**
1617  * lpfc_sli_resume_iocb - Process iocbs in the txq
1618  * @phba: Pointer to HBA context object.
1619  * @pring: Pointer to driver SLI ring object.
1620  *
1621  * This function is called with hbalock held to post pending iocbs
1622  * in the txq to the firmware. This function is called when driver
1623  * detects space available in the ring.
1624  **/
1625 static void
1626 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1627 {
1628         IOCB_t *iocb;
1629         struct lpfc_iocbq *nextiocb;
1630
1631         lockdep_assert_held(&phba->hbalock);
1632
1633         /*
1634          * Check to see if:
1635          *  (a) there is anything on the txq to send
1636          *  (b) link is up
1637          *  (c) link attention events can be processed (fcp ring only)
1638          *  (d) IOCB processing is not blocked by the outstanding mbox command.
1639          */
1640
1641         if (lpfc_is_link_up(phba) &&
1642             (!list_empty(&pring->txq)) &&
1643             (pring->ringno != phba->sli.fcp_ring ||
1644              phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1645
1646                 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1647                        (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1648                         lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1649
1650                 if (iocb)
1651                         lpfc_sli_update_ring(phba, pring);
1652                 else
1653                         lpfc_sli_update_full_ring(phba, pring);
1654         }
1655
1656         return;
1657 }
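
/*
 * Illustrative outline (helper names hypothetical) of the drain loop
 * above: iocbs move from the software txq into hardware slots until
 * either side runs out, and the terminating condition selects the
 * doorbell helper:
 *
 *        while ((slot = next_free_slot()) && (cmd = dequeue_txq()))
 *                submit(slot, cmd);
 *        if (slot)
 *                lpfc_sli_update_ring(phba, pring);      -- txq drained
 *        else
 *                lpfc_sli_update_full_ring(phba, pring); -- ring filled up
 */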
1658
1659 /**
1660  * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1661  * @phba: Pointer to HBA context object.
1662  * @hbqno: HBQ number.
1663  *
1664  * This function is called with hbalock held to get the next
1665  * available slot for the given HBQ. If a free slot is
1666  * available for the HBQ it will return a pointer to the next available
1667  * HBQ entry, else it will return NULL.
1668  **/
1669 static struct lpfc_hbq_entry *
1670 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1671 {
1672         struct hbq_s *hbqp = &phba->hbqs[hbqno];
1673
1674         lockdep_assert_held(&phba->hbalock);
1675
1676         if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1677             ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1678                 hbqp->next_hbqPutIdx = 0;
1679
1680         if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1681                 uint32_t raw_index = phba->hbq_get[hbqno];
1682                 uint32_t getidx = le32_to_cpu(raw_index);
1683
1684                 hbqp->local_hbqGetIdx = getidx;
1685
1686                 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1687                         lpfc_printf_log(phba, KERN_ERR,
1688                                         LOG_SLI | LOG_VPORT,
1689                                         "1802 HBQ %d: local_hbqGetIdx "
1690                                         "%u is >= hbqp->entry_count %u\n",
1691                                         hbqno, hbqp->local_hbqGetIdx,
1692                                         hbqp->entry_count);
1693
1694                         phba->link_state = LPFC_HBA_ERROR;
1695                         return NULL;
1696                 }
1697
1698                 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1699                         return NULL;
1700         }
1701
1702         return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1703                         hbqp->hbqPutIdx;
1704 }
1705
1706 /**
1707  * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1708  * @phba: Pointer to HBA context object.
1709  *
1710  * This function is called with no lock held to free all the
1711  * hbq buffers while uninitializing the SLI interface. It also
1712  * frees the HBQ buffers returned by the firmware but not yet
1713  * processed by the upper layers.
1714  **/
1715 void
1716 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1717 {
1718         struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1719         struct hbq_dmabuf *hbq_buf;
1720         unsigned long flags;
1721         int i, hbq_count;
1722         uint32_t hbqno;
1723
1724         hbq_count = lpfc_sli_hbq_count();
1725         /* Return all memory used by all HBQs */
1726         spin_lock_irqsave(&phba->hbalock, flags);
1727         for (i = 0; i < hbq_count; ++i) {
1728                 list_for_each_entry_safe(dmabuf, next_dmabuf,
1729                                 &phba->hbqs[i].hbq_buffer_list, list) {
1730                         hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1731                         list_del(&hbq_buf->dbuf.list);
1732                         (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1733                 }
1734                 phba->hbqs[i].buffer_count = 0;
1735         }
1736         /* Return all HBQ buffers that are in flight */
1737         list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
1738                                  list) {
1739                 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1740                 list_del(&hbq_buf->dbuf.list);
1741                 if (hbq_buf->tag == -1) {
1742                         (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1743                                 (phba, hbq_buf);
1744                 } else {
1745                         hbqno = hbq_buf->tag >> 16;
1746                         if (hbqno >= LPFC_MAX_HBQS)
1747                                 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1748                                         (phba, hbq_buf);
1749                         else
1750                                 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
1751                                         hbq_buf);
1752                 }
1753         }
1754
1755         /* Mark the HBQs not in use */
1756         phba->hbq_in_use = 0;
1757         spin_unlock_irqrestore(&phba->hbalock, flags);
1758 }
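
/*
 * Editorial note: both walks above use list_for_each_entry_safe()
 * because entries are unlinked and freed during iteration.  The _safe
 * variant caches the next node before the loop body runs:
 *
 *        list_for_each_entry_safe(pos, n, head, member) {
 *                list_del(&pos->member);
 *                free_entry(pos);        -- safe: 'n' was fetched first
 *        }
 *
 * A plain list_for_each_entry() here would dereference freed memory
 * (free_entry is a hypothetical stand-in for the hbq_free_buffer hooks).
 */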
1759
1760 /**
1761  * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
1762  * @phba: Pointer to HBA context object.
1763  * @hbqno: HBQ number.
1764  * @hbq_buf: Pointer to HBQ buffer.
1765  *
1766  * This function is called with the hbalock held to post a
1767  * hbq buffer to the firmware. If the function finds an empty
1768  * slot in the HBQ, it will post the buffer. The function will return
1769  * zero if it successfully posts the buffer, else it will
1770  * return an error code.
1771  **/
1772 static int
1773 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
1774                          struct hbq_dmabuf *hbq_buf)
1775 {
1776         lockdep_assert_held(&phba->hbalock);
1777         return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1778 }
1779
1780 /**
1781  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1782  * @phba: Pointer to HBA context object.
1783  * @hbqno: HBQ number.
1784  * @hbq_buf: Pointer to HBQ buffer.
1785  *
1786  * This function is called with the hbalock held to post a hbq buffer to the
1787  * firmware. If the function finds an empty slot in the HBQ, it will post the
1788  * buffer and place it on the hbq_buffer_list. The function will return zero if
1789  * it successfully posts the buffer, else it will return an error.
1790  **/
1791 static int
1792 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1793                             struct hbq_dmabuf *hbq_buf)
1794 {
1795         struct lpfc_hbq_entry *hbqe;
1796         dma_addr_t physaddr = hbq_buf->dbuf.phys;
1797
1798         lockdep_assert_held(&phba->hbalock);
1799         /* Get next HBQ entry slot to use */
1800         hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
1801         if (hbqe) {
1802                 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1803
1804                 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1805                 hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
1806                 hbqe->bde.tus.f.bdeSize = hbq_buf->size;
1807                 hbqe->bde.tus.f.bdeFlags = 0;
1808                 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
1809                 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
1810                                 /* Sync SLIM */
1811                 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
1812                 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
1813                                 /* flush */
1814                 readl(phba->hbq_put + hbqno);
1815                 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
1816                 return 0;
1817         }
1818         return -ENOMEM;
1819 }
1820
1821 /**
1822  * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1823  * @phba: Pointer to HBA context object.
1824  * @hbqno: HBQ number.
1825  * @hbq_buf: Pointer to HBQ buffer.
1826  *
1827  * This function is called with the hbalock held to post an RQE to the SLI4
1828  * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1829  * the hbq_buffer_list and return zero, otherwise it will return an error.
1830  **/
1831 static int
1832 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1833                             struct hbq_dmabuf *hbq_buf)
1834 {
1835         int rc;
1836         struct lpfc_rqe hrqe;
1837         struct lpfc_rqe drqe;
1838
1839         lockdep_assert_held(&phba->hbalock);
1840         hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1841         hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1842         drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1843         drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1844         rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1845                               &hrqe, &drqe);
1846         if (rc < 0)
1847                 return rc;
1848         hbq_buf->tag = rc;
1849         list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1850         return 0;
1851 }
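
/*
 * Editorial note: on SLI4 an hbq buffer is posted as a matched pair of
 * receive queue entries -- a header RQE built from hbuf.phys on hdr_rq
 * and a data RQE built from dbuf.phys on dat_rq -- each carrying a
 * 64-bit DMA address split into low/high words by putPaddrLow()/
 * putPaddrHigh().  On success lpfc_sli4_rq_put() returns the RQE index,
 * which is recorded as the buffer tag for later lookup.
 */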
1852
1853 /* HBQ for ELS and CT traffic. */
1854 static struct lpfc_hbq_init lpfc_els_hbq = {
1855         .rn = 1,
1856         .entry_count = 256,
1857         .mask_count = 0,
1858         .profile = 0,
1859         .ring_mask = (1 << LPFC_ELS_RING),
1860         .buffer_count = 0,
1861         .init_count = 40,
1862         .add_count = 40,
1863 };
1864
1865 /* HBQ for the extra ring if needed */
1866 static struct lpfc_hbq_init lpfc_extra_hbq = {
1867         .rn = 1,
1868         .entry_count = 200,
1869         .mask_count = 0,
1870         .profile = 0,
1871         .ring_mask = (1 << LPFC_EXTRA_RING),
1872         .buffer_count = 0,
1873         .init_count = 0,
1874         .add_count = 5,
1875 };
1876
1877 /* Array of HBQs */
1878 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
1879         &lpfc_els_hbq,
1880         &lpfc_extra_hbq,
1881 };
1882
1883 /**
1884  * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
1885  * @phba: Pointer to HBA context object.
1886  * @hbqno: HBQ number.
1887  * @count: Number of HBQ buffers to be posted.
1888  *
1889  * This function is called with no lock held to post more hbq buffers to the
1890  * given HBQ. The function returns the number of HBQ buffers successfully
1891  * posted.
1892  **/
1893 static int
1894 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
1895 {
1896         uint32_t i, posted = 0;
1897         unsigned long flags;
1898         struct hbq_dmabuf *hbq_buffer;
1899         LIST_HEAD(hbq_buf_list);
1900         if (!phba->hbqs[hbqno].hbq_alloc_buffer)
1901                 return 0;
1902
1903         if ((phba->hbqs[hbqno].buffer_count + count) >
1904             lpfc_hbq_defs[hbqno]->entry_count)
1905                 count = lpfc_hbq_defs[hbqno]->entry_count -
1906                                         phba->hbqs[hbqno].buffer_count;
1907         if (!count)
1908                 return 0;
1909         /* Allocate HBQ entries */
1910         for (i = 0; i < count; i++) {
1911                 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1912                 if (!hbq_buffer)
1913                         break;
1914                 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1915         }
1916         /* Check whether HBQ is still in use */
1917         spin_lock_irqsave(&phba->hbalock, flags);
1918         if (!phba->hbq_in_use)
1919                 goto err;
1920         while (!list_empty(&hbq_buf_list)) {
1921                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1922                                  dbuf.list);
1923                 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1924                                       (hbqno << 16));
1925                 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
1926                         phba->hbqs[hbqno].buffer_count++;
1927                         posted++;
1928                 } else
1929                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1930         }
1931         spin_unlock_irqrestore(&phba->hbalock, flags);
1932         return posted;
1933 err:
1934         spin_unlock_irqrestore(&phba->hbalock, flags);
1935         while (!list_empty(&hbq_buf_list)) {
1936                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1937                                  dbuf.list);
1938                 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1939         }
1940         return 0;
1941 }
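
/*
 * Editorial note on the tag encoding above: an hbq buffer tag packs the
 * HBQ number into the upper 16 bits and a per-queue count into the
 * lower 16 bits:
 *
 *        tag   = buffer_count | (hbqno << 16);   -- encode, above
 *        hbqno = tag >> 16;                      -- decode, lpfc_sli_hbqbuf_find()
 *
 * e.g. a buffer posted while HBQ 1 already holds 5 buffers gets tag
 * 0x00010005.
 */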
1942
1943 /**
1944  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
1945  * @phba: Pointer to HBA context object.
1946  * @qno: HBQ number.
1947  *
1948  * This function posts more buffers to the HBQ. This function
1949  * is called with no lock held. The function returns the number of HBQ buffers
1950  * successfully posted.
1951  **/
1952 int
1953 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1954 {
1955         if (phba->sli_rev == LPFC_SLI_REV4)
1956                 return 0;
1957         else
1958                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1959                                          lpfc_hbq_defs[qno]->add_count);
1960 }
1961
1962 /**
1963  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
1964  * @phba: Pointer to HBA context object.
1965  * @qno:  HBQ queue number.
1966  *
1967  * This function is called from SLI initialization code path with
1968  * no lock held to post initial HBQ buffers to firmware. The
1969  * function returns the number of HBQ buffers successfully posted.
1970  **/
1971 static int
1972 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1973 {
1974         if (phba->sli_rev == LPFC_SLI_REV4)
1975                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1976                                         lpfc_hbq_defs[qno]->entry_count);
1977         else
1978                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1979                                          lpfc_hbq_defs[qno]->init_count);
1980 }
1981
1982 /**
1983  * lpfc_sli_hbqbuf_get - Remove the first hbq buffer from an hbq list
1984  * @rb_list: Pointer to the hbq buffer list.
1986  *
1987  * This function removes the first hbq buffer on an hbq list and returns a
1988  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1989  **/
1990 static struct hbq_dmabuf *
1991 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1992 {
1993         struct lpfc_dmabuf *d_buf;
1994
1995         list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1996         if (!d_buf)
1997                 return NULL;
1998         return container_of(d_buf, struct hbq_dmabuf, dbuf);
1999 }
2000
2001 /**
2002  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2003  * @phba: Pointer to HBA context object.
2004  * @tag: Tag of the hbq buffer.
2005  *
2006  * This function searches for the hbq buffer associated with the given tag in
2007  * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer,
2008  * otherwise it returns NULL.
2009  **/
2010 static struct hbq_dmabuf *
2011 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2012 {
2013         struct lpfc_dmabuf *d_buf;
2014         struct hbq_dmabuf *hbq_buf;
2015         uint32_t hbqno;
2016
2017         hbqno = tag >> 16;
2018         if (hbqno >= LPFC_MAX_HBQS)
2019                 return NULL;
2020
2021         spin_lock_irq(&phba->hbalock);
2022         list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2023                 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2024                 if (hbq_buf->tag == tag) {
2025                         spin_unlock_irq(&phba->hbalock);
2026                         return hbq_buf;
2027                 }
2028         }
2029         spin_unlock_irq(&phba->hbalock);
2030         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2031                         "1803 Bad hbq tag. Data: x%x x%x\n",
2032                         tag, phba->hbqs[tag >> 16].buffer_count);
2033         return NULL;
2034 }
2035
2036 /**
2037  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2038  * @phba: Pointer to HBA context object.
2039  * @hbq_buffer: Pointer to HBQ buffer.
2040  *
2041  * This function is called with the hbalock held. It gives back
2042  * the hbq buffer to firmware. If the HBQ does not have space to
2043  * post the buffer, it will free the buffer.
2044  **/
2045 void
2046 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2047 {
2048         uint32_t hbqno;
2049
2050         if (hbq_buffer) {
2051                 hbqno = hbq_buffer->tag >> 16;
2052                 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2053                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2054         }
2055 }
2056
2057 /**
2058  * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2059  * @mbxCommand: mailbox command code.
2060  *
2061  * This function is called by the mailbox event handler function to verify
2062  * that the completed mailbox command is a legitimate mailbox command. If the
2063  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2064  * and the mailbox event handler will take the HBA offline.
2065  **/
2066 static int
2067 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2068 {
2069         uint8_t ret;
2070
2071         switch (mbxCommand) {
2072         case MBX_LOAD_SM:
2073         case MBX_READ_NV:
2074         case MBX_WRITE_NV:
2075         case MBX_WRITE_VPARMS:
2076         case MBX_RUN_BIU_DIAG:
2077         case MBX_INIT_LINK:
2078         case MBX_DOWN_LINK:
2079         case MBX_CONFIG_LINK:
2080         case MBX_CONFIG_RING:
2081         case MBX_RESET_RING:
2082         case MBX_READ_CONFIG:
2083         case MBX_READ_RCONFIG:
2084         case MBX_READ_SPARM:
2085         case MBX_READ_STATUS:
2086         case MBX_READ_RPI:
2087         case MBX_READ_XRI:
2088         case MBX_READ_REV:
2089         case MBX_READ_LNK_STAT:
2090         case MBX_REG_LOGIN:
2091         case MBX_UNREG_LOGIN:
2092         case MBX_CLEAR_LA:
2093         case MBX_DUMP_MEMORY:
2094         case MBX_DUMP_CONTEXT:
2095         case MBX_RUN_DIAGS:
2096         case MBX_RESTART:
2097         case MBX_UPDATE_CFG:
2098         case MBX_DOWN_LOAD:
2099         case MBX_DEL_LD_ENTRY:
2100         case MBX_RUN_PROGRAM:
2101         case MBX_SET_MASK:
2102         case MBX_SET_VARIABLE:
2103         case MBX_UNREG_D_ID:
2104         case MBX_KILL_BOARD:
2105         case MBX_CONFIG_FARP:
2106         case MBX_BEACON:
2107         case MBX_LOAD_AREA:
2108         case MBX_RUN_BIU_DIAG64:
2109         case MBX_CONFIG_PORT:
2110         case MBX_READ_SPARM64:
2111         case MBX_READ_RPI64:
2112         case MBX_REG_LOGIN64:
2113         case MBX_READ_TOPOLOGY:
2114         case MBX_WRITE_WWN:
2115         case MBX_SET_DEBUG:
2116         case MBX_LOAD_EXP_ROM:
2117         case MBX_ASYNCEVT_ENABLE:
2118         case MBX_REG_VPI:
2119         case MBX_UNREG_VPI:
2120         case MBX_HEARTBEAT:
2121         case MBX_PORT_CAPABILITIES:
2122         case MBX_PORT_IOV_CONTROL:
2123         case MBX_SLI4_CONFIG:
2124         case MBX_SLI4_REQ_FTRS:
2125         case MBX_REG_FCFI:
2126         case MBX_UNREG_FCFI:
2127         case MBX_REG_VFI:
2128         case MBX_UNREG_VFI:
2129         case MBX_INIT_VPI:
2130         case MBX_INIT_VFI:
2131         case MBX_RESUME_RPI:
2132         case MBX_READ_EVENT_LOG_STATUS:
2133         case MBX_READ_EVENT_LOG:
2134         case MBX_SECURITY_MGMT:
2135         case MBX_AUTH_PORT:
2136         case MBX_ACCESS_VDATA:
2137                 ret = mbxCommand;
2138                 break;
2139         default:
2140                 ret = MBX_SHUTDOWN;
2141                 break;
2142         }
2143         return ret;
2144 }
2145
2146 /**
2147  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2148  * @phba: Pointer to HBA context object.
2149  * @pmboxq: Pointer to mailbox command.
2150  *
2151  * This is completion handler function for mailbox commands issued from
2152  * lpfc_sli_issue_mbox_wait function. This function is called by the
2153  * mailbox event handler function with no lock held. This function
2154  * will wake up the thread waiting on the wait queue pointed to by
2155  * context1 of the mailbox.
2156  **/
2157 void
2158 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2159 {
2160         wait_queue_head_t *pdone_q;
2161         unsigned long drvr_flag;
2162
2163         /*
2164          * If pdone_q is empty, the driver thread gave up waiting and
2165          * continued running.
2166          */
2167         pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2168         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2169         pdone_q = (wait_queue_head_t *) pmboxq->context1;
2170         if (pdone_q)
2171                 wake_up_interruptible(pdone_q);
2172         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2173         return;
2174 }
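
/*
 * Editorial sketch of the handshake with the waiter in
 * lpfc_sli_issue_mbox_wait(): context1 is read here, and cleared by a
 * timed-out waiter, under the same hbalock, so a late completion can
 * never wake a wait queue that has already been abandoned:
 *
 *        waiter (timeout path)           completer (this function)
 *        ---------------------           -------------------------
 *        spin_lock_irqsave(hbalock)      spin_lock_irqsave(hbalock)
 *        pmboxq->context1 = NULL         pdone_q = pmboxq->context1
 *        spin_unlock_irqrestore(...)     if (pdone_q)
 *                                                wake_up_interruptible(pdone_q)
 *                                        spin_unlock_irqrestore(...)
 */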
2175
2176
2177 /**
2178  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2179  * @phba: Pointer to HBA context object.
2180  * @pmb: Pointer to mailbox object.
2181  *
2182  * This function is the default mailbox completion handler. It
2183  * frees the memory resources associated with the completed mailbox
2184  * command. If the completed command is a REG_LOGIN mailbox command,
2185  * this function will issue a UREG_LOGIN to re-claim the RPI.
2186  **/
2187 void
2188 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2189 {
2190         struct lpfc_vport  *vport = pmb->vport;
2191         struct lpfc_dmabuf *mp;
2192         struct lpfc_nodelist *ndlp;
2193         struct Scsi_Host *shost;
2194         uint16_t rpi, vpi;
2195         int rc;
2196
2197         mp = (struct lpfc_dmabuf *) (pmb->context1);
2198
2199         if (mp) {
2200                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2201                 kfree(mp);
2202         }
2203
2204         /*
2205          * If a REG_LOGIN succeeded after the node was destroyed or the node
2206          * is in re-discovery, the driver needs to clean up the RPI.
2207          */
2208         if (!(phba->pport->load_flag & FC_UNLOADING) &&
2209             pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2210             !pmb->u.mb.mbxStatus) {
2211                 rpi = pmb->u.mb.un.varWords[0];
2212                 vpi = pmb->u.mb.un.varRegLogin.vpi;
2213                 if (phba->sli_rev == LPFC_SLI_REV4)
2214                         vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2215                 lpfc_unreg_login(phba, vpi, rpi, pmb);
2216                 pmb->vport = vport;
2217                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2218                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2219                 if (rc != MBX_NOT_FINISHED)
2220                         return;
2221         }
2222
2223         if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2224                 !(phba->pport->load_flag & FC_UNLOADING) &&
2225                 !pmb->u.mb.mbxStatus) {
2226                 shost = lpfc_shost_from_vport(vport);
2227                 spin_lock_irq(shost->host_lock);
2228                 vport->vpi_state |= LPFC_VPI_REGISTERED;
2229                 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2230                 spin_unlock_irq(shost->host_lock);
2231         }
2232
2233         if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2234                 ndlp = (struct lpfc_nodelist *)pmb->context2;
2235                 lpfc_nlp_put(ndlp);
2236                 pmb->context2 = NULL;
2237         }
2238
2239         /* Check security permission status on INIT_LINK mailbox command */
2240         if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2241             (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2242                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2243                                 "2860 SLI authentication is required "
2244                                 "for INIT_LINK but has not been done yet\n");
2245
2246         if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2247                 lpfc_sli4_mbox_cmd_free(phba, pmb);
2248         else
2249                 mempool_free(pmb, phba->mbox_mem_pool);
2250 }
2251 /**
2252  * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2253  * @phba: Pointer to HBA context object.
2254  * @pmb: Pointer to mailbox object.
2255  *
2256  * This function is the unreg rpi mailbox completion handler. It
2257  * frees the memory resources associated with the completed mailbox
2258  * command. Because an additional reference is put on the ndlp to
2259  * prevent lpfc_nlp_release from freeing the rpi bit in the bitmask
2260  * before the unreg mailbox command completes, this routine puts that
2261  * reference back.
2262  *
2263  **/
2264 void
2265 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2266 {
2267         struct lpfc_vport  *vport = pmb->vport;
2268         struct lpfc_nodelist *ndlp;
2269
2270         ndlp = pmb->context1;
2271         if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2272                 if (phba->sli_rev == LPFC_SLI_REV4 &&
2273                     (bf_get(lpfc_sli_intf_if_type,
2274                      &phba->sli4_hba.sli_intf) ==
2275                      LPFC_SLI_INTF_IF_TYPE_2)) {
2276                         if (ndlp) {
2277                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2278                                                  "0010 UNREG_LOGIN vpi:%x "
2279                                                  "rpi:%x DID:%x map:%x %p\n",
2280                                                  vport->vpi, ndlp->nlp_rpi,
2281                                                  ndlp->nlp_DID,
2282                                                  ndlp->nlp_usg_map, ndlp);
2283                                 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2284                                 lpfc_nlp_put(ndlp);
2285                         }
2286                 }
2287         }
2288
2289         mempool_free(pmb, phba->mbox_mem_pool);
2290 }
2291
2292 /**
2293  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2294  * @phba: Pointer to HBA context object.
2295  *
2296  * This function is called with no lock held. This function processes all
2297  * the completed mailbox commands and gives them to upper layers. The interrupt
2298  * service routine processes the mailbox completion interrupt and adds completed
2299  * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2300  * The worker thread calls lpfc_sli_handle_mb_event, which will return the
2301  * completed mailbox commands in the mboxq_cmpl queue to the upper layers. This
2302  * function returns the mailbox commands to the upper layer by calling the
2303  * completion handler function of each mailbox.
2304  **/
2305 int
2306 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2307 {
2308         MAILBOX_t *pmbox;
2309         LPFC_MBOXQ_t *pmb;
2310         int rc;
2311         LIST_HEAD(cmplq);
2312
2313         phba->sli.slistat.mbox_event++;
2314
2315         /* Get all completed mailbox buffers into the cmplq */
2316         spin_lock_irq(&phba->hbalock);
2317         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2318         spin_unlock_irq(&phba->hbalock);
2319
2320         /* Get a Mailbox buffer to setup mailbox commands for callback */
2321         do {
2322                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2323                 if (pmb == NULL)
2324                         break;
2325
2326                 pmbox = &pmb->u.mb;
2327
2328                 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2329                         if (pmb->vport) {
2330                                 lpfc_debugfs_disc_trc(pmb->vport,
2331                                         LPFC_DISC_TRC_MBOX_VPORT,
2332                                         "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2333                                         (uint32_t)pmbox->mbxCommand,
2334                                         pmbox->un.varWords[0],
2335                                         pmbox->un.varWords[1]);
2336                         }
2337                         else {
2338                                 lpfc_debugfs_disc_trc(phba->pport,
2339                                         LPFC_DISC_TRC_MBOX,
2340                                         "MBOX cmpl:       cmd:x%x mb:x%x x%x",
2341                                         (uint32_t)pmbox->mbxCommand,
2342                                         pmbox->un.varWords[0],
2343                                         pmbox->un.varWords[1]);
2344                         }
2345                 }
2346
2347                 /*
2348                  * It is a fatal error if an unknown mailbox command completes.
2349                  */
2350                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2351                     MBX_SHUTDOWN) {
2352                         /* Unknown mailbox command compl */
2353                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2354                                         "(%d):0323 Unknown Mailbox command "
2355                                         "x%x (x%x/x%x) Cmpl\n",
2356                                         pmb->vport ? pmb->vport->vpi : 0,
2357                                         pmbox->mbxCommand,
2358                                         lpfc_sli_config_mbox_subsys_get(phba,
2359                                                                         pmb),
2360                                         lpfc_sli_config_mbox_opcode_get(phba,
2361                                                                         pmb));
2362                         phba->link_state = LPFC_HBA_ERROR;
2363                         phba->work_hs = HS_FFER3;
2364                         lpfc_handle_eratt(phba);
2365                         continue;
2366                 }
2367
2368                 if (pmbox->mbxStatus) {
2369                         phba->sli.slistat.mbox_stat_err++;
2370                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2371                                 /* Mbox cmd cmpl error - RETRYing */
2372                                 lpfc_printf_log(phba, KERN_INFO,
2373                                         LOG_MBOX | LOG_SLI,
2374                                         "(%d):0305 Mbox cmd cmpl "
2375                                         "error - RETRYing Data: x%x "
2376                                         "(x%x/x%x) x%x x%x x%x\n",
2377                                         pmb->vport ? pmb->vport->vpi : 0,
2378                                         pmbox->mbxCommand,
2379                                         lpfc_sli_config_mbox_subsys_get(phba,
2380                                                                         pmb),
2381                                         lpfc_sli_config_mbox_opcode_get(phba,
2382                                                                         pmb),
2383                                         pmbox->mbxStatus,
2384                                         pmbox->un.varWords[0],
2385                                         pmb->vport ? pmb->vport->port_state : 0);
2386                                 pmbox->mbxStatus = 0;
2387                                 pmbox->mbxOwner = OWN_HOST;
2388                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2389                                 if (rc != MBX_NOT_FINISHED)
2390                                         continue;
2391                         }
2392                 }
2393
2394                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2395                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2396                                 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2397                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2398                                 "x%x x%x x%x\n",
2399                                 pmb->vport ? pmb->vport->vpi : 0,
2400                                 pmbox->mbxCommand,
2401                                 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2402                                 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2403                                 pmb->mbox_cmpl,
2404                                 *((uint32_t *) pmbox),
2405                                 pmbox->un.varWords[0],
2406                                 pmbox->un.varWords[1],
2407                                 pmbox->un.varWords[2],
2408                                 pmbox->un.varWords[3],
2409                                 pmbox->un.varWords[4],
2410                                 pmbox->un.varWords[5],
2411                                 pmbox->un.varWords[6],
2412                                 pmbox->un.varWords[7],
2413                                 pmbox->un.varWords[8],
2414                                 pmbox->un.varWords[9],
2415                                 pmbox->un.varWords[10]);
2416
2417                 if (pmb->mbox_cmpl)
2418                         pmb->mbox_cmpl(phba, pmb);
2419         } while (1);
2420         return 0;
2421 }
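
/*
 * Editorial note: the list_splice_init() above is the usual "harvest
 * under lock, process unlocked" idiom:
 *
 *        spin_lock_irq(&lock);
 *        list_splice_init(&shared_q, &local_q);  -- O(1); shared_q left empty
 *        spin_unlock_irq(&lock);
 *        while (!list_empty(&local_q))
 *                pop and complete entries without holding the lock;
 *
 * Completion handlers are therefore free to take hbalock themselves or
 * to issue new mailbox commands, as the MBXERR_NO_RESOURCES retry path
 * above does.
 */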
2422
2423 /**
2424  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2425  * @phba: Pointer to HBA context object.
2426  * @pring: Pointer to driver SLI ring object.
2427  * @tag: buffer tag.
2428  *
2429  * This function is called with no lock held. When the QUE_BUFTAG_BIT
2430  * is set in the tag, the buffer was posted for a particular exchange and
2431  * the function will return the buffer without replacing it.
2432  * If the buffer is for unsolicited ELS or CT traffic, this function
2433  * returns the buffer and also posts another buffer to the firmware.
2434  **/
2435 static struct lpfc_dmabuf *
2436 lpfc_sli_get_buff(struct lpfc_hba *phba,
2437                   struct lpfc_sli_ring *pring,
2438                   uint32_t tag)
2439 {
2440         struct hbq_dmabuf *hbq_entry;
2441
2442         if (tag & QUE_BUFTAG_BIT)
2443                 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2444         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2445         if (!hbq_entry)
2446                 return NULL;
2447         return &hbq_entry->dbuf;
2448 }
2449
2450 /**
2451  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2452  * @phba: Pointer to HBA context object.
2453  * @pring: Pointer to driver SLI ring object.
2454  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2455  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2456  * @fch_type: the type for the first frame of the sequence.
2457  *
2458  * This function is called with no lock held. This function uses the r_ctl and
2459  * type of the received sequence to find the correct callback function to call
2460  * to process the sequence.
2461  **/
2462 static int
2463 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2464                          struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2465                          uint32_t fch_type)
2466 {
2467         int i;
2468
2469         /* Unsolicited responses */
2470         if (pring->prt[0].profile) {
2471                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2472                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2473                                                                         saveq);
2474                 return 1;
2475         }
2476         /* We must search, based on the rctl / type,
2477          * for the right routine */
2478         for (i = 0; i < pring->num_mask; i++) {
2479                 if ((pring->prt[i].rctl == fch_r_ctl) &&
2480                     (pring->prt[i].type == fch_type)) {
2481                         if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2482                                 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2483                                                 (phba, pring, saveq);
2484                         return 1;
2485                 }
2486         }
2487         return 0;
2488 }
2489
2490 /**
2491  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2492  * @phba: Pointer to HBA context object.
2493  * @pring: Pointer to driver SLI ring object.
2494  * @saveq: Pointer to the unsolicited iocb.
2495  *
2496  * This function is called with no lock held by the ring event handler
2497  * when there is an unsolicited iocb posted to the response ring by the
2498  * firmware. This function gets the buffer associated with the iocbs
2499  * and calls the event handler for the ring. This function handles both
2500  * qring buffers and hbq buffers.
2501  * When the function returns 1, the caller can free the iocb object;
2502  * otherwise upper layer functions will free the iocb objects.
2503  **/
2504 static int
2505 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2506                             struct lpfc_iocbq *saveq)
2507 {
2508         IOCB_t            *irsp;
2509         WORD5             *w5p;
2510         uint32_t           Rctl, Type;
2511         struct lpfc_iocbq *iocbq;
2512         struct lpfc_dmabuf *dmzbuf;
2513
2514         irsp = &(saveq->iocb);
2515
2516         if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2517                 if (pring->lpfc_sli_rcv_async_status)
2518                         pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2519                 else
2520                         lpfc_printf_log(phba,
2521                                         KERN_WARNING,
2522                                         LOG_SLI,
2523                                         "0316 Ring %d handler: unexpected "
2524                                         "ASYNC_STATUS iocb received evt_code "
2525                                         "0x%x\n",
2526                                         pring->ringno,
2527                                         irsp->un.asyncstat.evt_code);
2528                 return 1;
2529         }
2530
2531         if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2532                 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2533                 if (irsp->ulpBdeCount > 0) {
2534                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2535                                         irsp->un.ulpWord[3]);
2536                         lpfc_in_buf_free(phba, dmzbuf);
2537                 }
2538
2539                 if (irsp->ulpBdeCount > 1) {
2540                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2541                                         irsp->unsli3.sli3Words[3]);
2542                         lpfc_in_buf_free(phba, dmzbuf);
2543                 }
2544
2545                 if (irsp->ulpBdeCount > 2) {
2546                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2547                                 irsp->unsli3.sli3Words[7]);
2548                         lpfc_in_buf_free(phba, dmzbuf);
2549                 }
2550
2551                 return 1;
2552         }
2553
2554         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2555                 if (irsp->ulpBdeCount != 0) {
2556                         saveq->context2 = lpfc_sli_get_buff(phba, pring,
2557                                                 irsp->un.ulpWord[3]);
2558                         if (!saveq->context2)
2559                                 lpfc_printf_log(phba,
2560                                         KERN_ERR,
2561                                         LOG_SLI,
2562                                         "0341 Ring %d Cannot find buffer for "
2563                                         "an unsolicited iocb. tag 0x%x\n",
2564                                         pring->ringno,
2565                                         irsp->un.ulpWord[3]);
2566                 }
2567                 if (irsp->ulpBdeCount == 2) {
2568                         saveq->context3 = lpfc_sli_get_buff(phba, pring,
2569                                                 irsp->unsli3.sli3Words[7]);
2570                         if (!saveq->context3)
2571                                 lpfc_printf_log(phba,
2572                                         KERN_ERR,
2573                                         LOG_SLI,
2574                                         "0342 Ring %d Cannot find buffer for an"
2575                                         " unsolicited iocb. tag 0x%x\n",
2576                                         pring->ringno,
2577                                         irsp->unsli3.sli3Words[7]);
2578                 }
2579                 list_for_each_entry(iocbq, &saveq->list, list) {
2580                         irsp = &(iocbq->iocb);
2581                         if (irsp->ulpBdeCount != 0) {
2582                                 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2583                                                         irsp->un.ulpWord[3]);
2584                                 if (!iocbq->context2)
2585                                         lpfc_printf_log(phba,
2586                                                 KERN_ERR,
2587                                                 LOG_SLI,
2588                                                 "0343 Ring %d Cannot find "
2589                                                 "buffer for an unsolicited iocb"
2590                                                 ". tag 0x%x\n", pring->ringno,
2591                                                 irsp->un.ulpWord[3]);
2592                         }
2593                         if (irsp->ulpBdeCount == 2) {
2594                                 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2595                                                 irsp->unsli3.sli3Words[7]);
2596                                 if (!iocbq->context3)
2597                                         lpfc_printf_log(phba,
2598                                                 KERN_ERR,
2599                                                 LOG_SLI,
2600                                                 "0344 Ring %d Cannot find "
2601                                                 "buffer for an unsolicited "
2602                                                 "iocb. tag 0x%x\n",
2603                                                 pring->ringno,
2604                                                 irsp->unsli3.sli3Words[7]);
2605                         }
2606                 }
2607         }
2608         if (irsp->ulpBdeCount != 0 &&
2609             (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2610              irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2611                 int found = 0;
2612
2613                 /* search continue save q for same XRI */
2614                 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2615                         if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2616                                 saveq->iocb.unsli3.rcvsli3.ox_id) {
2617                                 list_add_tail(&saveq->list, &iocbq->list);
2618                                 found = 1;
2619                                 break;
2620                         }
2621                 }
2622                 if (!found)
2623                         list_add_tail(&saveq->clist,
2624                                       &pring->iocb_continue_saveq);
2625                 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2626                         list_del_init(&iocbq->clist);
2627                         saveq = iocbq;
2628                         irsp = &(saveq->iocb);
2629                 } else
2630                         return 0;
2631         }
2632         if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2633             (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2634             (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2635                 Rctl = FC_RCTL_ELS_REQ;
2636                 Type = FC_TYPE_ELS;
2637         } else {
2638                 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2639                 Rctl = w5p->hcsw.Rctl;
2640                 Type = w5p->hcsw.Type;
2641
2642                 /* Firmware Workaround */
2643                 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2644                         (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2645                          irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2646                         Rctl = FC_RCTL_ELS_REQ;
2647                         Type = FC_TYPE_ELS;
2648                         w5p->hcsw.Rctl = Rctl;
2649                         w5p->hcsw.Type = Type;
2650                 }
2651         }
2652
2653         if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2654                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2655                                 "0313 Ring %d handler: unexpected Rctl x%x "
2656                                 "Type x%x received\n",
2657                                 pring->ringno, Rctl, Type);
2658
2659         return 1;
2660 }
2661
2662 /**
2663  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2664  * @phba: Pointer to HBA context object.
2665  * @pring: Pointer to driver SLI ring object.
2666  * @prspiocb: Pointer to response iocb object.
2667  *
2668  * This function looks up the iocb_lookup table to get the command iocb
2669  * corresponding to the given response iocb using the iotag of the
2670  * response iocb. This function is called with the hbalock held.
2671  * This function returns the command iocb object if it finds the command
2672  * iocb, otherwise it returns NULL.
2673  **/
2674 static struct lpfc_iocbq *
2675 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2676                       struct lpfc_sli_ring *pring,
2677                       struct lpfc_iocbq *prspiocb)
2678 {
2679         struct lpfc_iocbq *cmd_iocb = NULL;
2680         uint16_t iotag;
2681         lockdep_assert_held(&phba->hbalock);
2682
2683         iotag = prspiocb->iocb.ulpIoTag;
2684
2685         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2686                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2687                 list_del_init(&cmd_iocb->list);
2688                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2689                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2690                 }
2691                 return cmd_iocb;
2692         }
2693
2694         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2695                         "0317 iotag x%x is out of "
2696                         "range: max iotag x%x wd0 x%x\n",
2697                         iotag, phba->sli.last_iotag,
2698                         *(((uint32_t *) &prspiocb->iocb) + 7));
2699         return NULL;
2700 }
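/*
 * Typical usage (sketch, mirroring lpfc_sli_process_sol_iocb() below):
 * the caller resolves a response to its originating command while
 * holding the hbalock, e.g.
 *
 *	spin_lock_irqsave(&phba->hbalock, iflag);
 *	cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, &rspiocbq);
 *	spin_unlock_irqrestore(&phba->hbalock, iflag);
 *
 * The iocbq_lookup array is indexed directly by iotag, so the lookup is
 * O(1); iotag 0 is reserved and always treated as invalid.
 */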
2701
2702 /**
2703  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2704  * @phba: Pointer to HBA context object.
2705  * @pring: Pointer to driver SLI ring object.
2706  * @iotag: IOCB tag.
2707  *
2708  * This function looks up the iocb_lookup table to get the command iocb
2709  * corresponding to the given iotag. This function is called with the
2710  * hbalock held.
2711  * This function returns the command iocb object if it finds the command
2712  * iocb, otherwise it returns NULL.
2713  **/
2714 static struct lpfc_iocbq *
2715 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2716                              struct lpfc_sli_ring *pring, uint16_t iotag)
2717 {
2718         struct lpfc_iocbq *cmd_iocb;
2719
2720         lockdep_assert_held(&phba->hbalock);
2721         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2722                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2723                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2724                         /* remove from txcmpl queue list */
2725                         list_del_init(&cmd_iocb->list);
2726                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2727                         return cmd_iocb;
2728                 }
2729         }
2730         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2731                         "0372 iotag x%x is out of range: max iotag (x%x)\n",
2732                         iotag, phba->sli.last_iotag);
2733         return NULL;
2734 }
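/*
 * Unlike lpfc_sli_iocbq_lookup() above, this by-tag variant only returns
 * (and unlinks) the command iocb if it is still marked as being on the
 * txcmplq; a tag that resolves to an iocb no longer on that queue is
 * treated as a lookup miss.
 */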
2735
2736 /**
2737  * lpfc_sli_process_sol_iocb - process solicited iocb completion
2738  * @phba: Pointer to HBA context object.
2739  * @pring: Pointer to driver SLI ring object.
2740  * @saveq: Pointer to the response iocb to be processed.
2741  *
2742  * This function is called by the ring event handler for non-fcp
2743  * rings when there is a new response iocb in the response ring.
2744  * The caller is not required to hold any locks. This function
2745  * gets the command iocb associated with the response iocb and
2746  * calls the completion handler for the command iocb. If there
2747  * is no completion handler, the function will free the resources
2748  * associated with command iocb. If the response iocb is for
2749  * an already aborted command iocb, the status of the completion
2750  * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2751  * This function always returns 1.
2752  **/
2753 static int
2754 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2755                           struct lpfc_iocbq *saveq)
2756 {
2757         struct lpfc_iocbq *cmdiocbp;
2758         int rc = 1;
2759         unsigned long iflag;
2760
2761         /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2762         spin_lock_irqsave(&phba->hbalock, iflag);
2763         cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2764         spin_unlock_irqrestore(&phba->hbalock, iflag);
2765
2766         if (cmdiocbp) {
2767                 if (cmdiocbp->iocb_cmpl) {
2768                         /*
2769                          * If an ELS command failed, send an event to mgmt
2770                          * application.
2771                          */
2772                         if (saveq->iocb.ulpStatus &&
2773                              (pring->ringno == LPFC_ELS_RING) &&
2774                              (cmdiocbp->iocb.ulpCommand ==
2775                                 CMD_ELS_REQUEST64_CR))
2776                                 lpfc_send_els_failure_event(phba,
2777                                         cmdiocbp, saveq);
2778
2779                         /*
2780                          * Post all ELS completions to the worker thread.
2781                          * All others are passed to the completion callback.
2782                          */
2783                         if (pring->ringno == LPFC_ELS_RING) {
2784                                 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2785                                     (cmdiocbp->iocb_flag &
2786                                                         LPFC_DRIVER_ABORTED)) {
2787                                         spin_lock_irqsave(&phba->hbalock,
2788                                                           iflag);
2789                                         cmdiocbp->iocb_flag &=
2790                                                 ~LPFC_DRIVER_ABORTED;
2791                                         spin_unlock_irqrestore(&phba->hbalock,
2792                                                                iflag);
2793                                         saveq->iocb.ulpStatus =
2794                                                 IOSTAT_LOCAL_REJECT;
2795                                         saveq->iocb.un.ulpWord[4] =
2796                                                 IOERR_SLI_ABORTED;
2797
2798                                         /* Firmware could still be in the process
2799                                          * of DMAing the payload, so don't free
2800                                          * the data buffer until after a heartbeat.
2801                                          */
2802                                         spin_lock_irqsave(&phba->hbalock,
2803                                                           iflag);
2804                                         saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
2805                                         spin_unlock_irqrestore(&phba->hbalock,
2806                                                                iflag);
2807                                 }
2808                                 if (phba->sli_rev == LPFC_SLI_REV4) {
2809                                         if (saveq->iocb_flag &
2810                                             LPFC_EXCHANGE_BUSY) {
2811                                                 /* Set the exchange busy flag
2812                                                  * on the cmdiocb so the sgl
2813                                                  * (xri) will not be released
2814                                                  * until the abort xri is
2815                                                  * received from the hba.
2816                                                  */
2817                                                 spin_lock_irqsave(
2818                                                         &phba->hbalock, iflag);
2819                                                 cmdiocbp->iocb_flag |=
2820                                                         LPFC_EXCHANGE_BUSY;
2821                                                 spin_unlock_irqrestore(
2822                                                         &phba->hbalock, iflag);
2823                                         }
2824                                         if (cmdiocbp->iocb_flag &
2825                                             LPFC_DRIVER_ABORTED) {
2826                                                 /*
2827                                                  * Clear LPFC_DRIVER_ABORTED
2828                                                  * bit in case it was driver
2829                                                  * initiated abort.
2830                                                  */
2831                                                 spin_lock_irqsave(
2832                                                         &phba->hbalock, iflag);
2833                                                 cmdiocbp->iocb_flag &=
2834                                                         ~LPFC_DRIVER_ABORTED;
2835                                                 spin_unlock_irqrestore(
2836                                                         &phba->hbalock, iflag);
2837                                                 cmdiocbp->iocb.ulpStatus =
2838                                                         IOSTAT_LOCAL_REJECT;
2839                                                 cmdiocbp->iocb.un.ulpWord[4] =
2840                                                         IOERR_ABORT_REQUESTED;
2841                                                 /*
2842                                                  * For SLI4, irspiocb contains
2843                                                  * NO_XRI in sli_xritag, so it
2844                                                  * shall not affect the sgl
2845                                                  * (xri) release process.
2846                                                  */
2847                                                 saveq->iocb.ulpStatus =
2848                                                         IOSTAT_LOCAL_REJECT;
2849                                                 saveq->iocb.un.ulpWord[4] =
2850                                                         IOERR_SLI_ABORTED;
2851                                                 spin_lock_irqsave(
2852                                                         &phba->hbalock, iflag);
2853                                                 saveq->iocb_flag |=
2854                                                         LPFC_DELAY_MEM_FREE;
2855                                                 spin_unlock_irqrestore(
2856                                                         &phba->hbalock, iflag);
2857                                         }
2858                                 }
2859                         }
2860                         (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
2861                 } else
2862                         lpfc_sli_release_iocbq(phba, cmdiocbp);
2863         } else {
2864                 /*
2865                  * Unknown initiating command based on the response iotag.
2866                  * This could be the case on the ELS ring because of
2867                  * lpfc_els_abort().
2868                  */
2869                 if (pring->ringno != LPFC_ELS_RING) {
2870                         /*
2871                          * Ring <ringno> handler: unexpected completion IoTag
2872                          * <IoTag>
2873                          */
2874                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2875                                          "0322 Ring %d handler: "
2876                                          "unexpected completion IoTag x%x "
2877                                          "Data: x%x x%x x%x x%x\n",
2878                                          pring->ringno,
2879                                          saveq->iocb.ulpIoTag,
2880                                          saveq->iocb.ulpStatus,
2881                                          saveq->iocb.un.ulpWord[4],
2882                                          saveq->iocb.ulpCommand,
2883                                          saveq->iocb.ulpContext);
2884                 }
2885         }
2886
2887         return rc;
2888 }
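/*
 * Note that the iocb_cmpl callback above runs without the hbalock held;
 * the lock is only taken around the brief iocb_flag and status updates
 * that precede it.
 */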
2889
2890 /**
2891  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
2892  * @phba: Pointer to HBA context object.
2893  * @pring: Pointer to driver SLI ring object.
2894  *
2895  * This function is called from the iocb ring event handlers when
2896  * put pointer is ahead of the get pointer for a ring. This function signals
2897  * an error attention condition to the worker thread, and the worker
2898  * thread will transition the HBA to offline state.
2899  **/
2900 static void
2901 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2902 {
2903         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2904         /*
2905          * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
2906          * rsp ring <portRspMax>
2907          */
2908         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2909                         "0312 Ring %d handler: portRspPut %d "
2910                         "is bigger than rsp ring %d\n",
2911                         pring->ringno, le32_to_cpu(pgp->rspPutInx),
2912                         pring->sli.sli3.numRiocb);
2913
2914         phba->link_state = LPFC_HBA_ERROR;
2915
2916         /*
2917          * All error attention handlers are posted to
2918          * worker thread
2919          */
2920         phba->work_ha |= HA_ERATT;
2921         phba->work_hs = HS_FFER3;
2922
2923         lpfc_worker_wake_up(phba);
2924
2925         return;
2926 }
2927
2928 /**
2929  * lpfc_poll_eratt - Error attention polling timer timeout handler
2930  * @ptr: Pointer to address of HBA context object.
2931  *
2932  * This function is invoked by the Error Attention polling timer when the
2933  * timer times out. It will check the SLI Error Attention register for
2934  * possible attention events. If any are found, it will post an Error
2935  * Attention event and wake up the worker thread to process it. Otherwise,
2936  * it will set up the Error Attention polling timer for the next poll.
2937  **/
2938 void lpfc_poll_eratt(unsigned long ptr)
2939 {
2940         struct lpfc_hba *phba;
2941         uint32_t eratt = 0;
2942         uint64_t sli_intr, cnt;
2943
2944         phba = (struct lpfc_hba *)ptr;
2945
2946         /* Here we also keep track of interrupts per second on the HBA */
2947         sli_intr = phba->sli.slistat.sli_intr;
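        /*
         * sli_intr is a free-running 64-bit count of interrupts; if it has
         * wrapped since the previous poll (prior snapshot larger than the
         * current one), compute the delta across the wrap point.
         */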
2948
2949         if (phba->sli.slistat.sli_prev_intr > sli_intr)
2950                 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
2951                         sli_intr);
2952         else
2953                 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
2954
2955         /* 64-bit integer division not supported on 32-bit x86 - use do_div */
2956         do_div(cnt, phba->eratt_poll_interval);
2957         phba->sli.slistat.sli_ips = cnt;
2958
2959         phba->sli.slistat.sli_prev_intr = sli_intr;
2960
2961         /* Check chip HA register for error event */
2962         eratt = lpfc_sli_check_eratt(phba);
2963
2964         if (eratt)
2965                 /* Tell the worker thread there is work to do */
2966                 lpfc_worker_wake_up(phba);
2967         else
2968                 /* Restart the timer for next eratt poll */
2969                 mod_timer(&phba->eratt_poll,
2970                           jiffies +
2971                           msecs_to_jiffies(1000 * phba->eratt_poll_interval));
2972         return;
2973 }
2974
2975
2976 /**
2977  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
2978  * @phba: Pointer to HBA context object.
2979  * @pring: Pointer to driver SLI ring object.
2980  * @mask: Host attention register mask for this ring.
2981  *
2982  * This function is called from the interrupt context when there is a ring
2983  * event for the fcp ring. The caller does not hold any lock.
2984  * The function processes each response iocb in the response ring, chaining
2985  * the iocbs together until it finds an iocb with the LE bit set. The
2986  * function will call the completion handler of the command iocb
2987  * if the response iocb indicates a completion for a command iocb or it is
2988  * an abort completion. The function will call lpfc_sli_process_unsol_iocb
2989  * function if this is an unsolicited iocb.
2990  * This routine presumes LPFC_FCP_RING handling and doesn't bother
2991  * to check it explicitly.
2992  */
2993 int
2994 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2995                                 struct lpfc_sli_ring *pring, uint32_t mask)
2996 {
2997         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2998         IOCB_t *irsp = NULL;
2999         IOCB_t *entry = NULL;
3000         struct lpfc_iocbq *cmdiocbq = NULL;
3001         struct lpfc_iocbq rspiocbq;
3002         uint32_t status;
3003         uint32_t portRspPut, portRspMax;
3004         int rc = 1;
3005         lpfc_iocb_type type;
3006         unsigned long iflag;
3007         uint32_t rsp_cmpl = 0;
3008
3009         spin_lock_irqsave(&phba->hbalock, iflag);
3010         pring->stats.iocb_event++;
3011
3012         /*
3013          * The next available response entry should never exceed the maximum
3014          * entries.  If it does, treat it as an adapter hardware error.
3015          */
3016         portRspMax = pring->sli.sli3.numRiocb;
3017         portRspPut = le32_to_cpu(pgp->rspPutInx);
3018         if (unlikely(portRspPut >= portRspMax)) {
3019                 lpfc_sli_rsp_pointers_error(phba, pring);
3020                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3021                 return 1;
3022         }
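        /*
         * fcp_ring_in_use serializes FCP response-ring processing: it is
         * tested and set under the hbalock, so a second context racing in
         * here simply returns and lets the current owner drain the ring.
         */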
3023         if (phba->fcp_ring_in_use) {
3024                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3025                 return 1;
3026         } else
3027                 phba->fcp_ring_in_use = 1;
3028
3029         rmb();
3030         while (pring->sli.sli3.rspidx != portRspPut) {
3031                 /*
3032                  * Fetch an entry off the ring and copy it into a local data
3033                  * structure.  The copy involves a byte-swap since the
3034                  * network and PCI byte orders are different.
3035                  */
3036                 entry = lpfc_resp_iocb(phba, pring);
3037                 phba->last_completion_time = jiffies;
3038
3039                 if (++pring->sli.sli3.rspidx >= portRspMax)
3040                         pring->sli.sli3.rspidx = 0;
3041
3042                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3043                                       (uint32_t *) &rspiocbq.iocb,
3044                                       phba->iocb_rsp_size);
3045                 INIT_LIST_HEAD(&(rspiocbq.list));
3046                 irsp = &rspiocbq.iocb;
3047
3048                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3049                 pring->stats.iocb_rsp++;
3050                 rsp_cmpl++;
3051
3052                 if (unlikely(irsp->ulpStatus)) {
3053                         /*
3054                          * If resource errors reported from HBA, reduce
3055                          * queuedepths of the SCSI device.
3056                          */
3057                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3058                             ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3059                              IOERR_NO_RESOURCES)) {
3060                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3061                                 phba->lpfc_rampdown_queue_depth(phba);
3062                                 spin_lock_irqsave(&phba->hbalock, iflag);
3063                         }
3064
3065                         /* Rsp ring <ringno> error: IOCB */
3066                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3067                                         "0336 Rsp Ring %d error: IOCB Data: "
3068                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3069                                         pring->ringno,
3070                                         irsp->un.ulpWord[0],
3071                                         irsp->un.ulpWord[1],
3072                                         irsp->un.ulpWord[2],
3073                                         irsp->un.ulpWord[3],
3074                                         irsp->un.ulpWord[4],
3075                                         irsp->un.ulpWord[5],
3076                                         *(uint32_t *)&irsp->un1,
3077                                         *((uint32_t *)&irsp->un1 + 1));
3078                 }
3079
3080                 switch (type) {
3081                 case LPFC_ABORT_IOCB:
3082                 case LPFC_SOL_IOCB:
3083                         /*
3084                          * Idle exchange closed via ABTS from port.  No iocb
3085                          * resources need to be recovered.
3086                          */
3087                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3088                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3089                                                 "0333 IOCB cmd 0x%x"
3090                                                 " processed. Skipping"
3091                                                 " completion\n",
3092                                                 irsp->ulpCommand);
3093                                 break;
3094                         }
3095
3096                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3097                                                          &rspiocbq);
3098                         if (unlikely(!cmdiocbq))
3099                                 break;
3100                         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3101                                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3102                         if (cmdiocbq->iocb_cmpl) {
3103                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3104                                 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3105                                                       &rspiocbq);
3106                                 spin_lock_irqsave(&phba->hbalock, iflag);
3107                         }
3108                         break;
3109                 case LPFC_UNSOL_IOCB:
3110                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3111                         lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3112                         spin_lock_irqsave(&phba->hbalock, iflag);
3113                         break;
3114                 default:
3115                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3116                                 char adaptermsg[LPFC_MAX_ADPTMSG];
3117                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3118                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3119                                        MAX_MSG_DATA);
3120                                 dev_warn(&((phba->pcidev)->dev),
3121                                          "lpfc%d: %s\n",
3122                                          phba->brd_no, adaptermsg);
3123                         } else {
3124                                 /* Unknown IOCB command */
3125                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3126                                                 "0334 Unknown IOCB command "
3127                                                 "Data: x%x, x%x x%x x%x x%x\n",
3128                                                 type, irsp->ulpCommand,
3129                                                 irsp->ulpStatus,
3130                                                 irsp->ulpIoTag,
3131                                                 irsp->ulpContext);
3132                         }
3133                         break;
3134                 }
3135
3136                 /*
3137                  * The response IOCB has been processed.  Update the ring
3138                  * pointer in SLIM.  If the port response put pointer has not
3139                  * been updated, sync the pgp->rspPutInx and fetch the new port
3140                  * response put pointer.
3141                  */
3142                 writel(pring->sli.sli3.rspidx,
3143                         &phba->host_gp[pring->ringno].rspGetInx);
3144
3145                 if (pring->sli.sli3.rspidx == portRspPut)
3146                         portRspPut = le32_to_cpu(pgp->rspPutInx);
3147         }
3148
3149         if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3150                 pring->stats.iocb_rsp_full++;
3151                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3152                 writel(status, phba->CAregaddr);
3153                 readl(phba->CAregaddr);
3154         }
3155         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3156                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3157                 pring->stats.iocb_cmd_empty++;
3158
3159                 /* Force update of the local copy of cmdGetInx */
3160                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3161                 lpfc_sli_resume_iocb(phba, pring);
3162
3163                 if ((pring->lpfc_sli_cmd_available))
3164                         (pring->lpfc_sli_cmd_available) (phba, pring);
3165
3166         }
3167
3168         phba->fcp_ring_in_use = 0;
3169         spin_unlock_irqrestore(&phba->hbalock, iflag);
3170         return rc;
3171 }
3172
3173 /**
3174  * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3175  * @phba: Pointer to HBA context object.
3176  * @pring: Pointer to driver SLI ring object.
3177  * @rspiocbp: Pointer to driver response IOCB object.
3178  *
3179  * This function is called from the worker thread when there is a slow-path
3180  * response IOCB to process. This function chains all the response iocbs until
3181  * seeing the iocb with the LE bit set. The function will call
3182  * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3183  * completion of a command iocb. The function will call the
3184  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3185  * The function frees the resources or calls the completion handler if this
3186  * iocb is an abort completion. The function returns NULL when the response
3187  * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3188  * this function shall chain the iocb on to the iocb_continueq and return the
3189  * response iocb passed in.
3190  **/
3191 static struct lpfc_iocbq *
3192 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3193                         struct lpfc_iocbq *rspiocbp)
3194 {
3195         struct lpfc_iocbq *saveq;
3196         struct lpfc_iocbq *cmdiocbp;
3197         struct lpfc_iocbq *next_iocb;
3198         IOCB_t *irsp = NULL;
3199         uint32_t free_saveq;
3200         uint8_t iocb_cmd_type;
3201         lpfc_iocb_type type;
3202         unsigned long iflag;
3203         int rc;
3204
3205         spin_lock_irqsave(&phba->hbalock, iflag);
3206         /* First add the response iocb to the continueq list */
3207         list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3208         pring->iocb_continueq_cnt++;
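        /*
         * A single sequence may span several response iocbs; each entry is
         * parked on iocb_continueq until one arrives with ulpLe (list end)
         * set, which marks the sequence as complete and ready to process.
         */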
3209
3210         /* Now, determine whether the list is completed for processing */
3211         irsp = &rspiocbp->iocb;
3212         if (irsp->ulpLe) {
3213                 /*
3214                  * By default, the driver expects to free all resources
3215                  * associated with this iocb completion.
3216                  */
3217                 free_saveq = 1;
3218                 saveq = list_get_first(&pring->iocb_continueq,
3219                                        struct lpfc_iocbq, list);
3220                 irsp = &(saveq->iocb);
3221                 list_del_init(&pring->iocb_continueq);
3222                 pring->iocb_continueq_cnt = 0;
3223
3224                 pring->stats.iocb_rsp++;
3225
3226                 /*
3227                  * If resource errors reported from HBA, reduce
3228                  * queuedepths of the SCSI device.
3229                  */
3230                 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3231                     ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3232                      IOERR_NO_RESOURCES)) {
3233                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3234                         phba->lpfc_rampdown_queue_depth(phba);
3235                         spin_lock_irqsave(&phba->hbalock, iflag);
3236                 }
3237
3238                 if (irsp->ulpStatus) {
3239                         /* Rsp ring <ringno> error: IOCB */
3240                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3241                                         "0328 Rsp Ring %d error: "
3242                                         "IOCB Data: "
3243                                         "x%x x%x x%x x%x "
3244                                         "x%x x%x x%x x%x "
3245                                         "x%x x%x x%x x%x "
3246                                         "x%x x%x x%x x%x\n",
3247                                         pring->ringno,
3248                                         irsp->un.ulpWord[0],
3249                                         irsp->un.ulpWord[1],
3250                                         irsp->un.ulpWord[2],
3251                                         irsp->un.ulpWord[3],
3252                                         irsp->un.ulpWord[4],
3253                                         irsp->un.ulpWord[5],
3254                                         *(((uint32_t *) irsp) + 6),
3255                                         *(((uint32_t *) irsp) + 7),
3256                                         *(((uint32_t *) irsp) + 8),
3257                                         *(((uint32_t *) irsp) + 9),
3258                                         *(((uint32_t *) irsp) + 10),
3259                                         *(((uint32_t *) irsp) + 11),
3260                                         *(((uint32_t *) irsp) + 12),
3261                                         *(((uint32_t *) irsp) + 13),
3262                                         *(((uint32_t *) irsp) + 14),
3263                                         *(((uint32_t *) irsp) + 15));
3264                 }
3265
3266                 /*
3267                  * Fetch the IOCB command type and call the correct completion
3268                  * routine. Solicited and Unsolicited IOCBs on the ELS ring
3269                  * get freed back to the lpfc_iocb_list by the discovery
3270                  * kernel thread.
3271                  */
3272                 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3273                 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3274                 switch (type) {
3275                 case LPFC_SOL_IOCB:
3276                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3277                         rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3278                         spin_lock_irqsave(&phba->hbalock, iflag);
3279                         break;
3280
3281                 case LPFC_UNSOL_IOCB:
3282                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3283                         rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3284                         spin_lock_irqsave(&phba->hbalock, iflag);
3285                         if (!rc)
3286                                 free_saveq = 0;
3287                         break;
3288
3289                 case LPFC_ABORT_IOCB:
3290                         cmdiocbp = NULL;
3291                         if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3292                                 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3293                                                                  saveq);
3294                         if (cmdiocbp) {
3295                                 /* Call the specified completion routine */
3296                                 if (cmdiocbp->iocb_cmpl) {
3297                                         spin_unlock_irqrestore(&phba->hbalock,
3298                                                                iflag);
3299                                         (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3300                                                               saveq);
3301                                         spin_lock_irqsave(&phba->hbalock,
3302                                                           iflag);
3303                                 } else
3304                                         __lpfc_sli_release_iocbq(phba,
3305                                                                  cmdiocbp);
3306                         }
3307                         break;
3308
3309                 case LPFC_UNKNOWN_IOCB:
3310                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3311                                 char adaptermsg[LPFC_MAX_ADPTMSG];
3312                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3313                                 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3314                                        MAX_MSG_DATA);
3315                                 dev_warn(&((phba->pcidev)->dev),
3316                                          "lpfc%d: %s\n",
3317                                          phba->brd_no, adaptermsg);
3318                         } else {
3319                                 /* Unknown IOCB command */
3320                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3321                                                 "0335 Unknown IOCB "
3322                                                 "command Data: x%x "
3323                                                 "x%x x%x x%x\n",
3324                                                 irsp->ulpCommand,
3325                                                 irsp->ulpStatus,
3326                                                 irsp->ulpIoTag,
3327                                                 irsp->ulpContext);
3328                         }
3329                         break;
3330                 }
3331
3332                 if (free_saveq) {
3333                         list_for_each_entry_safe(rspiocbp, next_iocb,
3334                                                  &saveq->list, list) {
3335                                 list_del_init(&rspiocbp->list);
3336                                 __lpfc_sli_release_iocbq(phba, rspiocbp);
3337                         }
3338                         __lpfc_sli_release_iocbq(phba, saveq);
3339                 }
3340                 rspiocbp = NULL;
3341         }
3342         spin_unlock_irqrestore(&phba->hbalock, iflag);
3343         return rspiocbp;
3344 }
3345
3346 /**
3347  * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3348  * @phba: Pointer to HBA context object.
3349  * @pring: Pointer to driver SLI ring object.
3350  * @mask: Host attention register mask for this ring.
3351  *
3352  * This routine wraps the actual slow_ring event process routine from the
3353  * API jump table function pointer from the lpfc_hba struct.
3354  **/
3355 void
3356 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3357                                 struct lpfc_sli_ring *pring, uint32_t mask)
3358 {
3359         phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3360 }
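/*
 * Note: the phba->lpfc_sli_handle_slow_ring_event pointer used above is
 * bound to either the SLI3 handler (lpfc_sli_handle_slow_ring_event_s3)
 * or the SLI4 handler (lpfc_sli_handle_slow_ring_event_s4) below when
 * the driver populates its API jump table for the detected SLI revision.
 */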
3361
3362 /**
3363  * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3364  * @phba: Pointer to HBA context object.
3365  * @pring: Pointer to driver SLI ring object.
3366  * @mask: Host attention register mask for this ring.
3367  *
3368  * This function is called from the worker thread when there is a ring event
3369  * for non-fcp rings. The caller does not hold any lock. The function will
3370  * remove each response iocb in the response ring and calls the handle
3371  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3372  **/
3373 static void
3374 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3375                                    struct lpfc_sli_ring *pring, uint32_t mask)
3376 {
3377         struct lpfc_pgp *pgp;
3378         IOCB_t *entry;
3379         IOCB_t *irsp = NULL;
3380         struct lpfc_iocbq *rspiocbp = NULL;
3381         uint32_t portRspPut, portRspMax;
3382         unsigned long iflag;
3383         uint32_t status;
3384
3385         pgp = &phba->port_gp[pring->ringno];
3386         spin_lock_irqsave(&phba->hbalock, iflag);
3387         pring->stats.iocb_event++;
3388
3389         /*
3390          * The next available response entry should never exceed the maximum
3391          * entries.  If it does, treat it as an adapter hardware error.
3392          */
3393         portRspMax = pring->sli.sli3.numRiocb;
3394         portRspPut = le32_to_cpu(pgp->rspPutInx);
3395         if (portRspPut >= portRspMax) {
3396                 /*
3397                  * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3398                  * rsp ring <portRspMax>
3399                  */
3400                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3401                                 "0303 Ring %d handler: portRspPut %d "
3402                                 "is bigger than rsp ring %d\n",
3403                                 pring->ringno, portRspPut, portRspMax);
3404
3405                 phba->link_state = LPFC_HBA_ERROR;
3406                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3407
3408                 phba->work_hs = HS_FFER3;
3409                 lpfc_handle_eratt(phba);
3410
3411                 return;
3412         }
3413
3414         rmb();
3415         while (pring->sli.sli3.rspidx != portRspPut) {
3416                 /*
3417                  * Build a completion list and call the appropriate handler.
3418                  * The process is to get the next available response iocb, get
3419                  * a free iocb from the list, copy the response data into the
3420                  * free iocb, insert to the continuation list, and update the
3421                  * next response index to slim.  This process makes response
3422          * iocbs in the ring available to DMA as fast as possible but
3423          * pays a penalty for a copy operation.  Since the iocb is
3424          * only 32 bytes, this penalty is considered small relative to
3425          * the PCI reads for register values and a slim write.  When
3426          * the ulpLe field is set, the entire command has been
3427                  * received.
3428                  */
3429                 entry = lpfc_resp_iocb(phba, pring);
3430
3431                 phba->last_completion_time = jiffies;
3432                 rspiocbp = __lpfc_sli_get_iocbq(phba);
3433                 if (rspiocbp == NULL) {
3434                         printk(KERN_ERR "%s: out of buffers! Failing "
3435                                "completion.\n", __func__);
3436                         break;
3437                 }
3438
3439                 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3440                                       phba->iocb_rsp_size);
3441                 irsp = &rspiocbp->iocb;
3442
3443                 if (++pring->sli.sli3.rspidx >= portRspMax)
3444                         pring->sli.sli3.rspidx = 0;
3445
3446                 if (pring->ringno == LPFC_ELS_RING) {
3447                         lpfc_debugfs_slow_ring_trc(phba,
3448                         "IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
3449                                 *(((uint32_t *) irsp) + 4),
3450                                 *(((uint32_t *) irsp) + 6),
3451                                 *(((uint32_t *) irsp) + 7));
3452                 }
3453
3454                 writel(pring->sli.sli3.rspidx,
3455                         &phba->host_gp[pring->ringno].rspGetInx);
3456
3457                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3458                 /* Handle the response IOCB */
3459                 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3460                 spin_lock_irqsave(&phba->hbalock, iflag);
3461
3462                 /*
3463                  * If the port response put pointer has not been updated, sync
3464                  * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3465                  * response put pointer.
3466                  */
3467                 if (pring->sli.sli3.rspidx == portRspPut) {
3468                         portRspPut = le32_to_cpu(pgp->rspPutInx);
3469                 }
3470         } /* while (pring->sli.sli3.rspidx != portRspPut) */
3471
3472         if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3473                 /* At least one response entry has been freed */
3474                 pring->stats.iocb_rsp_full++;
3475                 /* SET RxRE_RSP in Chip Att register */
3476                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3477                 writel(status, phba->CAregaddr);
3478                 readl(phba->CAregaddr); /* flush */
3479         }
3480         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3481                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3482                 pring->stats.iocb_cmd_empty++;
3483
3484                 /* Force update of the local copy of cmdGetInx */
3485                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3486                 lpfc_sli_resume_iocb(phba, pring);
3487
3488                 if ((pring->lpfc_sli_cmd_available))
3489                         (pring->lpfc_sli_cmd_available) (phba, pring);
3490
3491         }
3492
3493         spin_unlock_irqrestore(&phba->hbalock, iflag);
3494         return;
3495 }
3496
3497 /**
3498  * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3499  * @phba: Pointer to HBA context object.
3500  * @pring: Pointer to driver SLI ring object.
3501  * @mask: Host attention register mask for this ring.
3502  *
3503  * This function is called from the worker thread when there is a pending
3504  * ELS response iocb on the driver internal slow-path response iocb worker
3505  * queue. The caller does not hold any lock. The function will remove each
3506  * response iocb from the response worker queue and calls the handle
3507  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3508  **/
3509 static void
3510 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3511                                    struct lpfc_sli_ring *pring, uint32_t mask)
3512 {
3513         struct lpfc_iocbq *irspiocbq;
3514         struct hbq_dmabuf *dmabuf;
3515         struct lpfc_cq_event *cq_event;
3516         unsigned long iflag;
3517         int count = 0;
3518
3519         spin_lock_irqsave(&phba->hbalock, iflag);
3520         phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3521         spin_unlock_irqrestore(&phba->hbalock, iflag);
3522         while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3523                 /* Get the response iocb from the head of work queue */
3524                 spin_lock_irqsave(&phba->hbalock, iflag);
3525                 list_remove_head(&phba->sli4_hba.sp_queue_event,
3526                                  cq_event, struct lpfc_cq_event, list);
3527                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3528
3529                 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3530                 case CQE_CODE_COMPL_WQE:
3531                         irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3532                                                  cq_event);
3533                         /* Translate ELS WCQE to response IOCBQ */
3534                         irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3535                                                                    irspiocbq);
3536                         if (irspiocbq)
3537                                 lpfc_sli_sp_handle_rspiocb(phba, pring,
3538                                                            irspiocbq);
3539                         count++;
3540                         break;
3541                 case CQE_CODE_RECEIVE:
3542                 case CQE_CODE_RECEIVE_V1:
3543                         dmabuf = container_of(cq_event, struct hbq_dmabuf,
3544                                               cq_event);
3545                         lpfc_sli4_handle_received_buffer(phba, dmabuf);
3546                         count++;
3547                         break;
3548                 default:
3549                         break;
3550                 }
3551
3552                 /* Limit the number of events to 64 to avoid soft lockups */
3553                 if (count == 64)
3554                         break;
3555         }
3556 }
3557
3558 /**
3559  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3560  * @phba: Pointer to HBA context object.
3561  * @pring: Pointer to driver SLI ring object.
3562  *
3563  * This function aborts all iocbs in the given ring and frees all the iocb
3564  * objects in txq. This function issues an abort iocb for all the iocb commands
3565  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3566  * the return of this function. The caller is not required to hold any locks.
3567  **/
3568 void
3569 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3570 {
3571         LIST_HEAD(completions);
3572         struct lpfc_iocbq *iocb, *next_iocb;
3573
3574         if (pring->ringno == LPFC_ELS_RING) {
3575                 lpfc_fabric_abort_hba(phba);
3576         }
3577
3578         /* Error everything on txq and txcmplq
3579          * First do the txq.
3580          */
3581         if (phba->sli_rev >= LPFC_SLI_REV4) {
3582                 spin_lock_irq(&pring->ring_lock);
3583                 list_splice_init(&pring->txq, &completions);
3584                 pring->txq_cnt = 0;
3585                 spin_unlock_irq(&pring->ring_lock);
3586
3587                 spin_lock_irq(&phba->hbalock);
3588                 /* Next issue ABTS for everything on the txcmplq */
3589                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3590                         lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3591                 spin_unlock_irq(&phba->hbalock);
3592         } else {
3593                 spin_lock_irq(&phba->hbalock);
3594                 list_splice_init(&pring->txq, &completions);
3595                 pring->txq_cnt = 0;
3596
3597                 /* Next issue ABTS for everything on the txcmplq */
3598                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3599                         lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3600                 spin_unlock_irq(&phba->hbalock);
3601         }
3602
3603         /* Cancel all the IOCBs from the completions list */
3604         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3605                               IOERR_SLI_ABORTED);
3606 }
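/*
 * Note the asymmetry above: txq entries were never handed to the port,
 * so they can be completed locally with IOSTAT_LOCAL_REJECT/
 * IOERR_SLI_ABORTED, while txcmplq entries are outstanding on the wire
 * and must be aborted with ABTS, completing asynchronously some time
 * after this function returns.
 */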
3607
3608 /**
3609  * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3610  * @phba: Pointer to HBA context object.
3611  * @pring: Pointer to driver SLI ring object.
3612  *
3613  * This function aborts all iocbs in FCP rings and frees all the iocb
3614  * objects in txq. This function issues an abort iocb for all the iocb commands
3615  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3616  * the return of this function. The caller is not required to hold any locks.
3617  **/
3618 void
3619 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3620 {
3621         struct lpfc_sli *psli = &phba->sli;
3622         struct lpfc_sli_ring  *pring;
3623         uint32_t i;
3624
3625         /* Look on all the FCP Rings for the iotag */
3626         if (phba->sli_rev >= LPFC_SLI_REV4) {
3627                 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3628                         pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
3629                         lpfc_sli_abort_iocb_ring(phba, pring);
3630                 }
3631         } else {
3632                 pring = &psli->ring[psli->fcp_ring];
3633                 lpfc_sli_abort_iocb_ring(phba, pring);
3634         }
3635 }
3636
3637
3638 /**
3639  * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3640  * @phba: Pointer to HBA context object.
3641  *
3642  * This function flushes all iocbs in the fcp ring and frees all the iocb
3643  * objects in txq and txcmplq. This function will not issue abort iocbs
3644  * for all the iocb commands in txcmplq; they will just be returned with
3645  * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3646  * slot has been permanently disabled.
3647  **/
3648 void
3649 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3650 {
3651         LIST_HEAD(txq);
3652         LIST_HEAD(txcmplq);
3653         struct lpfc_sli *psli = &phba->sli;
3654         struct lpfc_sli_ring  *pring;
3655         uint32_t i;
3656
3657         spin_lock_irq(&phba->hbalock);
3658         /* Indicate the I/O queues are flushed */
3659         phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
3660         spin_unlock_irq(&phba->hbalock);
3661
3662         /* Look on all the FCP Rings for the iotag */
3663         if (phba->sli_rev >= LPFC_SLI_REV4) {
3664                 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3665                         pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
3666
3667                         spin_lock_irq(&pring->ring_lock);
3668                         /* Retrieve everything on txq */
3669                         list_splice_init(&pring->txq, &txq);
3670                         /* Retrieve everything on the txcmplq */
3671                         list_splice_init(&pring->txcmplq, &txcmplq);
3672                         pring->txq_cnt = 0;
3673                         pring->txcmplq_cnt = 0;
3674                         spin_unlock_irq(&pring->ring_lock);
3675
3676                         /* Flush the txq */
3677                         lpfc_sli_cancel_iocbs(phba, &txq,
3678                                               IOSTAT_LOCAL_REJECT,
3679                                               IOERR_SLI_DOWN);
3680                         /* Flush the txcmpq */
3681                         lpfc_sli_cancel_iocbs(phba, &txcmplq,
3682                                               IOSTAT_LOCAL_REJECT,
3683                                               IOERR_SLI_DOWN);
3684                 }
3685         } else {
3686                 pring = &psli->ring[psli->fcp_ring];
3687
3688                 spin_lock_irq(&phba->hbalock);
3689                 /* Retrieve everything on txq */
3690                 list_splice_init(&pring->txq, &txq);
3691                 /* Retrieve everything on the txcmplq */
3692                 list_splice_init(&pring->txcmplq, &txcmplq);
3693                 pring->txq_cnt = 0;
3694                 pring->txcmplq_cnt = 0;
3695                 spin_unlock_irq(&phba->hbalock);
3696
3697                 /* Flush the txq */
3698                 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3699                                       IOERR_SLI_DOWN);
3700                 /* Flush the txcmpq */
3701                 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3702                                       IOERR_SLI_DOWN);
3703         }
3704 }
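/*
 * Presumably the submission and abort paths elsewhere in the driver test
 * HBA_FCP_IOQ_FLUSH (set above) to recognize that outstanding FCP I/O
 * has already been flushed while the PCI slot is offline.
 */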
3705
3706 /**
3707  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
3708  * @phba: Pointer to HBA context object.
3709  * @mask: Bit mask to be checked.
3710  *
3711  * This function reads the host status register and compares
3712  * with the provided bit mask to check if HBA completed
3713  * the restart. This function will wait in a loop for the
3714  * HBA to complete restart. If the HBA does not restart within
3715  * 15 iterations, the function will reset the HBA again. The
3716  * function returns 1 when the HBA fails to restart, otherwise
3717  * zero.
3718  **/
3719 static int
3720 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3721 {
3722         uint32_t status;
3723         int i = 0;
3724         int retval = 0;
3725
3726         /* Read the HBA Host Status Register */
3727         if (lpfc_readl(phba->HSregaddr, &status))
3728                 return 1;
3729
3730         /*
3731          * Check status register every 10ms for 5 retries, then every
3732          * 500ms for 5, then every 2.5 sec for 5, then reset board and
3733          * every 2.5 sec for 4.
3734          * Break out of the loop if errors occurred during init.
3735          */
3736         while (((status & mask) != mask) &&
3737                !(status & HS_FFERM) &&
3738                i++ < 20) {
3739
3740                 if (i <= 5)
3741                         msleep(10);
3742                 else if (i <= 10)
3743                         msleep(500);
3744                 else
3745                         msleep(2500);
3746
3747                 if (i == 15) {
3748                         /* Do post */
3749                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3750                         lpfc_sli_brdrestart(phba);
3751                 }
3752                 /* Read the HBA Host Status Register */
3753                 if (lpfc_readl(phba->HSregaddr, &status)) {
3754                         retval = 1;
3755                         break;
3756                 }
3757         }
3758
3759         /* Check to see if any errors occurred during init */
3760         if ((status & HS_FFERM) || (i >= 20)) {
3761                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3762                                 "2751 Adapter failed to restart, "
3763                                 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3764                                 status,
3765                                 readl(phba->MBslimaddr + 0xa8),
3766                                 readl(phba->MBslimaddr + 0xac));
3767                 phba->link_state = LPFC_HBA_ERROR;
3768                 retval = 1;
3769         }
3770
3771         return retval;
3772 }
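/*
 * Worst case, assuming each msleep() above takes roughly its nominal
 * time, this polls for 5 * 10ms + 5 * 500ms + 10 * 2500ms, i.e. about
 * 27.5 seconds, restarting the board once at iteration 15.
 */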
3773
3774 /**
3775  * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3776  * @phba: Pointer to HBA context object.
3777  * @mask: Bit mask to be checked.
3778  *
3779  * This function checks the host status register to see if the HBA is
3780  * ready. This function will wait in a loop for the HBA to be ready.
3781  * If the HBA is not ready, the function will reset the HBA PCI
3782  * function again. The function returns 1 when the HBA fails to become
3783  * ready, otherwise returns zero.
3784  **/
3785 static int
3786 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3787 {
3788         uint32_t status;
3789         int retval = 0;
3790
3791         /* Read the HBA Host Status Register */
3792         status = lpfc_sli4_post_status_check(phba);
3793
3794         if (status) {
3795                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3796                 lpfc_sli_brdrestart(phba);
3797                 status = lpfc_sli4_post_status_check(phba);
3798         }
3799
3800         /* Check to see if any errors occurred during init */
3801         if (status) {
3802                 phba->link_state = LPFC_HBA_ERROR;
3803                 retval = 1;
3804         } else
3805                 phba->sli4_hba.intr_enable = 0;
3806
3807         return retval;
3808 }
3809
3810 /**
3811  * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3812  * @phba: Pointer to HBA context object.
3813  * @mask: Bit mask to be checked.
3814  *
3815  * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
3816  * from the API jump table function pointer from the lpfc_hba struct.
3817  **/
3818 int
3819 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3820 {
3821         return phba->lpfc_sli_brdready(phba, mask);
3822 }
3823
3824 #define BARRIER_TEST_PATTERN (0xdeadbeef)
3825
3826 /**
3827  * lpfc_reset_barrier - Make HBA ready for HBA reset
3828  * @phba: Pointer to HBA context object.
3829  *
3830  * This function is called before resetting an HBA. This function is called
3831  * with hbalock held and requests HBA to quiesce DMAs before a reset.
3832  **/
3833 void lpfc_reset_barrier(struct lpfc_hba *phba)
3834 {
3835         uint32_t __iomem *resp_buf;
3836         uint32_t __iomem *mbox_buf;
3837         volatile uint32_t mbox;
3838         uint32_t hc_copy, ha_copy, resp_data;
3839         int  i;
3840         uint8_t hdrtype;
3841
3842         lockdep_assert_held(&phba->hbalock);
3843
3844         pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
3845         if (hdrtype != 0x80 ||
3846             (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
3847              FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
3848                 return;
3849
3850         /*
3851          * Tell the other part of the chip to suspend temporarily all
3852          * its DMA activity.
3853          */
3854         resp_buf = phba->MBslimaddr;
3855
3856         /* Disable the error attention */
3857         if (lpfc_readl(phba->HCregaddr, &hc_copy))
3858                 return;
3859         writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
3860         readl(phba->HCregaddr); /* flush */
3861         phba->link_flag |= LS_IGNORE_ERATT;
3862
3863         if (lpfc_readl(phba->HAregaddr, &ha_copy))
3864                 return;
3865         if (ha_copy & HA_ERATT) {
3866                 /* Clear Chip error bit */
3867                 writel(HA_ERATT, phba->HAregaddr);
3868                 phba->pport->stopped = 1;
3869         }
3870
3871         mbox = 0;
3872         ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
3873         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
3874
3875         writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
3876         mbox_buf = phba->MBslimaddr;
3877         writel(mbox, mbox_buf);
3878
3879         for (i = 0; i < 50; i++) {
3880                 if (lpfc_readl((resp_buf + 1), &resp_data))
3881                         return;
3882                 if (resp_data != ~(BARRIER_TEST_PATTERN))
3883                         mdelay(1);
3884                 else
3885                         break;
3886         }
3887         resp_data = 0;
3888         if (lpfc_readl((resp_buf + 1), &resp_data))
3889                 return;
3890         if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
3891                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
3892                     phba->pport->stopped)
3893                         goto restore_hc;
3894                 else
3895                         goto clear_errat;
3896         }
3897
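	/*
	 * The chip acknowledged the quiesce request. Now poll until it
	 * echoes the KILL_BOARD mailbox word back with the owner bit
	 * returned to the host.
	 */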
3898         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
3899         resp_data = 0;
3900         for (i = 0; i < 500; i++) {
3901                 if (lpfc_readl(resp_buf, &resp_data))
3902                         return;
3903                 if (resp_data != mbox)
3904                         mdelay(1);
3905                 else
3906                         break;
3907         }
3908
3909 clear_errat:
3910
3911         while (++i < 500) {
3912                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3913                         return;
3914                 if (!(ha_copy & HA_ERATT))
3915                         mdelay(1);
3916                 else
3917                         break;
3918         }
3919
3920         if (readl(phba->HAregaddr) & HA_ERATT) {
3921                 writel(HA_ERATT, phba->HAregaddr);
3922                 phba->pport->stopped = 1;
3923         }
3924
3925 restore_hc:
3926         phba->link_flag &= ~LS_IGNORE_ERATT;
3927         writel(hc_copy, phba->HCregaddr);
3928         readl(phba->HCregaddr); /* flush */
3929 }
3930
3931 /**
3932  * lpfc_sli_brdkill - Issue a kill_board mailbox command
3933  * @phba: Pointer to HBA context object.
3934  *
3935  * This function issues a kill_board mailbox command and waits for
3936  * the error attention interrupt. This function is called for stopping
3937  * the firmware processing. The caller is not required to hold any
3938  * locks. This function calls lpfc_hba_down_post function to free
3939  * any pending commands after the kill. The function returns 1 when it
3940  * fails to kill the board; otherwise it returns 0.
3941  **/
3942 int
3943 lpfc_sli_brdkill(struct lpfc_hba *phba)
3944 {
3945         struct lpfc_sli *psli;
3946         LPFC_MBOXQ_t *pmb;
3947         uint32_t status;
3948         uint32_t ha_copy;
3949         int retval;
3950         int i = 0;
3951
3952         psli = &phba->sli;
3953
3954         /* Kill HBA */
3955         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3956                         "0329 Kill HBA Data: x%x x%x\n",
3957                         phba->pport->port_state, psli->sli_flag);
3958
3959         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3960         if (!pmb)
3961                 return 1;
3962
3963         /* Disable the error attention */
3964         spin_lock_irq(&phba->hbalock);
3965         if (lpfc_readl(phba->HCregaddr, &status)) {
3966                 spin_unlock_irq(&phba->hbalock);
3967                 mempool_free(pmb, phba->mbox_mem_pool);
3968                 return 1;
3969         }
3970         status &= ~HC_ERINT_ENA;
3971         writel(status, phba->HCregaddr);
3972         readl(phba->HCregaddr); /* flush */
3973         phba->link_flag |= LS_IGNORE_ERATT;
3974         spin_unlock_irq(&phba->hbalock);
3975
3976         lpfc_kill_board(phba, pmb);
3977         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3978         retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3979
3980         if (retval != MBX_SUCCESS) {
3981                 if (retval != MBX_BUSY)
3982                         mempool_free(pmb, phba->mbox_mem_pool);
3983                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3984                                 "2752 KILL_BOARD command failed retval %d\n",
3985                                 retval);
3986                 spin_lock_irq(&phba->hbalock);
3987                 phba->link_flag &= ~LS_IGNORE_ERATT;
3988                 spin_unlock_irq(&phba->hbalock);
3989                 return 1;
3990         }
3991
3992         spin_lock_irq(&phba->hbalock);
3993         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3994         spin_unlock_irq(&phba->hbalock);
3995
3996         mempool_free(pmb, phba->mbox_mem_pool);
3997
3998         /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
3999          * attention every 100ms for 3 seconds. If we don't get ERATT after
4000          * 3 seconds we still set HBA_ERROR state because the status of the
4001          * board is now undefined.
4002          */
4003         if (lpfc_readl(phba->HAregaddr, &ha_copy))
4004                 return 1;
4005         while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4006                 mdelay(100);
4007                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4008                         return 1;
4009         }
4010
4011         del_timer_sync(&psli->mbox_tmo);
4012         if (ha_copy & HA_ERATT) {
4013                 writel(HA_ERATT, phba->HAregaddr);
4014                 phba->pport->stopped = 1;
4015         }
4016         spin_lock_irq(&phba->hbalock);
4017         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4018         psli->mbox_active = NULL;
4019         phba->link_flag &= ~LS_IGNORE_ERATT;
4020         spin_unlock_irq(&phba->hbalock);
4021
4022         lpfc_hba_down_post(phba);
4023         phba->link_state = LPFC_HBA_ERROR;
4024
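	/* The kill succeeded only if the error attention actually fired. */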
4025         return ha_copy & HA_ERATT ? 0 : 1;
4026 }
4027
4028 /**
4029  * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4030  * @phba: Pointer to HBA context object.
4031  *
4032  * This function resets the HBA by writing HC_INITFF to the control
4033  * register. After the HBA resets, this function resets all the iocb ring
4034  * indices. This function disables PCI layer parity checking during
4035  * the reset.
4036  * This function returns 0 always.
4037  * The caller is not required to hold any locks.
4038  **/
4039 int
4040 lpfc_sli_brdreset(struct lpfc_hba *phba)
4041 {
4042         struct lpfc_sli *psli;
4043         struct lpfc_sli_ring *pring;
4044         uint16_t cfg_value;
4045         int i;
4046
4047         psli = &phba->sli;
4048
4049         /* Reset HBA */
4050         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4051                         "0325 Reset HBA Data: x%x x%x\n",
4052                         phba->pport->port_state, psli->sli_flag);
4053
4054         /* perform board reset */
4055         phba->fc_eventTag = 0;
4056         phba->link_events = 0;
4057         phba->pport->fc_myDID = 0;
4058         phba->pport->fc_prevDID = 0;
4059
4060         /* Turn off parity checking and serr during the physical reset */
4061         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4062         pci_write_config_word(phba->pcidev, PCI_COMMAND,
4063                               (cfg_value &
4064                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4065
4066         psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4067
4068         /* Now toggle INITFF bit in the Host Control Register */
4069         writel(HC_INITFF, phba->HCregaddr);
4070         mdelay(1);
4071         readl(phba->HCregaddr); /* flush */
4072         writel(0, phba->HCregaddr);
4073         readl(phba->HCregaddr); /* flush */
4074
4075         /* Restore PCI cmd register */
4076         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4077
4078         /* Initialize relevant SLI info */
4079         for (i = 0; i < psli->num_rings; i++) {
4080                 pring = &psli->ring[i];
4081                 pring->flag = 0;
4082                 pring->sli.sli3.rspidx = 0;
4083                 pring->sli.sli3.next_cmdidx  = 0;
4084                 pring->sli.sli3.local_getidx = 0;
4085                 pring->sli.sli3.cmdidx = 0;
4086                 pring->missbufcnt = 0;
4087         }
4088
4089         phba->link_state = LPFC_WARM_START;
4090         return 0;
4091 }
4092
4093 /**
4094  * lpfc_sli4_brdreset - Reset a sli-4 HBA
4095  * @phba: Pointer to HBA context object.
4096  *
4097  * This function resets a SLI4 HBA. This function disables PCI layer parity
4098  * checking while it resets the device. The caller is not required to hold
4099  * any locks.
4100  *
4101  * Returns 0, or the nonzero status of the PCI function reset on failure.
4102  **/
4103 int
4104 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4105 {
4106         struct lpfc_sli *psli = &phba->sli;
4107         uint16_t cfg_value;
4108         int rc = 0;
4109
4110         /* Reset HBA */
4111         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4112                         "0295 Reset HBA Data: x%x x%x x%x\n",
4113                         phba->pport->port_state, psli->sli_flag,
4114                         phba->hba_flag);
4115
4116         /* perform board reset */
4117         phba->fc_eventTag = 0;
4118         phba->link_events = 0;
4119         phba->pport->fc_myDID = 0;
4120         phba->pport->fc_prevDID = 0;
4121
4122         spin_lock_irq(&phba->hbalock);
4123         psli->sli_flag &= ~(LPFC_PROCESS_LA);
4124         phba->fcf.fcf_flag = 0;
4125         spin_unlock_irq(&phba->hbalock);
4126
4127         /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4128         if (phba->hba_flag & HBA_FW_DUMP_OP) {
4129                 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4130                 return rc;
4131         }
4132
4133         /* Now physically reset the device */
4134         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4135                         "0389 Performing PCI function reset!\n");
4136
4137         /* Turn off parity checking and serr during the physical reset */
4138         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4139         pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4140                               ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4141
4142         /* Perform FCoE PCI function reset before freeing queue memory */
4143         rc = lpfc_pci_function_reset(phba);
4144         lpfc_sli4_queue_destroy(phba);
4145
4146         /* Restore PCI cmd register */
4147         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4148
4149         return rc;
4150 }
4151
4152 /**
4153  * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4154  * @phba: Pointer to HBA context object.
4155  *
4156  * This function is called in the SLI initialization code path to
4157  * restart the HBA. The caller is not required to hold any lock.
4158  * This function writes MBX_RESTART mailbox command to the SLIM and
4159  * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4160  * function to free any pending commands. The function enables
4161  * POST only during the first initialization. The function returns zero.
4162  * The function does not guarantee that the MBX_RESTART mailbox
4163  * command has completed before it returns.
4164  **/
4165 static int
4166 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4167 {
4168         MAILBOX_t *mb;
4169         struct lpfc_sli *psli;
4170         volatile uint32_t word0;
4171         void __iomem *to_slim;
4172         uint32_t hba_aer_enabled;
4173
4174         spin_lock_irq(&phba->hbalock);
4175
4176         /* Take PCIe device Advanced Error Reporting (AER) state */
4177         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4178
4179         psli = &phba->sli;
4180
4181         /* Restart HBA */
4182         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4183                         "0337 Restart HBA Data: x%x x%x\n",
4184                         phba->pport->port_state, psli->sli_flag);
4185
4186         word0 = 0;
4187         mb = (MAILBOX_t *) &word0;
4188         mb->mbxCommand = MBX_RESTART;
4189         mb->mbxHc = 1;
4190
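	/*
	 * Quiesce chip DMA before touching SLIM. lpfc_reset_barrier() is
	 * a no-op except on the adapter families it explicitly checks for.
	 */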
4191         lpfc_reset_barrier(phba);
4192
4193         to_slim = phba->MBslimaddr;
4194         writel(*(uint32_t *) mb, to_slim);
4195         readl(to_slim); /* flush */
4196
4197         /* Only skip post after fc_ffinit is completed */
4198         if (phba->pport->port_state)
4199                 word0 = 1;      /* This is really setting up word1 */
4200         else
4201                 word0 = 0;      /* This is really setting up word1 */
4202         to_slim = phba->MBslimaddr + sizeof (uint32_t);
4203         writel(*(uint32_t *) mb, to_slim);
4204         readl(to_slim); /* flush */
4205
4206         lpfc_sli_brdreset(phba);
4207         phba->pport->stopped = 0;
4208         phba->link_state = LPFC_INIT_START;
4209         phba->hba_flag = 0;
4210         spin_unlock_irq(&phba->hbalock);
4211
4212         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4213         psli->stats_start = get_seconds();
4214
4215         /* Give the INITFF and Post time to settle. */
4216         mdelay(100);
4217
4218         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4219         if (hba_aer_enabled)
4220                 pci_disable_pcie_error_reporting(phba->pcidev);
4221
4222         lpfc_hba_down_post(phba);
4223
4224         return 0;
4225 }
4226
4227 /**
4228  * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4229  * @phba: Pointer to HBA context object.
4230  *
4231  * This function is called in the SLI initialization code path to restart
4232  * a SLI4 HBA. The caller is not required to hold any lock.
4233  * At the end of the function, it calls lpfc_hba_down_post function to
4234  * free any pending commands.
4235  **/
4236 static int
4237 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4238 {
4239         struct lpfc_sli *psli = &phba->sli;
4240         uint32_t hba_aer_enabled;
4241         int rc;
4242
4243         /* Restart HBA */
4244         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4245                         "0296 Restart HBA Data: x%x x%x\n",
4246                         phba->pport->port_state, psli->sli_flag);
4247
4248         /* Take PCIe device Advanced Error Reporting (AER) state */
4249         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4250
4251         rc = lpfc_sli4_brdreset(phba);
4252
4253         spin_lock_irq(&phba->hbalock);
4254         phba->pport->stopped = 0;
4255         phba->link_state = LPFC_INIT_START;
4256         phba->hba_flag = 0;
4257         spin_unlock_irq(&phba->hbalock);
4258
4259         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4260         psli->stats_start = get_seconds();
4261
4262         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4263         if (hba_aer_enabled)
4264                 pci_disable_pcie_error_reporting(phba->pcidev);
4265
4266         lpfc_hba_down_post(phba);
4267
4268         return rc;
4269 }
4270
4271 /**
4272  * lpfc_sli_brdrestart - Wrapper func for restarting hba
4273  * @phba: Pointer to HBA context object.
4274  *
4275  * This routine wraps the actual SLI3 or SLI4 hba restart routine, invoked
4276  * via the API jump table function pointer in the lpfc_hba struct.
4277 **/
4278 int
4279 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4280 {
4281         return phba->lpfc_sli_brdrestart(phba);
4282 }
4283
4284 /**
4285  * lpfc_sli_chipset_init - Wait for an HBA restart to complete
4286  * @phba: Pointer to HBA context object.
4287  *
4288  * This function is called after an HBA restart to wait for successful
4289  * restart of the HBA. Successful restart is indicated by the HS_FFRDY
4290  * and HS_MBRDY bits. If the HBA is still not ready at iteration 150 (of
4291  * up to 200), the function restarts the HBA again. The function returns
4292  * zero if the HBA successfully restarted, else a negative error code.
4293  **/
4294 static int
4295 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4296 {
4297         uint32_t status, i = 0;
4298
4299         /* Read the HBA Host Status Register */
4300         if (lpfc_readl(phba->HSregaddr, &status))
4301                 return -EIO;
4302
4303         /* Check status register to see what current state is */
4304         i = 0;
4305         while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4306
4307                 /* Check every 10ms for 10 retries, then every 100ms for 90
4308                  * retries, then every 1 sec for 50 retries, for a total of
4309                  * ~60 seconds before resetting the board again and checking
4310                  * every 1 sec for 50 more retries. Up to 60 seconds is
4311                  * allowed before the board is ready because Falcon FIPS
4312                  * zeroization must complete first; any board reset in between
4313                  * restarts the zeroization and further delays board readiness.
4314                  */
4315                 if (i++ >= 200) {
4316                         /* Adapter failed to init, timeout, status reg
4317                            <status> */
4318                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4319                                         "0436 Adapter failed to init, "
4320                                         "timeout, status reg x%x, "
4321                                         "FW Data: A8 x%x AC x%x\n", status,
4322                                         readl(phba->MBslimaddr + 0xa8),
4323                                         readl(phba->MBslimaddr + 0xac));
4324                         phba->link_state = LPFC_HBA_ERROR;
4325                         return -ETIMEDOUT;
4326                 }
4327
4328                 /* Check to see if any errors occurred during init */
4329                 if (status & HS_FFERM) {
4330                         /* ERROR: During chipset initialization */
4331                         /* Adapter failed to init, chipset, status reg
4332                            <status> */
4333                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4334                                         "0437 Adapter failed to init, "
4335                                         "chipset, status reg x%x, "
4336                                         "FW Data: A8 x%x AC x%x\n", status,
4337                                         readl(phba->MBslimaddr + 0xa8),
4338                                         readl(phba->MBslimaddr + 0xac));
4339                         phba->link_state = LPFC_HBA_ERROR;
4340                         return -EIO;
4341                 }
4342
4343                 if (i <= 10)
4344                         msleep(10);
4345                 else if (i <= 100)
4346                         msleep(100);
4347                 else
4348                         msleep(1000);
4349
4350                 if (i == 150) {
4351                         /* Do post */
4352                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4353                         lpfc_sli_brdrestart(phba);
4354                 }
4355                 /* Read the HBA Host Status Register */
4356                 if (lpfc_readl(phba->HSregaddr, &status))
4357                         return -EIO;
4358         }
4359
4360         /* Check to see if any errors occurred during init */
4361         if (status & HS_FFERM) {
4362                 /* ERROR: During chipset initialization */
4363                 /* Adapter failed to init, chipset, status reg <status> */
4364                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4365                                 "0438 Adapter failed to init, chipset, "
4366                                 "status reg x%x, "
4367                                 "FW Data: A8 x%x AC x%x\n", status,
4368                                 readl(phba->MBslimaddr + 0xa8),
4369                                 readl(phba->MBslimaddr + 0xac));
4370                 phba->link_state = LPFC_HBA_ERROR;
4371                 return -EIO;
4372         }
4373
4374         /* Clear all interrupt enable conditions */
4375         writel(0, phba->HCregaddr);
4376         readl(phba->HCregaddr); /* flush */
4377
4378         /* setup host attn register */
4379         writel(0xffffffff, phba->HAregaddr);
4380         readl(phba->HAregaddr); /* flush */
4381         return 0;
4382 }
4383
4384 /**
4385  * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4386  *
4387  * This function calculates and returns the number of HBQs required to be
4388  * configured.
4389  **/
4390 int
4391 lpfc_sli_hbq_count(void)
4392 {
4393         return ARRAY_SIZE(lpfc_hbq_defs);
4394 }
4395
4396 /**
4397  * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4398  *
4399  * This function adds the number of hbq entries in every HBQ to get
4400  * the total number of hbq entries required for the HBA and returns
4401  * the total count.
4402  **/
4403 static int
4404 lpfc_sli_hbq_entry_count(void)
4405 {
4406         int  hbq_count = lpfc_sli_hbq_count();
4407         int  count = 0;
4408         int  i;
4409
4410         for (i = 0; i < hbq_count; ++i)
4411                 count += lpfc_hbq_defs[i]->entry_count;
4412         return count;
4413 }
4414
4415 /**
4416  * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4417  *
4418  * This function calculates amount of memory required for all hbq entries
4419  * to be configured and returns the total memory required.
4420  **/
4421 int
4422 lpfc_sli_hbq_size(void)
4423 {
4424         return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4425 }
4426
4427 /**
4428  * lpfc_sli_hbq_setup - configure and initialize HBQs
4429  * @phba: Pointer to HBA context object.
4430  *
4431  * This function is called during the SLI initialization to configure
4432  * all the HBQs and post buffers to the HBQ. The caller is not
4433  * required to hold any locks. This function will return zero if successful
4434  * else it will return negative error code.
4435  **/
4436 static int
4437 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4438 {
4439         int  hbq_count = lpfc_sli_hbq_count();
4440         LPFC_MBOXQ_t *pmb;
4441         MAILBOX_t *pmbox;
4442         uint32_t hbqno;
4443         uint32_t hbq_entry_index;
4444
4445         /* Get a Mailbox buffer to setup mailbox
4446          * commands for HBA initialization
4447          */
4448         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4449
4450         if (!pmb)
4451                 return -ENOMEM;
4452
4453         pmbox = &pmb->u.mb;
4454
4455         /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4456         phba->link_state = LPFC_INIT_MBX_CMDS;
4457         phba->hbq_in_use = 1;
4458
4459         hbq_entry_index = 0;
4460         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4461                 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4462                 phba->hbqs[hbqno].hbqPutIdx      = 0;
4463                 phba->hbqs[hbqno].local_hbqGetIdx   = 0;
4464                 phba->hbqs[hbqno].entry_count =
4465                         lpfc_hbq_defs[hbqno]->entry_count;
4466                 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4467                         hbq_entry_index, pmb);
4468                 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4469
4470                 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4471                         /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4472                            mbxStatus <status>, ring <num> */
4473
4474                         lpfc_printf_log(phba, KERN_ERR,
4475                                         LOG_SLI | LOG_VPORT,
4476                                         "1805 Adapter failed to init. "
4477                                         "Data: x%x x%x x%x\n",
4478                                         pmbox->mbxCommand,
4479                                         pmbox->mbxStatus, hbqno);
4480
4481                         phba->link_state = LPFC_HBA_ERROR;
4482                         mempool_free(pmb, phba->mbox_mem_pool);
4483                         return -ENXIO;
4484                 }
4485         }
4486         phba->hbq_count = hbq_count;
4487
4488         mempool_free(pmb, phba->mbox_mem_pool);
4489
4490         /* Initially populate or replenish the HBQs */
4491         for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4492                 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4493         return 0;
4494 }
4495
4496 /**
4497  * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4498  * @phba: Pointer to HBA context object.
4499  *
4500  * This function is called during the SLI initialization to configure
4501  * all the HBQs and post buffers to the HBQ. The caller is not
4502  * required to hold any locks. This function will return zero if successful
4503  * else it will return negative error code.
4504  **/
4505 static int
4506 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4507 {
4508         phba->hbq_in_use = 1;
4509         phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
4510         phba->hbq_count = 1;
4511         /* Initially populate or replenish the HBQs */
4512         lpfc_sli_hbqbuf_init_hbqs(phba, 0);
4513         return 0;
4514 }
4515
4516 /**
4517  * lpfc_sli_config_port - Issue config port mailbox command
4518  * @phba: Pointer to HBA context object.
4519  * @sli_mode: sli mode - 2/3
4520  *
4521  * This function is called by the sli initialization code path
4522  * to issue config_port mailbox command. This function restarts the
4523  * HBA firmware and issues a config_port mailbox command to configure
4524  * the SLI interface in the sli mode specified by sli_mode
4525  * variable. The caller is not required to hold any locks.
4526  * The function returns 0 if successful, else returns negative error
4527  * code.
4528  **/
4529 int
4530 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4531 {
4532         LPFC_MBOXQ_t *pmb;
4533         uint32_t resetcount = 0, rc = 0, done = 0;
4534
4535         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4536         if (!pmb) {
4537                 phba->link_state = LPFC_HBA_ERROR;
4538                 return -ENOMEM;
4539         }
4540
4541         phba->sli_rev = sli_mode;
4542         while (resetcount < 2 && !done) {
4543                 spin_lock_irq(&phba->hbalock);
4544                 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4545                 spin_unlock_irq(&phba->hbalock);
4546                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4547                 lpfc_sli_brdrestart(phba);
4548                 rc = lpfc_sli_chipset_init(phba);
4549                 if (rc)
4550                         break;
4551
4552                 spin_lock_irq(&phba->hbalock);
4553                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4554                 spin_unlock_irq(&phba->hbalock);
4555                 resetcount++;
4556
4557                 /* Call pre CONFIG_PORT mailbox command initialization.  A
4558                  * value of 0 means the call was successful.  Any other
4559                  * nonzero value is a failure, but if ERESTART is returned,
4560                  * the driver may reset the HBA and try again.
4561                  */
4562                 rc = lpfc_config_port_prep(phba);
4563                 if (rc == -ERESTART) {
4564                         phba->link_state = LPFC_LINK_UNKNOWN;
4565                         continue;
4566                 } else if (rc)
4567                         break;
4568
4569                 phba->link_state = LPFC_INIT_MBX_CMDS;
4570                 lpfc_config_port(phba, pmb);
4571                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4572                 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4573                                         LPFC_SLI3_HBQ_ENABLED |
4574                                         LPFC_SLI3_CRP_ENABLED |
4575                                         LPFC_SLI3_BG_ENABLED |
4576                                         LPFC_SLI3_DSS_ENABLED);
4577                 if (rc != MBX_SUCCESS) {
4578                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4579                                 "0442 Adapter failed to init, mbxCmd x%x "
4580                                 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4581                                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
4582                         spin_lock_irq(&phba->hbalock);
4583                         phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4584                         spin_unlock_irq(&phba->hbalock);
4585                         rc = -ENXIO;
4586                 } else {
4587                         /* Allow asynchronous mailbox command to go through */
4588                         spin_lock_irq(&phba->hbalock);
4589                         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4590                         spin_unlock_irq(&phba->hbalock);
4591                         done = 1;
4592
4593                         if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4594                             (pmb->u.mb.un.varCfgPort.gasabt == 0))
4595                                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4596                                         "3110 Port did not grant ASABT\n");
4597                 }
4598         }
4599         if (!done) {
4600                 rc = -EINVAL;
4601                 goto do_prep_failed;
4602         }
4603         if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4604                 if (!pmb->u.mb.un.varCfgPort.cMA) {
4605                         rc = -ENXIO;
4606                         goto do_prep_failed;
4607                 }
4608                 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
4609                         phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
4610                         phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4611                         phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4612                                 phba->max_vpi : phba->max_vports;
4613
4614                 } else
4615                         phba->max_vpi = 0;
4616                 phba->fips_level = 0;
4617                 phba->fips_spec_rev = 0;
4618                 if (pmb->u.mb.un.varCfgPort.gdss) {
4619                         phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
4620                         phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4621                         phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4622                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4623                                         "2850 Security Crypto Active. FIPS x%d "
4624                                         "(Spec Rev: x%d)",
4625                                         phba->fips_level, phba->fips_spec_rev);
4626                 }
4627                 if (pmb->u.mb.un.varCfgPort.sec_err) {
4628                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4629                                         "2856 Config Port Security Crypto "
4630                                         "Error: x%x ",
4631                                         pmb->u.mb.un.varCfgPort.sec_err);
4632                 }
4633                 if (pmb->u.mb.un.varCfgPort.gerbm)
4634                         phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
4635                 if (pmb->u.mb.un.varCfgPort.gcrp)
4636                         phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
4637
4638                 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4639                 phba->port_gp = phba->mbox->us.s3_pgp.port;
4640
4641                 if (phba->cfg_enable_bg) {
4642                         if (pmb->u.mb.un.varCfgPort.gbg)
4643                                 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4644                         else
4645                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4646                                                 "0443 Adapter did not grant "
4647                                                 "BlockGuard\n");
4648                 }
4649         } else {
4650                 phba->hbq_get = NULL;
4651                 phba->port_gp = phba->mbox->us.s2.port;
4652                 phba->max_vpi = 0;
4653         }
4654 do_prep_failed:
4655         mempool_free(pmb, phba->mbox_mem_pool);
4656         return rc;
4657 }
4658
4659
4660 /**
4661  * lpfc_sli_hba_setup - SLI initialization function
4662  * @phba: Pointer to HBA context object.
4663  *
4664  * This function is the main SLI initialization function. This function
4665  * is called by the HBA initialization code, HBA reset code and HBA
4666  * error attention handler code. Caller is not required to hold any
4667  * locks. This function issues config_port mailbox command to configure
4668  * the SLI, setup iocb rings and HBQ rings. In the end the function
4669  * calls the config_port_post function to issue init_link mailbox
4670  * command and to start the discovery. The function will return zero
4671  * if successful, else it will return negative error code.
4672  **/
4673 int
4674 lpfc_sli_hba_setup(struct lpfc_hba *phba)
4675 {
4676         uint32_t rc;
4677         int  mode = 3, i;
4678         int longs;
4679
4680         switch (phba->cfg_sli_mode) {
4681         case 2:
4682                 if (phba->cfg_enable_npiv) {
4683                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4684                                 "1824 NPIV enabled: Override sli_mode "
4685                                 "parameter (%d) to auto (0).\n",
4686                                 phba->cfg_sli_mode);
4687                         break;
4688                 }
4689                 mode = 2;
4690                 break;
4691         case 0:
4692         case 3:
4693                 break;
4694         default:
4695                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4696                                 "1819 Unrecognized sli_mode parameter: %d.\n",
4697                                 phba->cfg_sli_mode);
4698
4699                 break;
4700         }
4701         phba->fcp_embed_io = 0; /* SLI4 FC support only */
4702
4703         rc = lpfc_sli_config_port(phba, mode);
4704
4705         if (rc && phba->cfg_sli_mode == 3)
4706                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4707                                 "1820 Unable to select SLI-3.  "
4708                                 "Not supported by adapter.\n");
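	/* If the first CONFIG_PORT attempt failed, retry in the other SLI mode. */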
4709         if (rc && mode != 2)
4710                 rc = lpfc_sli_config_port(phba, 2);
4711         else if (rc && mode == 2)
4712                 rc = lpfc_sli_config_port(phba, 3);
4713         if (rc)
4714                 goto lpfc_sli_hba_setup_error;
4715
4716         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4717         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4718                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4719                 if (!rc) {
4720                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4721                                         "2709 This device supports "
4722                                         "Advanced Error Reporting (AER)\n");
4723                         spin_lock_irq(&phba->hbalock);
4724                         phba->hba_flag |= HBA_AER_ENABLED;
4725                         spin_unlock_irq(&phba->hbalock);
4726                 } else {
4727                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4728                                         "2708 This device does not support "
4729                                         "Advanced Error Reporting (AER): %d\n",
4730                                         rc);
4731                         phba->cfg_aer_support = 0;
4732                 }
4733         }
4734
4735         if (phba->sli_rev == 3) {
4736                 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4737                 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
4738         } else {
4739                 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4740                 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
4741                 phba->sli3_options = 0;
4742         }
4743
4744         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4745                         "0444 Firmware in SLI %x mode. Max_vpi %d\n",
4746                         phba->sli_rev, phba->max_vpi);
4747         rc = lpfc_sli_ring_map(phba);
4748
4749         if (rc)
4750                 goto lpfc_sli_hba_setup_error;
4751
4752         /* Initialize VPIs. */
4753         if (phba->sli_rev == LPFC_SLI_REV3) {
4754                 /*
4755                  * The VPI bitmask and physical ID array are allocated
4756                  * and initialized once only - at driver load.  A port
4757                  * reset doesn't need to reinitialize this memory.
4758                  */
4759                 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4760                         longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4761                         phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4762                                                   GFP_KERNEL);
4763                         if (!phba->vpi_bmask) {
4764                                 rc = -ENOMEM;
4765                                 goto lpfc_sli_hba_setup_error;
4766                         }
4767
4768                         phba->vpi_ids = kzalloc(
4769                                         (phba->max_vpi+1) * sizeof(uint16_t),
4770                                         GFP_KERNEL);
4771                         if (!phba->vpi_ids) {
4772                                 kfree(phba->vpi_bmask);
4773                                 rc = -ENOMEM;
4774                                 goto lpfc_sli_hba_setup_error;
4775                         }
4776                         for (i = 0; i < phba->max_vpi; i++)
4777                                 phba->vpi_ids[i] = i;
4778                 }
4779         }
4780
4781         /* Init HBQs */
4782         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4783                 rc = lpfc_sli_hbq_setup(phba);
4784                 if (rc)
4785                         goto lpfc_sli_hba_setup_error;
4786         }
4787         spin_lock_irq(&phba->hbalock);
4788         phba->sli.sli_flag |= LPFC_PROCESS_LA;
4789         spin_unlock_irq(&phba->hbalock);
4790
4791         rc = lpfc_config_port_post(phba);
4792         if (rc)
4793                 goto lpfc_sli_hba_setup_error;
4794
4795         return rc;
4796
4797 lpfc_sli_hba_setup_error:
4798         phba->link_state = LPFC_HBA_ERROR;
4799         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4800                         "0445 Firmware initialization failed\n");
4801         return rc;
4802 }
4803
4804 /**
4805  * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4806  * @phba: Pointer to HBA context object.
4807  *
4808  * This function issues a dump mailbox command to read config region
4809  * 23, parses the records in the region, and populates the driver
4810  * data structure.
4811  **/
4812 static int
4813 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
4814 {
4815         LPFC_MBOXQ_t *mboxq;
4816         struct lpfc_dmabuf *mp;
4817         struct lpfc_mqe *mqe;
4818         uint32_t data_length;
4819         int rc;
4820
4821         /* Program the default value of vlan_id and fc_map */
4822         phba->valid_vlan = 0;
4823         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4824         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4825         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4826
4827         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4828         if (!mboxq)
4829                 return -ENOMEM;
4830
4831         mqe = &mboxq->u.mqe;
4832         if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
4833                 rc = -ENOMEM;
4834                 goto out_free_mboxq;
4835         }
4836
4837         mp = (struct lpfc_dmabuf *) mboxq->context1;
4838         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4839
4840         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4841                         "(%d):2571 Mailbox cmd x%x Status x%x "
4842                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4843                         "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4844                         "CQ: x%x x%x x%x x%x\n",
4845                         mboxq->vport ? mboxq->vport->vpi : 0,
4846                         bf_get(lpfc_mqe_command, mqe),
4847                         bf_get(lpfc_mqe_status, mqe),
4848                         mqe->un.mb_words[0], mqe->un.mb_words[1],
4849                         mqe->un.mb_words[2], mqe->un.mb_words[3],
4850                         mqe->un.mb_words[4], mqe->un.mb_words[5],
4851                         mqe->un.mb_words[6], mqe->un.mb_words[7],
4852                         mqe->un.mb_words[8], mqe->un.mb_words[9],
4853                         mqe->un.mb_words[10], mqe->un.mb_words[11],
4854                         mqe->un.mb_words[12], mqe->un.mb_words[13],
4855                         mqe->un.mb_words[14], mqe->un.mb_words[15],
4856                         mqe->un.mb_words[16], mqe->un.mb_words[50],
4857                         mboxq->mcqe.word0,
4858                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
4859                         mboxq->mcqe.trailer);
4860
4861         if (rc) {
4862                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4863                 kfree(mp);
4864                 rc = -EIO;
4865                 goto out_free_mboxq;
4866         }
4867         data_length = mqe->un.mb_words[5];
4868         if (data_length > DMP_RGN23_SIZE) {
4869                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4870                 kfree(mp);
4871                 rc = -EIO;
4872                 goto out_free_mboxq;
4873         }
4874
4875         lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4876         lpfc_mbuf_free(phba, mp->virt, mp->phys);
4877         kfree(mp);
4878         rc = 0;
4879
4880 out_free_mboxq:
4881         mempool_free(mboxq, phba->mbox_mem_pool);
4882         return rc;
4883 }
4884
4885 /**
4886  * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4887  * @phba: pointer to lpfc hba data structure.
4888  * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4889  * @vpd: pointer to the memory to hold resulting port vpd data.
4890  * @vpd_size: On input, the number of bytes allocated to @vpd.
4891  *            On output, the number of data bytes in @vpd.
4892  *
4893  * This routine executes a READ_REV SLI4 mailbox command.  In
4894  * addition, this routine gets the port vpd data.
4895  *
4896  * Return codes
4897  *      0 - successful
4898  *      -ENOMEM - could not allocate memory.
4899  **/
4900 static int
4901 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4902                     uint8_t *vpd, uint32_t *vpd_size)
4903 {
4904         int rc = 0;
4905         uint32_t dma_size;
4906         struct lpfc_dmabuf *dmabuf;
4907         struct lpfc_mqe *mqe;
4908
4909         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4910         if (!dmabuf)
4911                 return -ENOMEM;
4912
4913         /*
4914          * Get a DMA buffer for the vpd data resulting from the READ_REV
4915          * mailbox command.
4916          */
4917         dma_size = *vpd_size;
4918         dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
4919                                            &dmabuf->phys, GFP_KERNEL);
4920         if (!dmabuf->virt) {
4921                 kfree(dmabuf);
4922                 return -ENOMEM;
4923         }
4924
4925         /*
4926          * The SLI4 implementation of READ_REV conflicts at word1,
4927          * bits 31:16 and SLI4 adds vpd functionality not present
4928          * in SLI3.  This code corrects the conflicts.
4929          */
4930         lpfc_read_rev(phba, mboxq);
4931         mqe = &mboxq->u.mqe;
4932         mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4933         mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4934         mqe->un.read_rev.word1 &= 0x0000FFFF;
4935         bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4936         bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4937
4938         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4939         if (rc) {
4940                 dma_free_coherent(&phba->pcidev->dev, dma_size,
4941                                   dmabuf->virt, dmabuf->phys);
4942                 kfree(dmabuf);
4943                 return -EIO;
4944         }
4945
4946         /*
4947          * The available vpd length cannot be bigger than the
4948          * DMA buffer passed to the port.  Catch the less than
4949          * case and update the caller's size.
4950          */
4951         if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4952                 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4953
4954         memcpy(vpd, dmabuf->virt, *vpd_size);
4955
4956         dma_free_coherent(&phba->pcidev->dev, dma_size,
4957                           dmabuf->virt, dmabuf->phys);
4958         kfree(dmabuf);
4959         return 0;
4960 }
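
/*
 * Usage sketch (hypothetical, simplified from the SLI4 bring-up path):
 * the caller supplies a buffer and, on success, vpd_size is updated to
 * the number of vpd bytes actually returned by the port.
 *
 *	uint32_t vpd_size = SLI4_PAGE_SIZE;
 *	uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);
 *	int rc = vpd ? lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size)
 *		     : -ENOMEM;
 */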
4961
4962 /**
4963  * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
4964  * @phba: pointer to lpfc hba data structure.
4965  *
4966  * This routine retrieves the SLI4 device physical port name that this
4967  * PCI function is attached to.
4968  *
4969  * Return codes
4970  *      0 - successful
4971  *      otherwise - failed to retrieve physical port name
4972  **/
4973 static int
4974 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
4975 {
4976         LPFC_MBOXQ_t *mboxq;
4977         struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
4978         struct lpfc_controller_attribute *cntl_attr;
4979         struct lpfc_mbx_get_port_name *get_port_name;
4980         void *virtaddr = NULL;
4981         uint32_t alloclen, reqlen;
4982         uint32_t shdr_status, shdr_add_status;
4983         union lpfc_sli4_cfg_shdr *shdr;
4984         char cport_name = 0;
4985         int rc;
4986
4987         /* We assume nothing at this point */
4988         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4989         phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
4990
4991         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4992         if (!mboxq)
4993                 return -ENOMEM;
4994         /* obtain link type and link number via READ_CONFIG */
4995         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4996         lpfc_sli4_read_config(phba);
4997         if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
4998                 goto retrieve_ppname;
4999
5000         /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5001         reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5002         alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5003                         LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5004                         LPFC_SLI4_MBX_NEMBED);
5005         if (alloclen < reqlen) {
5006                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5007                                 "3084 Allocated DMA memory size (%d) is "
5008                                 "less than the requested DMA memory size "
5009                                 "(%d)\n", alloclen, reqlen);
5010                 rc = -ENOMEM;
5011                 goto out_free_mboxq;
5012         }
5013         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5014         virtaddr = mboxq->sge_array->addr[0];
5015         mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5016         shdr = &mbx_cntl_attr->cfg_shdr;
5017         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5018         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5019         if (shdr_status || shdr_add_status || rc) {
5020                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5021                                 "3085 Mailbox x%x (x%x/x%x) failed, "
5022                                 "rc:x%x, status:x%x, add_status:x%x\n",
5023                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5024                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5025                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5026                                 rc, shdr_status, shdr_add_status);
5027                 rc = -ENXIO;
5028                 goto out_free_mboxq;
5029         }
5030         cntl_attr = &mbx_cntl_attr->cntl_attr;
5031         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5032         phba->sli4_hba.lnk_info.lnk_tp =
5033                 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5034         phba->sli4_hba.lnk_info.lnk_no =
5035                 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5036         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5037                         "3086 lnk_type:%d, lnk_numb:%d\n",
5038                         phba->sli4_hba.lnk_info.lnk_tp,
5039                         phba->sli4_hba.lnk_info.lnk_no);
5040
5041 retrieve_ppname:
5042         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5043                 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5044                 sizeof(struct lpfc_mbx_get_port_name) -
5045                 sizeof(struct lpfc_sli4_cfg_mhdr),
5046                 LPFC_SLI4_MBX_EMBED);
5047         get_port_name = &mboxq->u.mqe.un.get_port_name;
5048         shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5049         bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5050         bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5051                 phba->sli4_hba.lnk_info.lnk_tp);
5052         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5053         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5054         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5055         if (shdr_status || shdr_add_status || rc) {
5056                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5057                                 "3087 Mailbox x%x (x%x/x%x) failed: "
5058                                 "rc:x%x, status:x%x, add_status:x%x\n",
5059                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5060                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5061                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5062                                 rc, shdr_status, shdr_add_status);
5063                 rc = -ENXIO;
5064                 goto out_free_mboxq;
5065         }
5066         switch (phba->sli4_hba.lnk_info.lnk_no) {
5067         case LPFC_LINK_NUMBER_0:
5068                 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5069                                 &get_port_name->u.response);
5070                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5071                 break;
5072         case LPFC_LINK_NUMBER_1:
5073                 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5074                                 &get_port_name->u.response);
5075                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5076                 break;
5077         case LPFC_LINK_NUMBER_2:
5078                 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5079                                 &get_port_name->u.response);
5080                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5081                 break;
5082         case LPFC_LINK_NUMBER_3:
5083                 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5084                                 &get_port_name->u.response);
5085                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5086                 break;
5087         default:
5088                 break;
5089         }
5090
5091         if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5092                 phba->Port[0] = cport_name;
5093                 phba->Port[1] = '\0';
5094                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5095                                 "3091 SLI get port name: %s\n", phba->Port);
5096         }
5097
5098 out_free_mboxq:
5099         if (rc != MBX_TIMEOUT) {
5100                 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5101                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
5102                 else
5103                         mempool_free(mboxq, phba->mbox_mem_pool);
5104         }
5105         return rc;
5106 }
5107
5108 /**
5109  * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5110  * @phba: pointer to lpfc hba data structure.
5111  *
5112  * This routine is called to explicitly arm the SLI4 device's completion and
5113  * event queues.
5114  **/
5115 static void
5116 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5117 {
5118         int fcp_eqidx;
5119
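	/*
	 * Releasing a queue with LPFC_QUEUE_REARM hands the host's consumed
	 * entries back to the port and re-arms the queue so that new
	 * completions/events raise an interrupt again.
	 */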
5120         lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
5121         lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
5122         fcp_eqidx = 0;
5123         if (phba->sli4_hba.fcp_cq) {
5124                 do {
5125                         lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
5126                                              LPFC_QUEUE_REARM);
5127                 } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
5128         }
5129
5130         if (phba->cfg_fof)
5131                 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
5132
5133         if (phba->sli4_hba.hba_eq) {
5134                 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
5135                      fcp_eqidx++)
5136                         lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
5137                                              LPFC_QUEUE_REARM);
5138         }
5139
5140         if (phba->cfg_fof)
5141                 lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
5142 }
5143
5144 /**
5145  * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5146  * @phba: Pointer to HBA context object.
5147  * @type: The resource extent type.
5148  * @extnt_count: buffer to hold port available extent count.
5149  * @extnt_size: buffer to hold element count per extent.
5150  *
5151  * This function calls the port and retrieves the number of available
5152  * extents and their size for a particular extent type.
5153  *
5154  * Returns: 0 if successful.  Nonzero otherwise.
5155  **/
5156 int
5157 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5158                                uint16_t *extnt_count, uint16_t *extnt_size)
5159 {
5160         int rc = 0;
5161         uint32_t length;
5162         uint32_t mbox_tmo;
5163         struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5164         LPFC_MBOXQ_t *mbox;
5165
5166         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5167         if (!mbox)
5168                 return -ENOMEM;
5169
5170         /* Find out how many extents are available for this resource type */
5171         length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5172                   sizeof(struct lpfc_sli4_cfg_mhdr));
5173         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5174                          LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5175                          length, LPFC_SLI4_MBX_EMBED);
5176
5177         /* Send an extents count of 0 - the GET doesn't use it. */
5178         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5179                                         LPFC_SLI4_MBX_EMBED);
5180         if (unlikely(rc)) {
5181                 rc = -EIO;
5182                 goto err_exit;
5183         }
5184
5185         if (!phba->sli4_hba.intr_enable)
5186                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5187         else {
5188                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5189                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5190         }
5191         if (unlikely(rc)) {
5192                 rc = -EIO;
5193                 goto err_exit;
5194         }
5195
5196         rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5197         if (bf_get(lpfc_mbox_hdr_status,
5198                    &rsrc_info->header.cfg_shdr.response)) {
5199                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5200                                 "2930 Failed to get resource extents "
5201                                 "Status 0x%x Add'l Status 0x%x\n",
5202                                 bf_get(lpfc_mbox_hdr_status,
5203                                        &rsrc_info->header.cfg_shdr.response),
5204                                 bf_get(lpfc_mbox_hdr_add_status,
5205                                        &rsrc_info->header.cfg_shdr.response));
5206                 rc = -EIO;
5207                 goto err_exit;
5208         }
5209
5210         *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5211                               &rsrc_info->u.rsp);
5212         *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5213                              &rsrc_info->u.rsp);
5214
5215         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5216                         "3162 Retrieved extents type-%d from port: count:%d, "
5217                         "size:%d\n", type, *extnt_count, *extnt_size);
5218
5219 err_exit:
5220         mempool_free(mbox, phba->mbox_mem_pool);
5221         return rc;
5222 }
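
/*
 * Usage sketch (illustrative; the callers below are the authoritative
 * examples): query the available provisioning for one resource type and
 * derive the total number of ids it represents:
 *
 *      uint16_t cnt, size;
 *      int rc;
 *
 *      rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *                                          &cnt, &size);
 *      if (rc)
 *              return rc;
 *      total_ids = cnt * size;
 */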
5223
5224 /**
5225  * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5226  * @phba: Pointer to HBA context object.
5227  * @type: The extent type to check.
5228  *
5229  * This function reads the current available extents from the port and checks
5230  * if the extent count or extent size has changed since the last access.
5231  * Callers use this routine after a port reset to determine if there is an
5232  * extent reprovisioning requirement.
5233  *
5234  * Returns:
5235  *   -Error: a negative error value indicates a problem.
5236  *   1: Extent count or size has changed.
5237  *   0: No changes.
5238  **/
5239 static int
5240 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5241 {
5242         uint16_t curr_ext_cnt, rsrc_ext_cnt;
5243         uint16_t size_diff, rsrc_ext_size;
5244         int rc = 0;
5245         struct lpfc_rsrc_blks *rsrc_entry;
5246         struct list_head *rsrc_blk_list = NULL;
5247
5248         size_diff = 0;
5249         curr_ext_cnt = 0;
5250         rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5251                                             &rsrc_ext_cnt,
5252                                             &rsrc_ext_size);
5253         if (unlikely(rc))
5254                 return -EIO;
5255
5256         switch (type) {
5257         case LPFC_RSC_TYPE_FCOE_RPI:
5258                 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5259                 break;
5260         case LPFC_RSC_TYPE_FCOE_VPI:
5261                 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5262                 break;
5263         case LPFC_RSC_TYPE_FCOE_XRI:
5264                 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5265                 break;
5266         case LPFC_RSC_TYPE_FCOE_VFI:
5267                 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5268                 break;
5269         default:
5270                 break;
5271         }
5272
5273         list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5274                 curr_ext_cnt++;
5275                 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5276                         size_diff++;
5277         }
5278
5279         if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5280                 rc = 1;
5281
5282         return rc;
5283 }
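
/*
 * Usage sketch (illustrative): the tri-state return value has to be
 * handled explicitly; reprovision_extents() below is a hypothetical
 * stand-in for the caller's dealloc/alloc sequence:
 *
 *      rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI);
 *      if (rc < 0)
 *              return rc;
 *      if (rc == 1)
 *              reprovision_extents(phba);
 */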
5284
5285 /**
5286  * lpfc_sli4_cfg_post_extnts - Post an extent allocation request to the port.
5287  * @phba: Pointer to HBA context object.
5288  * @extnt_cnt: number of available extents.
5289  * @type: the extent type (rpi, xri, vfi, vpi).
5290  * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5291  * @mbox: pointer to the caller's allocated mailbox structure.
5292  *
5293  * This function executes the extents allocation request.  It also
5294  * sizes the mailbox memory needed to allocate or retrieve the
5295  * allocated extents. It is the caller's responsibility to evaluate
5296  * the response.
5297  *
5298  * Returns:
5299  *   -Error:  Error value describes the condition found.
5300  *   0: if successful
5301  **/
5302 static int
5303 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5304                           uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5305 {
5306         int rc = 0;
5307         uint32_t req_len;
5308         uint32_t emb_len;
5309         uint32_t alloc_len, mbox_tmo;
5310
5311         /* Calculate the total requested length of the dma memory */
5312         req_len = extnt_cnt * sizeof(uint16_t);
5313
5314         /*
5315          * Calculate the size of an embedded mailbox.  The uint32_t
5316          * accounts for the extents-specific word.
5317          */
5318         emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5319                 sizeof(uint32_t);
5320
5321         /*
5322          * Presume the allocation and response will fit into an embedded
5323          * mailbox.  If not true, reconfigure to a non-embedded mailbox.
5324          */
5325         *emb = LPFC_SLI4_MBX_EMBED;
5326         if (req_len > emb_len) {
5327                 req_len = extnt_cnt * sizeof(uint16_t) +
5328                         sizeof(union lpfc_sli4_cfg_shdr) +
5329                         sizeof(uint32_t);
5330                 *emb = LPFC_SLI4_MBX_NEMBED;
5331         }
5332
5333         alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5334                                      LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5335                                      req_len, *emb);
5336         if (alloc_len < req_len) {
5337                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5338                         "2982 Allocated DMA memory size (x%x) is "
5339                         "less than the requested DMA memory "
5340                         "size (x%x)\n", alloc_len, req_len);
5341                 return -ENOMEM;
5342         }
5343         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5344         if (unlikely(rc))
5345                 return -EIO;
5346
5347         if (!phba->sli4_hba.intr_enable)
5348                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5349         else {
5350                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5351                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5352         }
5353
5354         if (unlikely(rc))
5355                 rc = -EIO;
5356         return rc;
5357 }
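
/*
 * Caller contract sketch (illustrative): on success the response must be
 * located according to *emb, exactly as lpfc_sli4_alloc_extent() does
 * below: mbox->u.mqe.un.alloc_rsrc_extents when embedded, or the sge at
 * mbox->sge_array->addr[0] when non-embedded.
 */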
5358
5359 /**
5360  * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5361  * @phba: Pointer to HBA context object.
5362  * @type:  The resource extent type to allocate.
5363  *
5364  * This function allocates the number of elements for the specified
5365  * resource type.
5366  **/
5367 static int
5368 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5369 {
5370         bool emb = false;
5371         uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5372         uint16_t rsrc_id, rsrc_start, j, k;
5373         uint16_t *ids;
5374         int i, rc;
5375         unsigned long longs;
5376         unsigned long *bmask;
5377         struct lpfc_rsrc_blks *rsrc_blks;
5378         LPFC_MBOXQ_t *mbox;
5379         uint32_t length;
5380         struct lpfc_id_range *id_array = NULL;
5381         void *virtaddr = NULL;
5382         struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5383         struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5384         struct list_head *ext_blk_list;
5385
5386         rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5387                                             &rsrc_cnt,
5388                                             &rsrc_size);
5389         if (unlikely(rc))
5390                 return -EIO;
5391
5392         if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5393                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5394                         "3009 No available Resource Extents "
5395                         "for resource type 0x%x: Count: 0x%x, "
5396                         "Size 0x%x\n", type, rsrc_cnt,
5397                         rsrc_size);
5398                 return -ENOMEM;
5399         }
5400
5401         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5402                         "2903 Post resource extents type-0x%x: "
5403                         "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5404
5405         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5406         if (!mbox)
5407                 return -ENOMEM;
5408
5409         rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5410         if (unlikely(rc)) {
5411                 rc = -EIO;
5412                 goto err_exit;
5413         }
5414
5415         /*
5416          * Figure out where the response is located.  Then get local pointers
5417          * to the response data.  The port does not guarantee to respond with
5418          * the full extent count requested, so update the local variable with the
5419          * allocated count from the port.
5420          */
5421         if (emb == LPFC_SLI4_MBX_EMBED) {
5422                 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5423                 id_array = &rsrc_ext->u.rsp.id[0];
5424                 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5425         } else {
5426                 virtaddr = mbox->sge_array->addr[0];
5427                 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5428                 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5429                 id_array = &n_rsrc->id;
5430         }
5431
5432         longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5433         rsrc_id_cnt = rsrc_cnt * rsrc_size;
5434
5435         /*
5436          * Based on the resource size and count, correct the base and max
5437          * resource values.
5438          */
5439         length = sizeof(struct lpfc_rsrc_blks);
5440         switch (type) {
5441         case LPFC_RSC_TYPE_FCOE_RPI:
5442                 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5443                                                    sizeof(unsigned long),
5444                                                    GFP_KERNEL);
5445                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5446                         rc = -ENOMEM;
5447                         goto err_exit;
5448                 }
5449                 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5450                                                  sizeof(uint16_t),
5451                                                  GFP_KERNEL);
5452                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5453                         kfree(phba->sli4_hba.rpi_bmask);
5454                         rc = -ENOMEM;
5455                         goto err_exit;
5456                 }
5457
5458                 /*
5459                  * The next_rpi was initialized with the maximum available
5460                  * count but the port may allocate a smaller number.  Catch
5461                  * that case and update the next_rpi.
5462                  */
5463                 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5464
5465                 /* Initialize local ptrs for common extent processing later. */
5466                 bmask = phba->sli4_hba.rpi_bmask;
5467                 ids = phba->sli4_hba.rpi_ids;
5468                 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5469                 break;
5470         case LPFC_RSC_TYPE_FCOE_VPI:
5471                 phba->vpi_bmask = kzalloc(longs *
5472                                           sizeof(unsigned long),
5473                                           GFP_KERNEL);
5474                 if (unlikely(!phba->vpi_bmask)) {
5475                         rc = -ENOMEM;
5476                         goto err_exit;
5477                 }
5478                 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5479                                          sizeof(uint16_t),
5480                                          GFP_KERNEL);
5481                 if (unlikely(!phba->vpi_ids)) {
5482                         kfree(phba->vpi_bmask);
5483                         rc = -ENOMEM;
5484                         goto err_exit;
5485                 }
5486
5487                 /* Initialize local ptrs for common extent processing later. */
5488                 bmask = phba->vpi_bmask;
5489                 ids = phba->vpi_ids;
5490                 ext_blk_list = &phba->lpfc_vpi_blk_list;
5491                 break;
5492         case LPFC_RSC_TYPE_FCOE_XRI:
5493                 phba->sli4_hba.xri_bmask = kzalloc(longs *
5494                                                    sizeof(unsigned long),
5495                                                    GFP_KERNEL);
5496                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5497                         rc = -ENOMEM;
5498                         goto err_exit;
5499                 }
5500                 phba->sli4_hba.max_cfg_param.xri_used = 0;
5501                 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5502                                                  sizeof(uint16_t),
5503                                                  GFP_KERNEL);
5504                 if (unlikely(!phba->sli4_hba.xri_ids)) {
5505                         kfree(phba->sli4_hba.xri_bmask);
5506                         rc = -ENOMEM;
5507                         goto err_exit;
5508                 }
5509
5510                 /* Initialize local ptrs for common extent processing later. */
5511                 bmask = phba->sli4_hba.xri_bmask;
5512                 ids = phba->sli4_hba.xri_ids;
5513                 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5514                 break;
5515         case LPFC_RSC_TYPE_FCOE_VFI:
5516                 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5517                                                    sizeof(unsigned long),
5518                                                    GFP_KERNEL);
5519                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5520                         rc = -ENOMEM;
5521                         goto err_exit;
5522                 }
5523                 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5524                                                  sizeof(uint16_t),
5525                                                  GFP_KERNEL);
5526                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5527                         kfree(phba->sli4_hba.vfi_bmask);
5528                         rc = -ENOMEM;
5529                         goto err_exit;
5530                 }
5531
5532                 /* Initialize local ptrs for common extent processing later. */
5533                 bmask = phba->sli4_hba.vfi_bmask;
5534                 ids = phba->sli4_hba.vfi_ids;
5535                 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5536                 break;
5537         default:
5538                 /* Unsupported resource type.  Fail the call. */
5539                 id_array = NULL;
5540                 bmask = NULL;
5541                 ids = NULL;
5542                 ext_blk_list = NULL;
5543                 goto err_exit;
5544         }
5545
5546         /*
5547          * Complete initializing the extent configuration with the
5548          * allocated ids assigned to this function.  The bitmask serves
5549          * as an index into the array and manages the available ids.  The
5550          * array just stores the ids communicated to the port via the wqes.
5551          */
5552         for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5553                 if ((i % 2) == 0)
5554                         rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5555                                          &id_array[k]);
5556                 else
5557                         rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5558                                          &id_array[k]);
5559
5560                 rsrc_blks = kzalloc(length, GFP_KERNEL);
5561                 if (unlikely(!rsrc_blks)) {
5562                         rc = -ENOMEM;
5563                         kfree(bmask);
5564                         kfree(ids);
5565                         goto err_exit;
5566                 }
5567                 rsrc_blks->rsrc_start = rsrc_id;
5568                 rsrc_blks->rsrc_size = rsrc_size;
5569                 list_add_tail(&rsrc_blks->list, ext_blk_list);
5570                 rsrc_start = rsrc_id;
5571                 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
5572                         phba->sli4_hba.scsi_xri_start = rsrc_start +
5573                                 lpfc_sli4_get_els_iocb_cnt(phba);
5574
5575                 while (rsrc_id < (rsrc_start + rsrc_size)) {
5576                         ids[j] = rsrc_id;
5577                         rsrc_id++;
5578                         j++;
5579                 }
5580                 /* Entire word processed.  Get next word. */
5581                 if ((i % 2) == 1)
5582                         k++;
5583         }
5584  err_exit:
5585         lpfc_sli4_mbox_cmd_free(phba, mbox);
5586         return rc;
5587 }
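
/*
 * Decoding note (illustrative): each lpfc_id_range word in id_array[]
 * packs two 16-bit starting ids, which is why the loop above alternates
 * between the word4_0 and word4_1 fields and only advances k on odd i:
 *
 *      i = 0: rsrc_id = word4_0 of id_array[0]
 *      i = 1: rsrc_id = word4_1 of id_array[0], then k becomes 1
 *      i = 2: rsrc_id = word4_0 of id_array[1], and so on
 */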
5588
5589 /**
5590  * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5591  * @phba: Pointer to HBA context object.
5592  * @type: the extent's type.
5593  *
5594  * This function deallocates all extents of a particular resource type.
5595  * SLI4 does not allow for deallocating a particular extent range.  It
5596  * is the caller's responsibility to release all kernel memory resources.
5597  **/
5598 static int
5599 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5600 {
5601         int rc;
5602         uint32_t length, mbox_tmo = 0;
5603         LPFC_MBOXQ_t *mbox;
5604         struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5605         struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5606
5607         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5608         if (!mbox)
5609                 return -ENOMEM;
5610
5611         /*
5612          * This function sends an embedded mailbox because it only sends the
5613          * resource type.  All extents of this type are released by the
5614          * port.
5615          */
5616         length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5617                   sizeof(struct lpfc_sli4_cfg_mhdr));
5618         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5619                          LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5620                          length, LPFC_SLI4_MBX_EMBED);
5621
5622         /* Send an extents count of 0 - the dealloc doesn't use it. */
5623         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5624                                         LPFC_SLI4_MBX_EMBED);
5625         if (unlikely(rc)) {
5626                 rc = -EIO;
5627                 goto out_free_mbox;
5628         }
5629         if (!phba->sli4_hba.intr_enable)
5630                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5631         else {
5632                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5633                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5634         }
5635         if (unlikely(rc)) {
5636                 rc = -EIO;
5637                 goto out_free_mbox;
5638         }
5639
5640         dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5641         if (bf_get(lpfc_mbox_hdr_status,
5642                    &dealloc_rsrc->header.cfg_shdr.response)) {
5643                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5644                                 "2919 Failed to release resource extents "
5645                                 "for type %d - Status 0x%x Add'l Status 0x%x. "
5646                                 "Resource memory not released.\n",
5647                                 type,
5648                                 bf_get(lpfc_mbox_hdr_status,
5649                                     &dealloc_rsrc->header.cfg_shdr.response),
5650                                 bf_get(lpfc_mbox_hdr_add_status,
5651                                     &dealloc_rsrc->header.cfg_shdr.response));
5652                 rc = -EIO;
5653                 goto out_free_mbox;
5654         }
5655
5656         /* Release kernel memory resources for the specific type. */
5657         switch (type) {
5658         case LPFC_RSC_TYPE_FCOE_VPI:
5659                 kfree(phba->vpi_bmask);
5660                 kfree(phba->vpi_ids);
5661                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5662                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5663                                     &phba->lpfc_vpi_blk_list, list) {
5664                         list_del_init(&rsrc_blk->list);
5665                         kfree(rsrc_blk);
5666                 }
5667                 phba->sli4_hba.max_cfg_param.vpi_used = 0;
5668                 break;
5669         case LPFC_RSC_TYPE_FCOE_XRI:
5670                 kfree(phba->sli4_hba.xri_bmask);
5671                 kfree(phba->sli4_hba.xri_ids);
5672                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5673                                     &phba->sli4_hba.lpfc_xri_blk_list, list) {
5674                         list_del_init(&rsrc_blk->list);
5675                         kfree(rsrc_blk);
5676                 }
5677                 break;
5678         case LPFC_RSC_TYPE_FCOE_VFI:
5679                 kfree(phba->sli4_hba.vfi_bmask);
5680                 kfree(phba->sli4_hba.vfi_ids);
5681                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5682                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5683                                     &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5684                         list_del_init(&rsrc_blk->list);
5685                         kfree(rsrc_blk);
5686                 }
5687                 break;
5688         case LPFC_RSC_TYPE_FCOE_RPI:
5689                 /* RPI bitmask and physical id array are cleaned up earlier. */
5690                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5691                                     &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5692                         list_del_init(&rsrc_blk->list);
5693                         kfree(rsrc_blk);
5694                 }
5695                 break;
5696         default:
5697                 break;
5698         }
5699
5700         bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5701
5702  out_free_mbox:
5703         mempool_free(mbox, phba->mbox_mem_pool);
5704         return rc;
5705 }
5706
5707 static void
5708 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
5709                   uint32_t feature)
5710 {
5711         uint32_t len;
5712
5713         len = sizeof(struct lpfc_mbx_set_feature) -
5714                 sizeof(struct lpfc_sli4_cfg_mhdr);
5715         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5716                          LPFC_MBOX_OPCODE_SET_FEATURES, len,
5717                          LPFC_SLI4_MBX_EMBED);
5718
5719         switch (feature) {
5720         case LPFC_SET_UE_RECOVERY:
5721                 bf_set(lpfc_mbx_set_feature_UER,
5722                        &mbox->u.mqe.un.set_feature, 1);
5723                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
5724                 mbox->u.mqe.un.set_feature.param_len = 8;
5725                 break;
5726         case LPFC_SET_MDS_DIAGS:
5727                 bf_set(lpfc_mbx_set_feature_mds,
5728                        &mbox->u.mqe.un.set_feature, 1);
5729                 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
5730                        &mbox->u.mqe.un.set_feature, 0);
5731                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
5732                 mbox->u.mqe.un.set_feature.param_len = 8;
5733                 break;
5734         }
5735
5736         return;
5737 }
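
/*
 * Usage sketch (illustrative, mirroring how other embedded SLI4_CONFIG
 * commands are issued in this file): lpfc_set_features() only builds the
 * mailbox, so a caller still allocates and issues it:
 *
 *      mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *      if (!mbox)
 *              return -ENOMEM;
 *      lpfc_set_features(phba, mbox, LPFC_SET_MDS_DIAGS);
 *      rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 */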
5738
5739 /**
5740  * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5741  * @phba: Pointer to HBA context object.
5742  *
5743  * This function allocates all SLI4 resource identifiers.
5744  **/
5745 int
5746 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5747 {
5748         int i, rc, error = 0;
5749         uint16_t count, base;
5750         unsigned long longs;
5751
5752         if (!phba->sli4_hba.rpi_hdrs_in_use)
5753                 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
5754         if (phba->sli4_hba.extents_in_use) {
5755                 /*
5756                  * The port supports resource extents. The XRI, VPI, VFI, RPI
5757                  * resource extent count must be read and allocated before
5758                  * provisioning the resource id arrays.
5759                  */
5760                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5761                     LPFC_IDX_RSRC_RDY) {
5762                         /*
5763                          * Extent-based resources are set - the driver could
5764                          * be in a port reset. Figure out if any corrective
5765                          * actions need to be taken.
5766                          */
5767                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5768                                                  LPFC_RSC_TYPE_FCOE_VFI);
5769                         if (rc != 0)
5770                                 error++;
5771                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5772                                                  LPFC_RSC_TYPE_FCOE_VPI);
5773                         if (rc != 0)
5774                                 error++;
5775                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5776                                                  LPFC_RSC_TYPE_FCOE_XRI);
5777                         if (rc != 0)
5778                                 error++;
5779                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5780                                                  LPFC_RSC_TYPE_FCOE_RPI);
5781                         if (rc != 0)
5782                                 error++;
5783
5784                         /*
5785                          * It's possible that the number of resources
5786                          * provided to this port instance changed between
5787                          * resets.  Detect this condition and reallocate
5788                          * resources.  Otherwise, there is no action.
5789                          */
5790                         if (error) {
5791                                 lpfc_printf_log(phba, KERN_INFO,
5792                                                 LOG_MBOX | LOG_INIT,
5793                                                 "2931 Detected extent resource "
5794                                                 "change.  Reallocating all "
5795                                                 "extents.\n");
5796                                 rc = lpfc_sli4_dealloc_extent(phba,
5797                                                  LPFC_RSC_TYPE_FCOE_VFI);
5798                                 rc = lpfc_sli4_dealloc_extent(phba,
5799                                                  LPFC_RSC_TYPE_FCOE_VPI);
5800                                 rc = lpfc_sli4_dealloc_extent(phba,
5801                                                  LPFC_RSC_TYPE_FCOE_XRI);
5802                                 rc = lpfc_sli4_dealloc_extent(phba,
5803                                                  LPFC_RSC_TYPE_FCOE_RPI);
5804                         } else
5805                                 return 0;
5806                 }
5807
5808                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5809                 if (unlikely(rc))
5810                         goto err_exit;
5811
5812                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5813                 if (unlikely(rc))
5814                         goto err_exit;
5815
5816                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5817                 if (unlikely(rc))
5818                         goto err_exit;
5819
5820                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5821                 if (unlikely(rc))
5822                         goto err_exit;
5823                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5824                        LPFC_IDX_RSRC_RDY);
5825                 return rc;
5826         } else {
5827                 /*
5828                  * The port does not support resource extents.  The XRI, VPI,
5829                  * VFI, RPI resource ids were determined from READ_CONFIG.
5830                  * Just allocate the bitmasks and provision the resource id
5831                  * arrays.  If a port reset is active, the resources don't
5832                  * need any action - just exit.
5833                  */
5834                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5835                     LPFC_IDX_RSRC_RDY) {
5836                         lpfc_sli4_dealloc_resource_identifiers(phba);
5837                         lpfc_sli4_remove_rpis(phba);
5838                 }
5839                 /* RPIs. */
5840                 count = phba->sli4_hba.max_cfg_param.max_rpi;
5841                 if (count <= 0) {
5842                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5843                                         "3279 Invalid provisioning of "
5844                                         "rpi:%d\n", count);
5845                         rc = -EINVAL;
5846                         goto err_exit;
5847                 }
5848                 base = phba->sli4_hba.max_cfg_param.rpi_base;
5849                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5850                 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5851                                                    sizeof(unsigned long),
5852                                                    GFP_KERNEL);
5853                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5854                         rc = -ENOMEM;
5855                         goto err_exit;
5856                 }
5857                 phba->sli4_hba.rpi_ids = kzalloc(count *
5858                                                  sizeof(uint16_t),
5859                                                  GFP_KERNEL);
5860                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5861                         rc = -ENOMEM;
5862                         goto free_rpi_bmask;
5863                 }
5864
5865                 for (i = 0; i < count; i++)
5866                         phba->sli4_hba.rpi_ids[i] = base + i;
5867
5868                 /* VPIs. */
5869                 count = phba->sli4_hba.max_cfg_param.max_vpi;
5870                 if (count <= 0) {
5871                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5872                                         "3280 Invalid provisioning of "
5873                                         "vpi:%d\n", count);
5874                         rc = -EINVAL;
5875                         goto free_rpi_ids;
5876                 }
5877                 base = phba->sli4_hba.max_cfg_param.vpi_base;
5878                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5879                 phba->vpi_bmask = kzalloc(longs *
5880                                           sizeof(unsigned long),
5881                                           GFP_KERNEL);
5882                 if (unlikely(!phba->vpi_bmask)) {
5883                         rc = -ENOMEM;
5884                         goto free_rpi_ids;
5885                 }
5886                 phba->vpi_ids = kzalloc(count *
5887                                         sizeof(uint16_t),
5888                                         GFP_KERNEL);
5889                 if (unlikely(!phba->vpi_ids)) {
5890                         rc = -ENOMEM;
5891                         goto free_vpi_bmask;
5892                 }
5893
5894                 for (i = 0; i < count; i++)
5895                         phba->vpi_ids[i] = base + i;
5896
5897                 /* XRIs. */
5898                 count = phba->sli4_hba.max_cfg_param.max_xri;
5899                 if (count <= 0) {
5900                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5901                                         "3281 Invalid provisioning of "
5902                                         "xri:%d\n", count);
5903                         rc = -EINVAL;
5904                         goto free_vpi_ids;
5905                 }
5906                 base = phba->sli4_hba.max_cfg_param.xri_base;
5907                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5908                 phba->sli4_hba.xri_bmask = kzalloc(longs *
5909                                                    sizeof(unsigned long),
5910                                                    GFP_KERNEL);
5911                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5912                         rc = -ENOMEM;
5913                         goto free_vpi_ids;
5914                 }
5915                 phba->sli4_hba.max_cfg_param.xri_used = 0;
5916                 phba->sli4_hba.xri_ids = kzalloc(count *
5917                                                  sizeof(uint16_t),
5918                                                  GFP_KERNEL);
5919                 if (unlikely(!phba->sli4_hba.xri_ids)) {
5920                         rc = -ENOMEM;
5921                         goto free_xri_bmask;
5922                 }
5923
5924                 for (i = 0; i < count; i++)
5925                         phba->sli4_hba.xri_ids[i] = base + i;
5926
5927                 /* VFIs. */
5928                 count = phba->sli4_hba.max_cfg_param.max_vfi;
5929                 if (count <= 0) {
5930                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5931                                         "3282 Invalid provisioning of "
5932                                         "vfi:%d\n", count);
5933                         rc = -EINVAL;
5934                         goto free_xri_ids;
5935                 }
5936                 base = phba->sli4_hba.max_cfg_param.vfi_base;
5937                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5938                 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5939                                                    sizeof(unsigned long),
5940                                                    GFP_KERNEL);
5941                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5942                         rc = -ENOMEM;
5943                         goto free_xri_ids;
5944                 }
5945                 phba->sli4_hba.vfi_ids = kzalloc(count *
5946                                                  sizeof(uint16_t),
5947                                                  GFP_KERNEL);
5948                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5949                         rc = -ENOMEM;
5950                         goto free_vfi_bmask;
5951                 }
5952
5953                 for (i = 0; i < count; i++)
5954                         phba->sli4_hba.vfi_ids[i] = base + i;
5955
5956                 /*
5957                  * Mark all resources ready so that an HBA reset does not
5958                  * need to repeat this initialization.
5959                  */
5960                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5961                        LPFC_IDX_RSRC_RDY);
5962                 return 0;
5963         }
5964
5965  free_vfi_bmask:
5966         kfree(phba->sli4_hba.vfi_bmask);
5967         phba->sli4_hba.vfi_bmask = NULL;
5968  free_xri_ids:
5969         kfree(phba->sli4_hba.xri_ids);
5970         phba->sli4_hba.xri_ids = NULL;
5971  free_xri_bmask:
5972         kfree(phba->sli4_hba.xri_bmask);
5973         phba->sli4_hba.xri_bmask = NULL;
5974  free_vpi_ids:
5975         kfree(phba->vpi_ids);
5976         phba->vpi_ids = NULL;
5977  free_vpi_bmask:
5978         kfree(phba->vpi_bmask);
5979         phba->vpi_bmask = NULL;
5980  free_rpi_ids:
5981         kfree(phba->sli4_hba.rpi_ids);
5982         phba->sli4_hba.rpi_ids = NULL;
5983  free_rpi_bmask:
5984         kfree(phba->sli4_hba.rpi_bmask);
5985         phba->sli4_hba.rpi_bmask = NULL;
5986  err_exit:
5987         return rc;
5988 }
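
/*
 * Relationship sketch (illustrative, not quoted from the real
 * allocators): the *_bmask bitmaps built above track which slots are in
 * use, while the matching *_ids arrays map a slot to its port-assigned
 * identifier:
 *
 *      slot = find_first_zero_bit(phba->sli4_hba.rpi_bmask, max_rpi);
 *      if (slot < max_rpi) {
 *              set_bit(slot, phba->sli4_hba.rpi_bmask);
 *              rpi = phba->sli4_hba.rpi_ids[slot];
 *      }
 */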
5989
5990 /**
5991  * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
5992  * @phba: Pointer to HBA context object.
5993  *
5994  * This function releases all of the resource identifiers allocated to
5995  * this port, whether or not they were provisioned as extents.
5996  **/
5997 int
5998 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5999 {
6000         if (phba->sli4_hba.extents_in_use) {
6001                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6002                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6003                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6004                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6005         } else {
6006                 kfree(phba->vpi_bmask);
6007                 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6008                 kfree(phba->vpi_ids);
6009                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6010                 kfree(phba->sli4_hba.xri_bmask);
6011                 kfree(phba->sli4_hba.xri_ids);
6012                 kfree(phba->sli4_hba.vfi_bmask);
6013                 kfree(phba->sli4_hba.vfi_ids);
6014                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6015                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6016         }
6017
6018         return 0;
6019 }
6020
6021 /**
6022  * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6023  * @phba: Pointer to HBA context object.
6024  * @type: The resource extent type.
6025  * @extnt_cnt: buffer to hold port extent count response.
6026  * @extnt_size: buffer to hold port extent size response.
6027  *
6028  * This function calls the port to read the host-allocated extents
6029  * for a particular type.
6030  **/
6031 int
6032 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6033                                uint16_t *extnt_cnt, uint16_t *extnt_size)
6034 {
6035         bool emb;
6036         int rc = 0;
6037         uint16_t curr_blks = 0;
6038         uint32_t req_len, emb_len;
6039         uint32_t alloc_len, mbox_tmo;
6040         struct list_head *blk_list_head;
6041         struct lpfc_rsrc_blks *rsrc_blk;
6042         LPFC_MBOXQ_t *mbox;
6043         void *virtaddr = NULL;
6044         struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6045         struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6046         union  lpfc_sli4_cfg_shdr *shdr;
6047
6048         switch (type) {
6049         case LPFC_RSC_TYPE_FCOE_VPI:
6050                 blk_list_head = &phba->lpfc_vpi_blk_list;
6051                 break;
6052         case LPFC_RSC_TYPE_FCOE_XRI:
6053                 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6054                 break;
6055         case LPFC_RSC_TYPE_FCOE_VFI:
6056                 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6057                 break;
6058         case LPFC_RSC_TYPE_FCOE_RPI:
6059                 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6060                 break;
6061         default:
6062                 return -EIO;
6063         }
6064
6065         /* Count the number of extents currently allocated for this type. */
6066         list_for_each_entry(rsrc_blk, blk_list_head, list) {
6067                 if (curr_blks == 0) {
6068                         /*
6069                          * The GET_ALLOCATED mailbox does not return the size,
6070                          * just the count.  All blocks of an extent type
6071                          * share the same size, so the size can be taken
6072                          * from the first allocated block; set the return
6073                          * value now.
6074                          */
6075                         *extnt_size = rsrc_blk->rsrc_size;
6076                 }
6077                 curr_blks++;
6078         }
6079
6080         /*
6081          * Calculate the size of an embedded mailbox.  The uint32_t
6082          * accounts for the extents-specific word.
6083          */
6084         emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6085                 sizeof(uint32_t);
6086
6087         /*
6088          * Presume the allocation and response will fit into an embedded
6089          * mailbox.  If not true, reconfigure to a non-embedded mailbox.
6090          */
6091         emb = LPFC_SLI4_MBX_EMBED;
6092         req_len = emb_len;
6093         if (req_len > emb_len) {
6094                 req_len = curr_blks * sizeof(uint16_t) +
6095                         sizeof(union lpfc_sli4_cfg_shdr) +
6096                         sizeof(uint32_t);
6097                 emb = LPFC_SLI4_MBX_NEMBED;
6098         }
6099
6100         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6101         if (!mbox)
6102                 return -ENOMEM;
6103         memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6104
6105         alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6106                                      LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6107                                      req_len, emb);
6108         if (alloc_len < req_len) {
6109                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6110                         "2983 Allocated DMA memory size (x%x) is "
6111                         "less than the requested DMA memory "
6112                         "size (x%x)\n", alloc_len, req_len);
6113                 rc = -ENOMEM;
6114                 goto err_exit;
6115         }
6116         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6117         if (unlikely(rc)) {
6118                 rc = -EIO;
6119                 goto err_exit;
6120         }
6121
6122         if (!phba->sli4_hba.intr_enable)
6123                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6124         else {
6125                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6126                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6127         }
6128
6129         if (unlikely(rc)) {
6130                 rc = -EIO;
6131                 goto err_exit;
6132         }
6133
6134         /*
6135          * Figure out where the response is located.  Then get local pointers
6136          * to the response data.  The port does not guarantee to respond with
6137          * the full extent count requested, so update the local variable with the
6138          * allocated count from the port.
6139          */
6140         if (emb == LPFC_SLI4_MBX_EMBED) {
6141                 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6142                 shdr = &rsrc_ext->header.cfg_shdr;
6143                 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6144         } else {
6145                 virtaddr = mbox->sge_array->addr[0];
6146                 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6147                 shdr = &n_rsrc->cfg_shdr;
6148                 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6149         }
6150
6151         if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6152                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6153                         "2984 Failed to read allocated resources "
6154                         "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6155                         type,
6156                         bf_get(lpfc_mbox_hdr_status, &shdr->response),
6157                         bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6158                 rc = -EIO;
6159                 goto err_exit;
6160         }
6161  err_exit:
6162         lpfc_sli4_mbox_cmd_free(phba, mbox);
6163         return rc;
6164 }
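
/*
 * Usage sketch (illustrative): after a function reset the currently
 * provisioned extents can be re-read without reallocating them:
 *
 *      uint16_t cnt, size;
 *
 *      rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *                                          &cnt, &size);
 *      if (rc)
 *              return rc;
 */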
6165
6166 /**
6167  * lpfc_sli4_repost_els_sgl_list - Repost the els buffer sgl pages as a block
6168  * @phba: pointer to lpfc hba data structure.
6169  *
6170  * This routine walks the list of els buffers that have been allocated and
6171  * reposts them to the port using SGL block post. This is needed after a
6172  * pci_function_reset/warm_start or start. It attempts to construct blocks
6173  * of els buffer sgls which contain contiguous xris and uses the non-embedded
6174  * SGL block post mailbox commands to post them to the port. For any single
6175  * els buffer sgl with a non-contiguous xri, it uses the embedded SGL post
6176  * mailbox command for posting.
6177  *
6178  * Returns: 0 = success, non-zero failure.
6179  **/
6180 static int
6181 lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6182 {
6183         struct lpfc_sglq *sglq_entry = NULL;
6184         struct lpfc_sglq *sglq_entry_next = NULL;
6185         struct lpfc_sglq *sglq_entry_first = NULL;
6186         int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
6187         int last_xritag = NO_XRI;
6188         struct lpfc_sli_ring *pring;
6189         LIST_HEAD(prep_sgl_list);
6190         LIST_HEAD(blck_sgl_list);
6191         LIST_HEAD(allc_sgl_list);
6192         LIST_HEAD(post_sgl_list);
6193         LIST_HEAD(free_sgl_list);
6194
6195         pring = &phba->sli.ring[LPFC_ELS_RING];
6196         spin_lock_irq(&phba->hbalock);
6197         spin_lock(&pring->ring_lock);
6198         list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
6199         spin_unlock(&pring->ring_lock);
6200         spin_unlock_irq(&phba->hbalock);
6201
6202         total_cnt = phba->sli4_hba.els_xri_cnt;
6203         list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6204                                  &allc_sgl_list, list) {
6205                 list_del_init(&sglq_entry->list);
6206                 block_cnt++;
6207                 if ((last_xritag != NO_XRI) &&
6208                     (sglq_entry->sli4_xritag != last_xritag + 1)) {
6209                         /* a hole in xri block, form a sgl posting block */
6210                         list_splice_init(&prep_sgl_list, &blck_sgl_list);
6211                         post_cnt = block_cnt - 1;
6212                         /* prepare list for next posting block */
6213                         list_add_tail(&sglq_entry->list, &prep_sgl_list);
6214                         block_cnt = 1;
6215                 } else {
6216                         /* prepare list for next posting block */
6217                         list_add_tail(&sglq_entry->list, &prep_sgl_list);
6218                         /* enough sgls for non-embed sgl mbox command */
6219                         if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6220                                 list_splice_init(&prep_sgl_list,
6221                                                  &blck_sgl_list);
6222                                 post_cnt = block_cnt;
6223                                 block_cnt = 0;
6224                         }
6225                 }
6226                 num_posted++;
6227
6228                 /* keep track of last sgl's xritag */
6229                 last_xritag = sglq_entry->sli4_xritag;
6230
6231                 /* end of repost sgl list condition for els buffers */
6232                 if (num_posted == phba->sli4_hba.els_xri_cnt) {
6233                         if (post_cnt == 0) {
6234                                 list_splice_init(&prep_sgl_list,
6235                                                  &blck_sgl_list);
6236                                 post_cnt = block_cnt;
6237                         } else if (block_cnt == 1) {
6238                                 status = lpfc_sli4_post_sgl(phba,
6239                                                 sglq_entry->phys, 0,
6240                                                 sglq_entry->sli4_xritag);
6241                                 if (!status) {
6242                                         /* successful, put sgl to posted list */
6243                                         list_add_tail(&sglq_entry->list,
6244                                                       &post_sgl_list);
6245                                 } else {
6246                                         /* Failure, put sgl to free list */
6247                                         lpfc_printf_log(phba, KERN_WARNING,
6248                                                 LOG_SLI,
6249                                                 "3159 Failed to post els "
6250                                                 "sgl, xritag:x%x\n",
6251                                                 sglq_entry->sli4_xritag);
6252                                         list_add_tail(&sglq_entry->list,
6253                                                       &free_sgl_list);
6254                                         total_cnt--;
6255                                 }
6256                         }
6257                 }
6258
6259                 /* continue until a non-embedded page worth of sgls */
6260                 if (post_cnt == 0)
6261                         continue;
6262
6263                 /* post the els buffer list sgls as a block */
6264                 status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
6265                                                      post_cnt);
6266
6267                 if (!status) {
6268                         /* success, put sgl list to posted sgl list */
6269                         list_splice_init(&blck_sgl_list, &post_sgl_list);
6270                 } else {
6271                         /* Failure, put sgl list to free sgl list */
6272                         sglq_entry_first = list_first_entry(&blck_sgl_list,
6273                                                             struct lpfc_sglq,
6274                                                             list);
6275                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6276                                         "3160 Failed to post els sgl-list, "
6277                                         "xritag:x%x-x%x\n",
6278                                         sglq_entry_first->sli4_xritag,
6279                                         (sglq_entry_first->sli4_xritag +
6280                                          post_cnt - 1));
6281                         list_splice_init(&blck_sgl_list, &free_sgl_list);
6282                         total_cnt -= post_cnt;
6283                 }
6284
6285                 /* don't reset xritag due to hole in xri block */
6286                 if (block_cnt == 0)
6287                         last_xritag = NO_XRI;
6288
6289                 /* reset els sgl post count for next round of posting */
6290                 post_cnt = 0;
6291         }
6292         /* update the number of XRIs posted for ELS */
6293         phba->sli4_hba.els_xri_cnt = total_cnt;
6294
6295         /* free the els sgls failed to post */
6296         lpfc_free_sgl_list(phba, &free_sgl_list);
6297
6298         /* push els sgls posted to the available list */
6299         if (!list_empty(&post_sgl_list)) {
6300                 spin_lock_irq(&phba->hbalock);
6301                 spin_lock(&pring->ring_lock);
6302                 list_splice_init(&post_sgl_list,
6303                                  &phba->sli4_hba.lpfc_sgl_list);
6304                 spin_unlock(&pring->ring_lock);
6305                 spin_unlock_irq(&phba->hbalock);
6306         } else {
6307                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6308                                 "3161 Failure to post els sgl to port.\n");
6309                 return -EIO;
6310         }
6311         return 0;
6312 }
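
/*
 * Worked example (illustrative): given els sgls with xritags 10, 11, 12
 * and then 40, the hole at 40 cuts {10, 11, 12} into a posting block for
 * lpfc_sli4_post_els_sgl_list(), and because 40 is also the last entry
 * it is posted individually through lpfc_sli4_post_sgl().  Contiguous
 * runs are otherwise flushed whenever block_cnt reaches
 * LPFC_NEMBED_MBOX_SGL_CNT.
 */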
6313
6314 /**
6315  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
6316  * @phba: Pointer to HBA context object.
6317  *
6318  * This function is the main SLI4 device initialization PCI function. This
6319  * function is called by the HBA initialization code, HBA reset code and
6320  * HBA error attention handler code. Caller is not required to hold any
6321  * locks.
6322  **/
6323 int
6324 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6325 {
6326         int rc;
6327         LPFC_MBOXQ_t *mboxq;
6328         struct lpfc_mqe *mqe;
6329         uint8_t *vpd;
6330         uint32_t vpd_size;
6331         uint32_t ftr_rsp = 0;
6332         struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6333         struct lpfc_vport *vport = phba->pport;
6334         struct lpfc_dmabuf *mp;
6335
6336         /* Perform a PCI function reset to start from clean */
6337         rc = lpfc_pci_function_reset(phba);
6338         if (unlikely(rc))
6339                 return -ENODEV;
6340
6341         /* Check the HBA Host Status Register for readiness */
6342         rc = lpfc_sli4_post_status_check(phba);
6343         if (unlikely(rc))
6344                 return -ENODEV;
6345         else {
6346                 spin_lock_irq(&phba->hbalock);
6347                 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6348                 spin_unlock_irq(&phba->hbalock);
6349         }
6350
6351         /*
6352          * Allocate a single mailbox container for initializing the
6353          * port.
6354          */
6355         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6356         if (!mboxq)
6357                 return -ENOMEM;
6358
6359         /* Issue READ_REV to collect vpd and FW information. */
6360         vpd_size = SLI4_PAGE_SIZE;
6361         vpd = kzalloc(vpd_size, GFP_KERNEL);
6362         if (!vpd) {
6363                 rc = -ENOMEM;
6364                 goto out_free_mbox;
6365         }
6366
6367         rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
6368         if (unlikely(rc)) {
6369                 kfree(vpd);
6370                 goto out_free_mbox;
6371         }
6372
6373         mqe = &mboxq->u.mqe;
6374         phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
6375         if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
6376                 phba->hba_flag |= HBA_FCOE_MODE;
6377                 phba->fcp_embed_io = 0; /* SLI4 FC support only */
6378         } else {
6379                 phba->hba_flag &= ~HBA_FCOE_MODE;
6380         }
6381
6382         if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6383                 LPFC_DCBX_CEE_MODE)
6384                 phba->hba_flag |= HBA_FIP_SUPPORT;
6385         else
6386                 phba->hba_flag &= ~HBA_FIP_SUPPORT;
6387
6388         phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6389
6390         if (phba->sli_rev != LPFC_SLI_REV4) {
6391                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6392                         "0376 READ_REV Error. SLI Level %d "
6393                         "FCoE enabled %d\n",
6394                         phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
6395                 rc = -EIO;
6396                 kfree(vpd);
6397                 goto out_free_mbox;
6398         }
6399
6400         /*
6401          * Continue initialization with default values even if driver failed
6402          * to read FCoE param config regions; only read parameters if the
6403          * board is FCoE.
6404          */
6405         if (phba->hba_flag & HBA_FCOE_MODE &&
6406             lpfc_sli4_read_fcoe_params(phba))
6407                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6408                         "2570 Failed to read FCoE parameters\n");
6409
6410         /*
6411          * Retrieve the SLI4 device physical port name; failure to do so
6412          * is considered non-fatal.
6413          */
6414         rc = lpfc_sli4_retrieve_pport_name(phba);
6415         if (!rc)
6416                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6417                                 "3080 Successfully retrieved SLI4 device "
6418                                 "physical port name: %s.\n", phba->Port);
6419
6420         /*
6421          * Evaluate the read rev and vpd data. Populate the driver
6422          * state with the results. If this routine fails, the failure
6423          * is not fatal as the driver will use generic values.
6424          */
6425         rc = lpfc_parse_vpd(phba, vpd, vpd_size);
6426         if (unlikely(!rc)) {
6427                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6428                                 "0377 Error %d parsing vpd. "
6429                                 "Using defaults.\n", rc);
6430                 rc = 0;
6431         }
6432         kfree(vpd);
6433
6434         /* Save information as VPD data */
6435         phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6436         phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6437         phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6438         phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6439                                          &mqe->un.read_rev);
6440         phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6441                                        &mqe->un.read_rev);
6442         phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6443                                             &mqe->un.read_rev);
6444         phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6445                                            &mqe->un.read_rev);
6446         phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6447         memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6448         phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6449         memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6450         phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6451         memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6452         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6453                         "(%d):0380 READ_REV Status x%x "
6454                         "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6455                         mboxq->vport ? mboxq->vport->vpi : 0,
6456                         bf_get(lpfc_mqe_status, mqe),
6457                         phba->vpd.rev.opFwName,
6458                         phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6459                         phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
6460
6461         /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */
6462         rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
6463         if (phba->pport->cfg_lun_queue_depth > rc) {
6464                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6465                                 "3362 LUN queue depth changed from %d to %d\n",
6466                                 phba->pport->cfg_lun_queue_depth, rc);
6467                 phba->pport->cfg_lun_queue_depth = rc;
6468         }
6469
6470         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6471             LPFC_SLI_INTF_IF_TYPE_0) {
6472                 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
6473                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6474                 if (rc == MBX_SUCCESS) {
6475                         phba->hba_flag |= HBA_RECOVERABLE_UE;
6476                         /* Set 1Sec interval to detect UE */
6477                         phba->eratt_poll_interval = 1;
6478                         phba->sli4_hba.ue_to_sr = bf_get(
6479                                         lpfc_mbx_set_feature_UESR,
6480                                         &mboxq->u.mqe.un.set_feature);
6481                         phba->sli4_hba.ue_to_rp = bf_get(
6482                                         lpfc_mbx_set_feature_UERP,
6483                                         &mboxq->u.mqe.un.set_feature);
6484                 }
6485         }
6486
6487         if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
6488                 /* Enable MDS Diagnostics only if the SLI Port supports it */
6489                 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
6490                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6491                 if (rc != MBX_SUCCESS)
6492                         phba->mds_diags_support = 0;
6493         }
6494
6495         /*
6496          * Discover the port's supported feature set and match it against the
6497          * host's requests.
6498          */
6499         lpfc_request_features(phba, mboxq);
6500         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6501         if (unlikely(rc)) {
6502                 rc = -EIO;
6503                 goto out_free_mbox;
6504         }
6505
6506         /*
6507          * The port must support FCP initiator mode as this is the
6508          * only mode running in the host.
6509          */
6510         if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
6511                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6512                                 "0378 No support for fcpi mode.\n");
6513                 ftr_rsp++;
6514         }
6515         if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
6516                 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6517         else
6518                 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
6519         /*
6520          * If the port cannot support the host's requested features
6521          * then turn off the global config parameters to disable the
6522          * feature in the driver.  This is not a fatal error.
6523          */
6524         phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6525         if (phba->cfg_enable_bg) {
6526                 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6527                         phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6528                 else
6529                         ftr_rsp++;
6530         }
6531
6532         if (phba->max_vpi && phba->cfg_enable_npiv &&
6533             !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6534                 ftr_rsp++;
6535
6536         if (ftr_rsp) {
6537                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6538                                 "0379 Feature Mismatch Data: x%08x %08x "
6539                                 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6540                                 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6541                                 phba->cfg_enable_npiv, phba->max_vpi);
6542                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6543                         phba->cfg_enable_bg = 0;
6544                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6545                         phba->cfg_enable_npiv = 0;
6546         }
6547
6548         /* These SLI3 features are assumed in SLI4 */
6549         spin_lock_irq(&phba->hbalock);
6550         phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
6551         spin_unlock_irq(&phba->hbalock);
6552
6553         /*
6554          * Allocate all resources (xri, rpi, vpi, vfi) now.  Subsequent
6555          * calls depend on these resources to complete port setup.
6556          */
6557         rc = lpfc_sli4_alloc_resource_identifiers(phba);
6558         if (rc) {
6559                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6560                                 "2920 Failed to alloc Resource IDs "
6561                                 "rc = x%x\n", rc);
6562                 goto out_free_mbox;
6563         }
6564
6565         /* Read the port's service parameters. */
6566         rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6567         if (rc) {
6568                 phba->link_state = LPFC_HBA_ERROR;
6569                 rc = -ENOMEM;
6570                 goto out_free_mbox;
6571         }
6572
6573         mboxq->vport = vport;
6574         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6575         mp = (struct lpfc_dmabuf *) mboxq->context1;
6576         if (rc == MBX_SUCCESS) {
6577                 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
6578                 rc = 0;
6579         }
6580
6581         /*
6582          * This memory was allocated by the lpfc_read_sparam routine. Release
6583          * it to the mbuf pool.
6584          */
6585         lpfc_mbuf_free(phba, mp->virt, mp->phys);
6586         kfree(mp);
6587         mboxq->context1 = NULL;
6588         if (unlikely(rc)) {
6589                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6590                                 "0382 READ_SPARAM command failed "
6591                                 "status %d, mbxStatus x%x\n",
6592                                 rc, bf_get(lpfc_mqe_status, mqe));
6593                 phba->link_state = LPFC_HBA_ERROR;
6594                 rc = -EIO;
6595                 goto out_free_mbox;
6596         }
6597
6598         lpfc_update_vport_wwn(vport);
6599
6600         /* Update the fc_host data structures with new wwn. */
6601         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6602         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6603
6604         /* update host els and scsi xri-sgl sizes and mappings */
6605         rc = lpfc_sli4_xri_sgl_update(phba);
6606         if (unlikely(rc)) {
6607                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6608                                 "1400 Failed to update xri-sgl size and "
6609                                 "mapping: %d\n", rc);
6610                 goto out_free_mbox;
6611         }
6612
6613         /* register the els sgl pool to the port */
6614         rc = lpfc_sli4_repost_els_sgl_list(phba);
6615         if (unlikely(rc)) {
6616                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6617                                 "0582 Error %d during els sgl post "
6618                                 "operation\n", rc);
6619                 rc = -ENODEV;
6620                 goto out_free_mbox;
6621         }
6622
6623         /* register the allocated scsi sgl pool to the port */
6624         rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6625         if (unlikely(rc)) {
6626                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6627                                 "0383 Error %d during scsi sgl post "
6628                                 "operation\n", rc);
6629                 /* Some Scsi buffers were moved to the abort scsi list */
6630                 /* A pci function reset will repost them */
6631                 rc = -ENODEV;
6632                 goto out_free_mbox;
6633         }
6634
6635         /* Post the rpi header region to the device. */
6636         rc = lpfc_sli4_post_all_rpi_hdrs(phba);
6637         if (unlikely(rc)) {
6638                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6639                                 "0393 Error %d during rpi post operation\n",
6640                                 rc);
6641                 rc = -ENODEV;
6642                 goto out_free_mbox;
6643         }
6644         lpfc_sli4_node_prep(phba);
6645
6646         /* Create all the SLI4 queues */
6647         rc = lpfc_sli4_queue_create(phba);
6648         if (rc) {
6649                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6650                                 "3089 Failed to allocate queues\n");
6651                 rc = -ENODEV;
6652                 goto out_stop_timers;
6653         }
6654         /* Set up all the queues to the device */
6655         rc = lpfc_sli4_queue_setup(phba);
6656         if (unlikely(rc)) {
6657                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6658                                 "0381 Error %d during queue setup.\n ", rc);
6659                 goto out_destroy_queue;
6660         }
6661
6662         /* Arm the CQs and then EQs on device */
6663         lpfc_sli4_arm_cqeq_intr(phba);
6664
6665         /* Indicate device interrupt mode */
6666         phba->sli4_hba.intr_enable = 1;
6667
6668         /* Allow asynchronous mailbox command to go through */
6669         spin_lock_irq(&phba->hbalock);
6670         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6671         spin_unlock_irq(&phba->hbalock);
6672
6673         /* Post receive buffers to the device */
6674         lpfc_sli4_rb_setup(phba);
6675
6676         /* Reset HBA FCF states after HBA reset */
6677         phba->fcf.fcf_flag = 0;
6678         phba->fcf.current_rec.flag = 0;
6679
6680         /* Start the ELS watchdog timer */
6681         mod_timer(&vport->els_tmofunc,
6682                   jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
6683
6684         /* Start heart beat timer */
6685         mod_timer(&phba->hb_tmofunc,
6686                   jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
6687         phba->hb_outstanding = 0;
6688         phba->last_completion_time = jiffies;
6689
6690         /* Start error attention (ERATT) polling timer */
6691         mod_timer(&phba->eratt_poll,
6692                   jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
6693
6694         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
6695         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
6696                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
6697                 if (!rc) {
6698                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6699                                         "2829 This device supports "
6700                                         "Advanced Error Reporting (AER)\n");
6701                         spin_lock_irq(&phba->hbalock);
6702                         phba->hba_flag |= HBA_AER_ENABLED;
6703                         spin_unlock_irq(&phba->hbalock);
6704                 } else {
6705                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6706                                         "2830 This device does not support "
6707                                         "Advanced Error Reporting (AER)\n");
6708                         phba->cfg_aer_support = 0;
6709                 }
6710                 rc = 0;
6711         }
6712
6713         if (!(phba->hba_flag & HBA_FCOE_MODE)) {
6714                 /*
6715                  * The FC Port needs to register FCFI (index 0)
6716                  */
6717                 lpfc_reg_fcfi(phba, mboxq);
6718                 mboxq->vport = phba->pport;
6719                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6720                 if (rc != MBX_SUCCESS)
6721                         goto out_unset_queue;
6722                 rc = 0;
6723                 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
6724                                         &mboxq->u.mqe.un.reg_fcfi);
6725
6726                 /* Check if the port is configured to be disabled */
6727                 lpfc_sli_read_link_ste(phba);
6728         }
6729
6730         /*
6731          * The port is ready, set the host's link state to LINK_DOWN
6732          * in preparation for link interrupts.
6733          */
6734         spin_lock_irq(&phba->hbalock);
6735         phba->link_state = LPFC_LINK_DOWN;
6736         spin_unlock_irq(&phba->hbalock);
6737         if (!(phba->hba_flag & HBA_FCOE_MODE) &&
6738             (phba->hba_flag & LINK_DISABLED)) {
6739                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6740                                 "3103 Adapter Link is disabled.\n");
6741                 lpfc_down_link(phba, mboxq);
6742                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6743                 if (rc != MBX_SUCCESS) {
6744                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6745                                         "3104 Adapter failed to issue "
6746                                         "DOWN_LINK mbox cmd, rc:x%x\n", rc);
6747                         goto out_unset_queue;
6748                 }
6749         } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
6750                 /* don't perform init_link on SLI4 FC port loopback test */
6751                 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
6752                         rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
6753                         if (rc)
6754                                 goto out_unset_queue;
6755                 }
6756         }
6757         mempool_free(mboxq, phba->mbox_mem_pool);
6758         return rc;
6759 out_unset_queue:
6760         /* Unset all the queues set up in this routine when erroring out */
6761         lpfc_sli4_queue_unset(phba);
6762 out_destroy_queue:
6763         lpfc_sli4_queue_destroy(phba);
6764 out_stop_timers:
6765         lpfc_stop_hba_timers(phba);
6766 out_free_mbox:
6767         mempool_free(mboxq, phba->mbox_mem_pool);
6768         return rc;
6769 }
6770
6771 /**
6772  * lpfc_mbox_timeout - Timeout call back function for mbox timer
6773  * @ptr: context object - pointer to hba structure.
6774  *
6775  * This is the callback function for the mailbox timer. The mailbox
6776  * timer is armed when a new mailbox command is issued and the timer
6777  * is deleted when the mailbox completes. The function is called by
6778  * the kernel timer code when a mailbox does not complete within the
6779  * expected time. This function wakes up the worker thread to
6780  * process the mailbox timeout and returns. All the processing is
6781  * done by the worker thread function lpfc_mbox_timeout_handler.
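      *
      * The timer itself is armed by the issue path with a call of the form
      *
      *      mod_timer(&psli->mbox_tmo, jiffies + timeout);
      *
      * as done in lpfc_sli_issue_mbox_s3() below.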
6782  **/
6783 void
6784 lpfc_mbox_timeout(unsigned long ptr)
6785 {
6786         struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
6787         unsigned long iflag;
6788         uint32_t tmo_posted;
6789
6790         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
6791         tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
6792         if (!tmo_posted)
6793                 phba->pport->work_port_events |= WORKER_MBOX_TMO;
6794         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
6795
6796         if (!tmo_posted)
6797                 lpfc_worker_wake_up(phba);
6798         return;
6799 }
6800
6801 /**
6802  * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
6803  *                                    are pending
6804  * @phba: Pointer to HBA context object.
6805  *
6806  * This function checks if any mailbox completions are present on the mailbox
6807  * completion queue.
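      *
      * Return: true if a completed, non-asynchronous mailbox CQE is present
      *         on the queue; false otherwise.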
6808  **/
6809 static bool
6810 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
6811 {
6812
6813         uint32_t idx;
6814         struct lpfc_queue *mcq;
6815         struct lpfc_mcqe *mcqe;
6816         bool pending_completions = false;
6817
6818         if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
6819                 return false;
6820
6821         /* Check for completions on mailbox completion queue */
6822
6823         mcq = phba->sli4_hba.mbx_cq;
6824         idx = mcq->hba_index;
6825         while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
6826                 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
6827                 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
6828                     (!bf_get_le32(lpfc_trailer_async, mcqe))) {
6829                         pending_completions = true;
6830                         break;
6831                 }
6832                 idx = (idx + 1) % mcq->entry_count;
6833                 if (mcq->hba_index == idx)
6834                         break;
6835         }
6836         return pending_completions;
6837
6838 }
6839
6840 /**
6841  * lpfc_sli4_process_missed_mbox_completions - process mbox completions
6842  *                                            that were missed.
6843  * @phba: Pointer to HBA context object.
6844  *
6845  * For SLI4, it is possible to miss an interrupt. As such, mbox completions
6846  * may be missed, causing erroneous mailbox timeouts to occur. This function
6847  * checks to see if mbox completions are on the mailbox completion queue
6848  * and will process all the completions associated with the eq for the
6849  * mailbox completion queue.
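      *
      * Return: true if a mailbox completion was pending (and the associated
      *         EQ events were processed); false otherwise, including when
      *         the EQ for the mailbox CQ cannot be located.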
6850  **/
6851 bool
6852 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
6853 {
6854
6855         uint32_t eqidx;
6856         struct lpfc_queue *fpeq = NULL;
6857         struct lpfc_eqe *eqe;
6858         bool mbox_pending;
6859
6860         if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
6861                 return false;
6862
6863         /* Find the eq associated with the mcq */
6864
6865         if (phba->sli4_hba.hba_eq)
6866                 for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++)
6867                         if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
6868                             phba->sli4_hba.mbx_cq->assoc_qid) {
6869                                 fpeq = phba->sli4_hba.hba_eq[eqidx];
6870                                 break;
6871                         }
6872         if (!fpeq)
6873                 return false;
6874
6875         /* Turn off interrupts from this EQ */
6876
6877         lpfc_sli4_eq_clr_intr(fpeq);
6878
6879         /* Check to see if a mbox completion is pending */
6880
6881         mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
6882
6883         /*
6884          * If a mbox completion is pending, process all the events on EQ
6885          * associated with the mbox completion queue (this could include
6886          * mailbox commands, async events, els commands, receive queue data
6887          * and fcp commands)
6888          */
6889
6890         if (mbox_pending)
6891                 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
6892                         lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
6893                         fpeq->EQ_processed++;
6894                 }
6895
6896         /* Always clear and re-arm the EQ */
6897
6898         lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
6899
6900         return mbox_pending;
6901
6902 }
6903
6904 /**
6905  * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
6906  * @phba: Pointer to HBA context object.
6907  *
6908  * This function is called from worker thread when a mailbox command times out.
6909  * The caller is not required to hold any locks. This function will reset the
6910  * HBA and recover all the pending commands.
6911  **/
6912 void
6913 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
6914 {
6915         LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
6916         MAILBOX_t *mb = NULL;
6917
6918         struct lpfc_sli *psli = &phba->sli;
6919
6920         /* If the mailbox completed, process the completion and return */
6921         if (lpfc_sli4_process_missed_mbox_completions(phba))
6922                 return;
6923
6924         if (pmbox != NULL)
6925                 mb = &pmbox->u.mb;
6926         /* Check the pmbox pointer first.  There is a race condition
6927          * between the mbox timeout handler getting executed in the
6928          * worklist and the mailbox actually completing. When this
6929          * race condition occurs, the mbox_active will be NULL.
6930          */
6931         spin_lock_irq(&phba->hbalock);
6932         if (pmbox == NULL) {
6933                 lpfc_printf_log(phba, KERN_WARNING,
6934                                 LOG_MBOX | LOG_SLI,
6935                                 "0353 Active Mailbox cleared - mailbox timeout "
6936                                 "exiting\n");
6937                 spin_unlock_irq(&phba->hbalock);
6938                 return;
6939         }
6940
6941         /* Mbox cmd <mbxCommand> timeout */
6942         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6943                         "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
6944                         mb->mbxCommand,
6945                         phba->pport->port_state,
6946                         phba->sli.sli_flag,
6947                         phba->sli.mbox_active);
6948         spin_unlock_irq(&phba->hbalock);
6949
6950         /* Setting state unknown so lpfc_sli_abort_iocb_ring
6951          * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
6952          * it to fail all outstanding SCSI IO.
6953          */
6954         spin_lock_irq(&phba->pport->work_port_lock);
6955         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
6956         spin_unlock_irq(&phba->pport->work_port_lock);
6957         spin_lock_irq(&phba->hbalock);
6958         phba->link_state = LPFC_LINK_UNKNOWN;
6959         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
6960         spin_unlock_irq(&phba->hbalock);
6961
6962         lpfc_sli_abort_fcp_rings(phba);
6963
6964         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6965                         "0345 Resetting board due to mailbox timeout\n");
6966
6967         /* Reset the HBA device */
6968         lpfc_reset_hba(phba);
6969 }
6970
6971 /**
6972  * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
6973  * @phba: Pointer to HBA context object.
6974  * @pmbox: Pointer to mailbox object.
6975  * @flag: Flag indicating how the mailbox need to be processed.
6976  *
6977  * This function is called by discovery code and HBA management code
6978  * to submit a mailbox command to firmware with SLI-3 interface spec. This
6979  * function gets the hbalock to protect the data structures.
6980  * The mailbox command can be submitted in polling mode, in which case
6981  * this function will wait in a polling loop for the completion of the
6982  * mailbox.
6983  * If the mailbox is submitted in no_wait mode (not polling) the
6984  * function will submit the command and return immediately without waiting
6985  * for the mailbox completion. The no_wait mode is supported only when the
6986  * HBA is in SLI2/SLI3 mode - interrupts are enabled.
6987  * The SLI interface allows only one mailbox pending at a time. If the
6988  * mailbox is issued in polling mode and there is already a mailbox
6989  * pending, then the function will return an error. If the mailbox is issued
6990  * in NO_WAIT mode and there is a mailbox pending already, the function
6991  * will return MBX_BUSY after queuing the mailbox into mailbox queue.
6992  * The sli layer owns the mailbox object until the completion of mailbox
6993  * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
6994  * return codes the caller owns the mailbox command after the return of
6995  * the function.
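      *
      * A typical polled caller (a sketch, not a specific call site) issues
      * the command and reclaims the mailbox itself on any failing return:
      *
      *      rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
      *      if (rc != MBX_SUCCESS)
      *              mempool_free(mboxq, phba->mbox_mem_pool);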
6996  **/
6997 static int
6998 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
6999                        uint32_t flag)
7000 {
7001         MAILBOX_t *mbx;
7002         struct lpfc_sli *psli = &phba->sli;
7003         uint32_t status, evtctr;
7004         uint32_t ha_copy, hc_copy;
7005         int i;
7006         unsigned long timeout;
7007         unsigned long drvr_flag = 0;
7008         uint32_t word0, ldata;
7009         void __iomem *to_slim;
7010         int processing_queue = 0;
7011
7012         spin_lock_irqsave(&phba->hbalock, drvr_flag);
7013         if (!pmbox) {
7014                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7015                 /* processing mbox queue from intr_handler */
7016                 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7017                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7018                         return MBX_SUCCESS;
7019                 }
7020                 processing_queue = 1;
7021                 pmbox = lpfc_mbox_get(phba);
7022                 if (!pmbox) {
7023                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7024                         return MBX_SUCCESS;
7025                 }
7026         }
7027
7028         if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
7029                 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
7030                 if (!pmbox->vport) {
7031                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7032                         lpfc_printf_log(phba, KERN_ERR,
7033                                         LOG_MBOX | LOG_VPORT,
7034                                         "1806 Mbox x%x failed. No vport\n",
7035                                         pmbox->u.mb.mbxCommand);
7036                         dump_stack();
7037                         goto out_not_finished;
7038                 }
7039         }
7040
7041         /* If the PCI channel is in offline state, do not post mbox. */
7042         if (unlikely(pci_channel_offline(phba->pcidev))) {
7043                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7044                 goto out_not_finished;
7045         }
7046
7047         /* If HBA has a deferred error attention, fail the mailbox command. */
7048         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
7049                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7050                 goto out_not_finished;
7051         }
7052
7053         psli = &phba->sli;
7054
7055         mbx = &pmbox->u.mb;
7056         status = MBX_SUCCESS;
7057
7058         if (phba->link_state == LPFC_HBA_ERROR) {
7059                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7060
7061                 /* Mbox command <mbxCommand> cannot issue */
7062                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7063                                 "(%d):0311 Mailbox command x%x cannot "
7064                                 "issue Data: x%x x%x\n",
7065                                 pmbox->vport ? pmbox->vport->vpi : 0,
7066                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
7067                 goto out_not_finished;
7068         }
7069
7070         if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
7071                 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
7072                         !(hc_copy & HC_MBINT_ENA)) {
7073                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7074                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7075                                 "(%d):2528 Mailbox command x%x cannot "
7076                                 "issue Data: x%x x%x\n",
7077                                 pmbox->vport ? pmbox->vport->vpi : 0,
7078                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
7079                         goto out_not_finished;
7080                 }
7081         }
7082
7083         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7084                 /* Polling for a mbox command when another one is already active
7085                  * is not allowed in SLI. Also, the driver must have established
7086                  * SLI2 mode to queue and process multiple mbox commands.
7087                  */
7088
7089                 if (flag & MBX_POLL) {
7090                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7091
7092                         /* Mbox command <mbxCommand> cannot issue */
7093                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7094                                         "(%d):2529 Mailbox command x%x "
7095                                         "cannot issue Data: x%x x%x\n",
7096                                         pmbox->vport ? pmbox->vport->vpi : 0,
7097                                         pmbox->u.mb.mbxCommand,
7098                                         psli->sli_flag, flag);
7099                         goto out_not_finished;
7100                 }
7101
7102                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
7103                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7104                         /* Mbox command <mbxCommand> cannot issue */
7105                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7106                                         "(%d):2530 Mailbox command x%x "
7107                                         "cannot issue Data: x%x x%x\n",
7108                                         pmbox->vport ? pmbox->vport->vpi : 0,
7109                                         pmbox->u.mb.mbxCommand,
7110                                         psli->sli_flag, flag);
7111                         goto out_not_finished;
7112                 }
7113
7114                 /* Another mailbox command is still being processed, queue this
7115                  * command to be processed later.
7116                  */
7117                 lpfc_mbox_put(phba, pmbox);
7118
7119                 /* Mbox cmd issue - BUSY */
7120                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7121                                 "(%d):0308 Mbox cmd issue - BUSY Data: "
7122                                 "x%x x%x x%x x%x\n",
7123                                 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
7124                                 mbx->mbxCommand, phba->pport->port_state,
7125                                 psli->sli_flag, flag);
7126
7127                 psli->slistat.mbox_busy++;
7128                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7129
7130                 if (pmbox->vport) {
7131                         lpfc_debugfs_disc_trc(pmbox->vport,
7132                                 LPFC_DISC_TRC_MBOX_VPORT,
7133                                 "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
7134                                 (uint32_t)mbx->mbxCommand,
7135                                 mbx->un.varWords[0], mbx->un.varWords[1]);
7136                 }
7137                 else {
7138                         lpfc_debugfs_disc_trc(phba->pport,
7139                                 LPFC_DISC_TRC_MBOX,
7140                                 "MBOX Bsy:        cmd:x%x mb:x%x x%x",
7141                                 (uint32_t)mbx->mbxCommand,
7142                                 mbx->un.varWords[0], mbx->un.varWords[1]);
7143                 }
7144
7145                 return MBX_BUSY;
7146         }
7147
7148         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7149
7150         /* If we are not polling, we MUST be in SLI2 mode */
7151         if (flag != MBX_POLL) {
7152                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
7153                     (mbx->mbxCommand != MBX_KILL_BOARD)) {
7154                         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7155                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7156                         /* Mbox command <mbxCommand> cannot issue */
7157                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7158                                         "(%d):2531 Mailbox command x%x "
7159                                         "cannot issue Data: x%x x%x\n",
7160                                         pmbox->vport ? pmbox->vport->vpi : 0,
7161                                         pmbox->u.mb.mbxCommand,
7162                                         psli->sli_flag, flag);
7163                         goto out_not_finished;
7164                 }
7165                 /* timeout active mbox command */
7166                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7167                                            1000);
7168                 mod_timer(&psli->mbox_tmo, jiffies + timeout);
7169         }
7170
7171         /* Mailbox cmd <cmd> issue */
7172         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7173                         "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
7174                         "x%x\n",
7175                         pmbox->vport ? pmbox->vport->vpi : 0,
7176                         mbx->mbxCommand, phba->pport->port_state,
7177                         psli->sli_flag, flag);
7178
7179         if (mbx->mbxCommand != MBX_HEARTBEAT) {
7180                 if (pmbox->vport) {
7181                         lpfc_debugfs_disc_trc(pmbox->vport,
7182                                 LPFC_DISC_TRC_MBOX_VPORT,
7183                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7184                                 (uint32_t)mbx->mbxCommand,
7185                                 mbx->un.varWords[0], mbx->un.varWords[1]);
7186                 }
7187                 else {
7188                         lpfc_debugfs_disc_trc(phba->pport,
7189                                 LPFC_DISC_TRC_MBOX,
7190                                 "MBOX Send:       cmd:x%x mb:x%x x%x",
7191                                 (uint32_t)mbx->mbxCommand,
7192                                 mbx->un.varWords[0], mbx->un.varWords[1]);
7193                 }
7194         }
7195
7196         psli->slistat.mbox_cmd++;
7197         evtctr = psli->slistat.mbox_event;
7198
7199         /* next set own bit for the adapter and copy over command word */
7200         mbx->mbxOwner = OWN_CHIP;
7201
7202         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7203                 /* Populate mbox extension offset word. */
7204                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
7205                         *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7206                                 = (uint8_t *)phba->mbox_ext
7207                                   - (uint8_t *)phba->mbox;
7208                 }
7209
7210                 /* Copy the mailbox extension data */
7211                 if (pmbox->in_ext_byte_len && pmbox->context2) {
7212                         lpfc_sli_pcimem_bcopy(pmbox->context2,
7213                                 (uint8_t *)phba->mbox_ext,
7214                                 pmbox->in_ext_byte_len);
7215                 }
7216                 /* Copy command data to host SLIM area */
7217                 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
7218         } else {
7219                 /* Populate mbox extension offset word. */
7220                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
7221                         *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7222                                 = MAILBOX_HBA_EXT_OFFSET;
7223
7224                 /* Copy the mailbox extension data */
7225                 if (pmbox->in_ext_byte_len && pmbox->context2) {
7226                         lpfc_memcpy_to_slim(phba->MBslimaddr +
7227                                 MAILBOX_HBA_EXT_OFFSET,
7228                                 pmbox->context2, pmbox->in_ext_byte_len);
7229
7230                 }
7231                 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
7232                         /* copy command data into host mbox for cmpl */
7233                         lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
7234                 }
7235
7236                 /* First copy mbox command data to HBA SLIM, skip past first
7237                    word */
7238                 to_slim = phba->MBslimaddr + sizeof (uint32_t);
7239                 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
7240                             MAILBOX_CMD_SIZE - sizeof (uint32_t));
7241
7242                 /* Next copy over first word, with mbxOwner set */
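                     /* Writing word0 last ensures the chip does not see
                      * OWN_CHIP until the rest of the command is already
                      * in SLIM.
                      */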
7243                 ldata = *((uint32_t *)mbx);
7244                 to_slim = phba->MBslimaddr;
7245                 writel(ldata, to_slim);
7246                 readl(to_slim); /* flush */
7247
7248                 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
7249                         /* switch over to host mailbox */
7250                         psli->sli_flag |= LPFC_SLI_ACTIVE;
7251                 }
7252         }
7253
7254         wmb();
7255
7256         switch (flag) {
7257         case MBX_NOWAIT:
7258                 /* Set up reference to mailbox command */
7259                 psli->mbox_active = pmbox;
7260                 /* Interrupt board to do it */
7261                 writel(CA_MBATT, phba->CAregaddr);
7262                 readl(phba->CAregaddr); /* flush */
7263                 /* Don't wait for it to finish, just return */
7264                 break;
7265
7266         case MBX_POLL:
7267                 /* Set up null reference to mailbox command */
7268                 psli->mbox_active = NULL;
7269                 /* Interrupt board to do it */
7270                 writel(CA_MBATT, phba->CAregaddr);
7271                 readl(phba->CAregaddr); /* flush */
7272
7273                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7274                         /* First read mbox status word */
7275                         word0 = *((uint32_t *)phba->mbox);
7276                         word0 = le32_to_cpu(word0);
7277                 } else {
7278                         /* First read mbox status word */
7279                         if (lpfc_readl(phba->MBslimaddr, &word0)) {
7280                                 spin_unlock_irqrestore(&phba->hbalock,
7281                                                        drvr_flag);
7282                                 goto out_not_finished;
7283                         }
7284                 }
7285
7286                 /* Read the HBA Host Attention Register */
7287                 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7288                         spin_unlock_irqrestore(&phba->hbalock,
7289                                                        drvr_flag);
7290                         goto out_not_finished;
7291                 }
7292                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7293                                                         1000) + jiffies;
7294                 i = 0;
7295                 /* Wait for command to complete */
7296                 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
7297                        (!(ha_copy & HA_MBATT) &&
7298                         (phba->link_state > LPFC_WARM_START))) {
7299                         if (time_after(jiffies, timeout)) {
7300                                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7301                                 spin_unlock_irqrestore(&phba->hbalock,
7302                                                        drvr_flag);
7303                                 goto out_not_finished;
7304                         }
7305
7306                         /* Check if we took a mbox interrupt while we were
7307                            polling */
7308                         if (((word0 & OWN_CHIP) != OWN_CHIP)
7309                             && (evtctr != psli->slistat.mbox_event))
7310                                 break;
7311
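                             /* Back off after ~10 quick checks: sleep 1 ms
                              * between polls, dropping the lock across the
                              * sleep.
                              */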
7312                         if (i++ > 10) {
7313                                 spin_unlock_irqrestore(&phba->hbalock,
7314                                                        drvr_flag);
7315                                 msleep(1);
7316                                 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7317                         }
7318
7319                         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7320                                 /* First copy command data */
7321                                 word0 = *((uint32_t *)phba->mbox);
7322                                 word0 = le32_to_cpu(word0);
7323                                 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
7324                                         MAILBOX_t *slimmb;
7325                                         uint32_t slimword0;
7326                                         /* Check real SLIM for any errors */
7327                                         slimword0 = readl(phba->MBslimaddr);
7328                                         slimmb = (MAILBOX_t *) & slimword0;
7329                                         if (((slimword0 & OWN_CHIP) != OWN_CHIP)
7330                                             && slimmb->mbxStatus) {
7331                                                 psli->sli_flag &=
7332                                                     ~LPFC_SLI_ACTIVE;
7333                                                 word0 = slimword0;
7334                                         }
7335                                 }
7336                         } else {
7337                                 /* First copy command data */
7338                                 word0 = readl(phba->MBslimaddr);
7339                         }
7340                         /* Read the HBA Host Attention Register */
7341                         if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7342                                 spin_unlock_irqrestore(&phba->hbalock,
7343                                                        drvr_flag);
7344                                 goto out_not_finished;
7345                         }
7346                 }
7347
7348                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7349                         /* copy results back to user */
7350                         lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE);
7351                         /* Copy the mailbox extension data */
7352                         if (pmbox->out_ext_byte_len && pmbox->context2) {
7353                                 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
7354                                                       pmbox->context2,
7355                                                       pmbox->out_ext_byte_len);
7356                         }
7357                 } else {
7358                         /* First copy command data */
7359                         lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
7360                                                         MAILBOX_CMD_SIZE);
7361                         /* Copy the mailbox extension data */
7362                         if (pmbox->out_ext_byte_len && pmbox->context2) {
7363                                 lpfc_memcpy_from_slim(pmbox->context2,
7364                                         phba->MBslimaddr +
7365                                         MAILBOX_HBA_EXT_OFFSET,
7366                                         pmbox->out_ext_byte_len);
7367                         }
7368                 }
7369
7370                 writel(HA_MBATT, phba->HAregaddr);
7371                 readl(phba->HAregaddr); /* flush */
7372
7373                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7374                 status = mbx->mbxStatus;
7375         }
7376
7377         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7378         return status;
7379
7380 out_not_finished:
7381         if (processing_queue) {
7382                 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
7383                 lpfc_mbox_cmpl_put(phba, pmbox);
7384         }
7385         return MBX_NOT_FINISHED;
7386 }
7387
7388 /**
7389  * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
7390  * @phba: Pointer to HBA context object.
7391  *
7392  * The function blocks the posting of SLI4 asynchronous mailbox commands from
7393  * the driver internal pending mailbox queue. It will then try to wait out the
7394  * possible outstanding mailbox command before returning.
7395  *
7396  * Returns:
7397  *      0 - the outstanding mailbox command completed; otherwise, the wait for
7398  *      the outstanding mailbox command timed out.
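      *
      * A successful block is expected to be paired with a later call to
      * lpfc_sli4_async_mbox_unblock() to resume async posting.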
7399  **/
7400 static int
7401 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
7402 {
7403         struct lpfc_sli *psli = &phba->sli;
7404         int rc = 0;
7405         unsigned long timeout = 0;
7406
7407         /* Mark the asynchronous mailbox command posting as blocked */
7408         spin_lock_irq(&phba->hbalock);
7409         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7410         /* Determine how long we might wait for the active mailbox
7411          * command to be gracefully completed by firmware.
7412          */
7413         if (phba->sli.mbox_active)
7414                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
7415                                                 phba->sli.mbox_active) *
7416                                                 1000) + jiffies;
7417         spin_unlock_irq(&phba->hbalock);
7418
7419         /* Make sure the mailbox is really active */
7420         if (timeout)
7421                 lpfc_sli4_process_missed_mbox_completions(phba);
7422
7423         /* Wait for the outstanding mailbox command to complete */
7424         while (phba->sli.mbox_active) {
7425                 /* Check active mailbox complete status every 2ms */
7426                 msleep(2);
7427                 if (time_after(jiffies, timeout)) {
7428                         /* Timeout, mark the outstanding cmd as not complete */
7429                         rc = 1;
7430                         break;
7431                 }
7432         }
7433
7434         /* Cannot cleanly block the async mailbox command, fail it */
7435         if (rc) {
7436                 spin_lock_irq(&phba->hbalock);
7437                 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7438                 spin_unlock_irq(&phba->hbalock);
7439         }
7440         return rc;
7441 }
7442
7443 /**
7444  * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
7445  * @phba: Pointer to HBA context object.
7446  *
7447  * The function unblocks and resumes posting of SLI4 asynchronous mailbox
7448  * commands from the driver internal pending mailbox queue. It makes sure
7449  * that there is no outstanding mailbox command before resuming posting
7450  * asynchronous mailbox commands. If, for any reason, there is an
7451  * outstanding mailbox command, it will try to wait it out before resuming
7452  * mailbox command posting.
7453  **/
7454 static void
7455 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7456 {
7457         struct lpfc_sli *psli = &phba->sli;
7458
7459         spin_lock_irq(&phba->hbalock);
7460         if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7461                 /* Asynchronous mailbox posting is not blocked, do nothing */
7462                 spin_unlock_irq(&phba->hbalock);
7463                 return;
7464         }
7465
7466         /* The outstanding synchronous mailbox command is guaranteed to
7467          * complete, either successfully or by timing out. After a timeout,
7468          * the outstanding mailbox command is always removed, so just
7469          * unblock posting of async mailbox commands and resume.
7470          */
7471         psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7472         spin_unlock_irq(&phba->hbalock);
7473
7474         /* wake up worker thread to post asynchronous mailbox command */
7475         lpfc_worker_wake_up(phba);
7476 }
7477
7478 /**
7479  * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
7480  * @phba: Pointer to HBA context object.
7481  * @mboxq: Pointer to mailbox object.
7482  *
7483  * The function waits for the bootstrap mailbox register ready bit from
7484  * the port, up to the regular mailbox command timeout value.
7485  *
      * Returns:
7486  *      0 - no timeout on waiting for bootstrap mailbox register ready.
7487  *      MBXERR_ERROR - wait for bootstrap mailbox register timed out.
7488  **/
7489 static int
7490 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7491 {
7492         uint32_t db_ready;
7493         unsigned long timeout;
7494         struct lpfc_register bmbx_reg;
7495
7496         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7497                                    * 1000) + jiffies;
7498
7499         do {
7500                 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7501                 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7502                 if (!db_ready)
7503                         msleep(2);
7504
7505                 if (time_after(jiffies, timeout))
7506                         return MBXERR_ERROR;
7507         } while (!db_ready);
7508
7509         return 0;
7510 }
7511
7512 /**
7513  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7514  * @phba: Pointer to HBA context object.
7515  * @mboxq: Pointer to mailbox object.
7516  *
7517  * The function posts a mailbox to the port.  The mailbox is expected
7518  * to be completely filled in and ready for the port to operate on it.
7519  * This routine executes a synchronous completion operation on the
7520  * mailbox by polling for its completion.
7521  *
7522  * The caller must not be holding any locks when calling this routine.
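      *
      * The bootstrap post sequence is, in order: wait for the ready bit,
      * copy the MQE into the bmbx region, write the high and then the low
      * DMA address of that region to BMBXregaddr (waiting for the ready
      * bit after each write), then read back the MQE and MCQE for status.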
7523  *
7524  * Returns:
7525  *      MBX_SUCCESS - mailbox posted successfully
7526  *      Any of the MBX error values.
7527  **/
7528 static int
7529 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7530 {
7531         int rc = MBX_SUCCESS;
7532         unsigned long iflag;
7533         uint32_t mcqe_status;
7534         uint32_t mbx_cmnd;
7535         struct lpfc_sli *psli = &phba->sli;
7536         struct lpfc_mqe *mb = &mboxq->u.mqe;
7537         struct lpfc_bmbx_create *mbox_rgn;
7538         struct dma_address *dma_address;
7539
7540         /*
7541          * Only one mailbox can be active to the bootstrap mailbox region
7542          * at a time and there is no queueing provided.
7543          */
7544         spin_lock_irqsave(&phba->hbalock, iflag);
7545         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7546                 spin_unlock_irqrestore(&phba->hbalock, iflag);
7547                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7548                                 "(%d):2532 Mailbox command x%x (x%x/x%x) "
7549                                 "cannot issue Data: x%x x%x\n",
7550                                 mboxq->vport ? mboxq->vport->vpi : 0,
7551                                 mboxq->u.mb.mbxCommand,
7552                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7553                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7554                                 psli->sli_flag, MBX_POLL);
7555                 return MBXERR_ERROR;
7556         }
7557         /* The driver grabs the token and owns it until released */
7558         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7559         phba->sli.mbox_active = mboxq;
7560         spin_unlock_irqrestore(&phba->hbalock, iflag);
7561
7562         /* wait for the bootstrap mbox register to become ready */
7563         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7564         if (rc)
7565                 goto exit;
7566
7567         /*
7568          * Initialize the bootstrap memory region to avoid stale data areas
7569          * in the mailbox post.  Then copy the caller's mailbox contents to
7570          * the bmbx mailbox region.
7571          */
7572         mbx_cmnd = bf_get(lpfc_mqe_command, mb);
7573         memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
7574         lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
7575                               sizeof(struct lpfc_mqe));
7576
7577         /* Post the high mailbox dma address to the port and wait for ready. */
7578         dma_address = &phba->sli4_hba.bmbx.dma_address;
7579         writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
7580
7581         /* wait for the bootstrap mbox register ready after hi-address write */
7582         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7583         if (rc)
7584                 goto exit;
7585
7586         /* Post the low mailbox dma address to the port. */
7587         writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
7588
7589         /* wait for the bootstrap mbox register ready after low-address write */
7590         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7591         if (rc)
7592                 goto exit;
7593
7594         /*
7595          * Read the CQ to ensure the mailbox has completed.
7596          * If so, update the mailbox status so that the upper layers
7597          * can complete the request normally.
7598          */
7599         lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
7600                               sizeof(struct lpfc_mqe));
7601         mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
7602         lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
7603                               sizeof(struct lpfc_mcqe));
7604         mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
7605         /*
7606          * When the CQE status indicates a failure and the mailbox status
7607          * indicates success then copy the CQE status into the mailbox status
7608          * (and prefix it with x4000).
7609          */
7610         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
7611                 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
7612                         bf_set(lpfc_mqe_status, mb,
7613                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
7614                 rc = MBXERR_ERROR;
7615         } else
7616                 lpfc_sli4_swap_str(phba, mboxq);
7617
7618         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7619                         "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
7620                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
7621                         " x%x x%x CQ: x%x x%x x%x x%x\n",
7622                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7623                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7624                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7625                         bf_get(lpfc_mqe_status, mb),
7626                         mb->un.mb_words[0], mb->un.mb_words[1],
7627                         mb->un.mb_words[2], mb->un.mb_words[3],
7628                         mb->un.mb_words[4], mb->un.mb_words[5],
7629                         mb->un.mb_words[6], mb->un.mb_words[7],
7630                         mb->un.mb_words[8], mb->un.mb_words[9],
7631                         mb->un.mb_words[10], mb->un.mb_words[11],
7632                         mb->un.mb_words[12], mboxq->mcqe.word0,
7633                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
7634                         mboxq->mcqe.trailer);
7635 exit:
7636         /* We are holding the token, so no lock is needed for the release */
7637         spin_lock_irqsave(&phba->hbalock, iflag);
7638         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7639         phba->sli.mbox_active = NULL;
7640         spin_unlock_irqrestore(&phba->hbalock, iflag);
7641         return rc;
7642 }
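
/*
 * Editorial sketch (not driver code): the bootstrap-mailbox handshake
 * above is strictly ordered -- post the high DMA dword, poll the port
 * for readiness, post the low dword, poll again.  A minimal sketch of
 * that pattern, using only the register and helper already used above
 * (the helper name bmbx_post_addr is hypothetical):
 *
 *	static int bmbx_post_addr(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 *	{
 *		struct dma_address *da = &phba->sli4_hba.bmbx.dma_address;
 *		int rc;
 *
 *		writel(da->addr_hi, phba->sli4_hba.BMBXregaddr);
 *		rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
 *		if (rc)
 *			return rc;
 *		writel(da->addr_lo, phba->sli4_hba.BMBXregaddr);
 *		return lpfc_sli4_wait_bmbx_ready(phba, mboxq);
 *	}
 */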
7643
7644 /**
7645  * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
7646  * @phba: Pointer to HBA context object.
7647  * @mboxq: Pointer to mailbox object.
7648  * @flag: Flag indicating how the mailbox needs to be processed.
7649  *
7650  * This function is called by discovery code and HBA management code to submit
7651  * a mailbox command to firmware with SLI-4 interface spec.
7652  *
7653  * Return codes: the caller owns the mailbox command after the function
7654  * returns.
7655  **/
7656 static int
7657 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
7658                        uint32_t flag)
7659 {
7660         struct lpfc_sli *psli = &phba->sli;
7661         unsigned long iflags;
7662         int rc;
7663
7664         /* Dump the mailbox command at issue time if iDiag capture is set up */
7665         lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
7666
7667         rc = lpfc_mbox_dev_check(phba);
7668         if (unlikely(rc)) {
7669                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7670                                 "(%d):2544 Mailbox command x%x (x%x/x%x) "
7671                                 "cannot issue Data: x%x x%x\n",
7672                                 mboxq->vport ? mboxq->vport->vpi : 0,
7673                                 mboxq->u.mb.mbxCommand,
7674                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7675                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7676                                 psli->sli_flag, flag);
7677                 goto out_not_finished;
7678         }
7679
7680         /* Detect polling mode and jump to a handler */
7681         if (!phba->sli4_hba.intr_enable) {
7682                 if (flag == MBX_POLL)
7683                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7684                 else
7685                         rc = -EIO;
7686                 if (rc != MBX_SUCCESS)
7687                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7688                                         "(%d):2541 Mailbox command x%x "
7689                                         "(x%x/x%x) failure: "
7690                                         "mqe_sta: x%x mcqe_sta: x%x/x%x "
7691                                         "Data: x%x x%x\n",
7692                                         mboxq->vport ? mboxq->vport->vpi : 0,
7693                                         mboxq->u.mb.mbxCommand,
7694                                         lpfc_sli_config_mbox_subsys_get(phba,
7695                                                                         mboxq),
7696                                         lpfc_sli_config_mbox_opcode_get(phba,
7697                                                                         mboxq),
7698                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7699                                         bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7700                                         bf_get(lpfc_mcqe_ext_status,
7701                                                &mboxq->mcqe),
7702                                         psli->sli_flag, flag);
7703                 return rc;
7704         } else if (flag == MBX_POLL) {
7705                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7706                                 "(%d):2542 Try to issue mailbox command "
7707                                 "x%x (x%x/x%x) synchronously ahead of async "
7708                                 "mailbox command queue: x%x x%x\n",
7709                                 mboxq->vport ? mboxq->vport->vpi : 0,
7710                                 mboxq->u.mb.mbxCommand,
7711                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7712                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7713                                 psli->sli_flag, flag);
7714                 /* Try to block the asynchronous mailbox posting */
7715                 rc = lpfc_sli4_async_mbox_block(phba);
7716                 if (!rc) {
7717                         /* Successfully blocked, now issue sync mbox cmd */
7718                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7719                         if (rc != MBX_SUCCESS)
7720                                 lpfc_printf_log(phba, KERN_WARNING,
7721                                         LOG_MBOX | LOG_SLI,
7722                                         "(%d):2597 Sync Mailbox command "
7723                                         "x%x (x%x/x%x) failure: "
7724                                         "mqe_sta: x%x mcqe_sta: x%x/x%x "
7725                                         "Data: x%x x%x\n",
7726                                         mboxq->vport ? mboxq->vport->vpi : 0,
7727                                         mboxq->u.mb.mbxCommand,
7728                                         lpfc_sli_config_mbox_subsys_get(phba,
7729                                                                         mboxq),
7730                                         lpfc_sli_config_mbox_opcode_get(phba,
7731                                                                         mboxq),
7732                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7733                                         bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7734                                         bf_get(lpfc_mcqe_ext_status,
7735                                                &mboxq->mcqe),
7736                                         psli->sli_flag, flag);
7737                         /* Unblock the async mailbox posting afterward */
7738                         lpfc_sli4_async_mbox_unblock(phba);
7739                 }
7740                 return rc;
7741         }
7742
7743         /* Now, interrupt mode asynchronous mailbox command */
7744         rc = lpfc_mbox_cmd_check(phba, mboxq);
7745         if (rc) {
7746                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7747                                 "(%d):2543 Mailbox command x%x (x%x/x%x) "
7748                                 "cannot issue Data: x%x x%x\n",
7749                                 mboxq->vport ? mboxq->vport->vpi : 0,
7750                                 mboxq->u.mb.mbxCommand,
7751                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7752                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7753                                 psli->sli_flag, flag);
7754                 goto out_not_finished;
7755         }
7756
7757         /* Put the mailbox command on the driver's internal FIFO */
7758         psli->slistat.mbox_busy++;
7759         spin_lock_irqsave(&phba->hbalock, iflags);
7760         lpfc_mbox_put(phba, mboxq);
7761         spin_unlock_irqrestore(&phba->hbalock, iflags);
7762         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7763                         "(%d):0354 Mbox cmd issue - Enqueue Data: "
7764                         "x%x (x%x/x%x) x%x x%x x%x\n",
7765                         mboxq->vport ? mboxq->vport->vpi : 0xffffff,
7766                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7767                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7768                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7769                         phba->pport->port_state,
7770                         psli->sli_flag, MBX_NOWAIT);
7771         /* Wake up the worker thread to post the mailbox command from the head */
7772         lpfc_worker_wake_up(phba);
7773
7774         return MBX_BUSY;
7775
7776 out_not_finished:
7777         return MBX_NOT_FINISHED;
7778 }
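
/*
 * Editorial sketch (not driver code): the two dispatch paths above from
 * a caller's point of view, assuming only the flags and return codes
 * this function already uses plus the driver's mailbox mempool.  With
 * interrupts disabled only MBX_POLL is accepted; otherwise MBX_NOWAIT
 * enqueues the command and returns MBX_BUSY:
 *
 *	rc = lpfc_sli_issue_mbox(phba, mboxq,
 *				 phba->sli4_hba.intr_enable ?
 *				 MBX_NOWAIT : MBX_POLL);
 *	switch (rc) {
 *	case MBX_SUCCESS:	// polled command completed
 *		break;
 *	case MBX_BUSY:		// enqueued; cmpl handler runs later
 *		break;
 *	default:		// e.g. MBX_NOT_FINISHED: caller owns mboxq
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 *	}
 */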
7779
7780 /**
7781  * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
7782  * @phba: Pointer to HBA context object.
7783  *
7784  * This function is called by the worker thread to send a mailbox command to
7785  * SLI4 HBA firmware.
7786  *
7787  **/
7788 int
7789 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
7790 {
7791         struct lpfc_sli *psli = &phba->sli;
7792         LPFC_MBOXQ_t *mboxq;
7793         int rc = MBX_SUCCESS;
7794         unsigned long iflags;
7795         struct lpfc_mqe *mqe;
7796         uint32_t mbx_cmnd;
7797
7798         /* Check interrupt mode before posting an async mailbox command */
7799         if (unlikely(!phba->sli4_hba.intr_enable))
7800                 return MBX_NOT_FINISHED;
7801
7802         /* Check for mailbox command service token */
7803         spin_lock_irqsave(&phba->hbalock, iflags);
7804         if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7805                 spin_unlock_irqrestore(&phba->hbalock, iflags);
7806                 return MBX_NOT_FINISHED;
7807         }
7808         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7809                 spin_unlock_irqrestore(&phba->hbalock, iflags);
7810                 return MBX_NOT_FINISHED;
7811         }
7812         if (unlikely(phba->sli.mbox_active)) {
7813                 spin_unlock_irqrestore(&phba->hbalock, iflags);
7814                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7815                                 "0384 There is a pending active mailbox cmd\n");
7816                 return MBX_NOT_FINISHED;
7817         }
7818         /* Take the mailbox command service token */
7819         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7820
7821         /* Get the next mailbox command from head of queue */
7822         mboxq = lpfc_mbox_get(phba);
7823
7824         /* If no more mailbox commands are waiting to be posted, we're done */
7825         if (!mboxq) {
7826                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7827                 spin_unlock_irqrestore(&phba->hbalock, iflags);
7828                 return MBX_SUCCESS;
7829         }
7830         phba->sli.mbox_active = mboxq;
7831         spin_unlock_irqrestore(&phba->hbalock, iflags);
7832
7833         /* Check device readiness for posting mailbox command */
7834         rc = lpfc_mbox_dev_check(phba);
7835         if (unlikely(rc))
7836                 /* The driver cleanup routine will clean up the pending mailbox */
7837                 goto out_not_finished;
7838
7839         /* Prepare the mbox command to be posted */
7840         mqe = &mboxq->u.mqe;
7841         mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
7842
7843         /* Start timer for the mbox_tmo and log some mailbox post messages */
7844         mod_timer(&psli->mbox_tmo, (jiffies +
7845                   msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
7846
7847         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7848                         "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
7849                         "x%x x%x\n",
7850                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7851                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7852                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7853                         phba->pport->port_state, psli->sli_flag);
7854
7855         if (mbx_cmnd != MBX_HEARTBEAT) {
7856                 if (mboxq->vport) {
7857                         lpfc_debugfs_disc_trc(mboxq->vport,
7858                                 LPFC_DISC_TRC_MBOX_VPORT,
7859                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7860                                 mbx_cmnd, mqe->un.mb_words[0],
7861                                 mqe->un.mb_words[1]);
7862                 } else {
7863                         lpfc_debugfs_disc_trc(phba->pport,
7864                                 LPFC_DISC_TRC_MBOX,
7865                                 "MBOX Send: cmd:x%x mb:x%x x%x",
7866                                 mbx_cmnd, mqe->un.mb_words[0],
7867                                 mqe->un.mb_words[1]);
7868                 }
7869         }
7870         psli->slistat.mbox_cmd++;
7871
7872         /* Post the mailbox command to the port */
7873         rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
7874         if (rc != MBX_SUCCESS) {
7875                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7876                                 "(%d):2533 Mailbox command x%x (x%x/x%x) "
7877                                 "cannot issue Data: x%x x%x\n",
7878                                 mboxq->vport ? mboxq->vport->vpi : 0,
7879                                 mboxq->u.mb.mbxCommand,
7880                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7881                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7882                                 psli->sli_flag, MBX_NOWAIT);
7883                 goto out_not_finished;
7884         }
7885
7886         return rc;
7887
7888 out_not_finished:
7889         spin_lock_irqsave(&phba->hbalock, iflags);
7890         if (phba->sli.mbox_active) {
7891                 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7892                 __lpfc_mbox_cmpl_put(phba, mboxq);
7893                 /* Release the token */
7894                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7895                 phba->sli.mbox_active = NULL;
7896         }
7897         spin_unlock_irqrestore(&phba->hbalock, iflags);
7898
7899         return MBX_NOT_FINISHED;
7900 }
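
/*
 * Editorial sketch (not driver code): the error path above hands
 * MBX_NOT_FINISHED back through mbxStatus, so a typical completion
 * handler checks that field before releasing the mailbox.  The handler
 * name below is hypothetical; the mempool is the driver's mailbox pool:
 *
 *	static void example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 *	{
 *		if (pmb->u.mb.mbxStatus)
 *			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
 *					"mbox cmd x%x failed, status x%x\n",
 *					pmb->u.mb.mbxCommand,
 *					pmb->u.mb.mbxStatus);
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *	}
 */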
7901
7902 /**
7903  * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
7904  * @phba: Pointer to HBA context object.
7905  * @pmbox: Pointer to mailbox object.
7906  * @flag: Flag indicating how the mailbox needs to be processed.
7907  *
7908  * This routine wraps the actual SLI-3 or SLI-4 mailbox issuing routine via
7909  * the API jump table function pointer in the lpfc_hba struct.
7910  *
7911  * Return codes: the caller owns the mailbox command after the function
7912  * returns.
7913  **/
7914 int
7915 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
7916 {
7917         return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
7918 }
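
/*
 * Editorial sketch (not driver code): typical use of the wrapper,
 * assuming the caller uses the driver's mailbox mempool and default
 * completion handler (both appear elsewhere in lpfc); lpfc_read_rev()
 * is picked here only as an example of a mailbox setup helper:
 *
 *	LPFC_MBOXQ_t *mbox;
 *
 *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mbox)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, mbox);		// build the command
 *	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
 *		mempool_free(mbox, phba->mbox_mem_pool);
 */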
7919
7920 /**
7921  * lpfc_mbox_api_table_setup - Set up mbox api function jump table
7922  * @phba: The hba struct for which this call is being executed.
7923  * @dev_grp: The HBA PCI-Device group number.
7924  *
7925  * This routine sets up the mbox interface API function jump table in @phba
7926  * struct.
7927  * Returns: 0 - success, -ENODEV - failure.
7928  **/
7929 int
7930 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7931 {
7932
7933         switch (dev_grp) {
7934         case LPFC_PCI_DEV_LP:
7935                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
7936                 phba->lpfc_sli_handle_slow_ring_event =
7937                                 lpfc_sli_handle_slow_ring_event_s3;
7938                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
7939                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
7940                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
7941                 break;
7942         case LPFC_PCI_DEV_OC:
7943                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
7944                 phba->lpfc_sli_handle_slow_ring_event =
7945                                 lpfc_sli_handle_slow_ring_event_s4;
7946                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
7947                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
7948                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
7949                 break;
7950         default:
7951                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7952                                 "1420 Invalid HBA PCI-device group: 0x%x\n",
7953                                 dev_grp);
7954                 return -ENODEV;
7956         }
7957         return 0;
7958 }
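
/*
 * Editorial sketch (not driver code): the jump table is filled in once
 * at probe time, after which generic code stays SLI-version agnostic.
 * Using only the members set above:
 *
 *	if (lpfc_mbox_api_table_setup(phba, dev_grp))
 *		return -ENODEV;	// invalid PCI-device group
 *	...
 *	rc = phba->lpfc_sli_issue_mbox(phba, pmb, flag);  // _s3 or _s4
 */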
7959
7960 /**
7961  * __lpfc_sli_ringtx_put - Add an iocb to the txq
7962  * @phba: Pointer to HBA context object.
7963  * @pring: Pointer to driver SLI ring object.
7964  * @piocb: Pointer to the command iocb to be added.
7965  *
7966  * This function is called with hbalock held to add a command
7967  * iocb to the txq when SLI layer cannot submit the command iocb
7968  * to the ring.
7969  **/
7970 void
7971 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7972                     struct lpfc_iocbq *piocb)
7973 {
7974         lockdep_assert_held(&phba->hbalock);
7975         /* Insert the caller's iocb in the txq tail for later processing. */
7976         list_add_tail(&piocb->list, &pring->txq);
7977 }
7978
7979 /**
7980  * lpfc_sli_next_iocb - Get the next iocb in the txq
7981  * @phba: Pointer to HBA context object.
7982  * @pring: Pointer to driver SLI ring object.
7983  * @piocb: Pointer to address of newly added command iocb.
7984  *
7985  * This function is called with hbalock held before a new
7986  * iocb is submitted to the firmware. It drains the txq so
7987  * that iocbs already queued there are flushed to the
7988  * firmware before any new iocb is submitted.
7989  * If there are iocbs in the txq which need to be submitted,
7990  * lpfc_sli_next_iocb dequeues the first element of the txq
7991  * and returns it.
7992  * If the txq is empty, the function returns *piocb and sets
7993  * *piocb to NULL; the caller must then check *piocb to see
7994  * whether any more commands remain to be processed.
7995  **/
7996 static struct lpfc_iocbq *
7997 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7998                    struct lpfc_iocbq **piocb)
7999 {
8000         struct lpfc_iocbq *nextiocb;
8001
8002         lockdep_assert_held(&phba->hbalock);
8003
8004         nextiocb = lpfc_sli_ringtx_get(phba, pring);
8005         if (!nextiocb) {
8006                 nextiocb = *piocb;
8007                 *piocb = NULL;
8008         }
8009
8010         return nextiocb;
8011 }
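
/*
 * Editorial note: the intended call pattern -- drain the txq ahead of
 * the new iocb until the ring fills or nothing is left to submit.  This
 * is exactly the loop __lpfc_sli_issue_iocb_s3() uses below:
 *
 *	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
 *	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
 *		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
 */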
8012
8013 /**
8014  * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
8015  * @phba: Pointer to HBA context object.
8016  * @ring_number: SLI ring number to issue iocb on.
8017  * @piocb: Pointer to command iocb.
8018  * @flag: Flag indicating if this command can be put into txq.
8019  *
8020  * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
8021  * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
8022  * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
8023  * flag is turned on, the function returns IOCB_ERROR. When the link is down,
8024  * this function allows only iocbs for posting buffers. This function finds
8025  * next available slot in the command ring and posts the command to the
8026  * available slot and writes the port attention register to request HBA start
8027  * processing new iocb. If there is no slot available in the ring and
8028  * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
8029  * the function returns IOCB_BUSY.
8030  *
8031  * This function is called with hbalock held. The function will return success
8032  * after it successfully submits the iocb to firmware or after adding it
8033  * to the txq.
8034  **/
8035 static int
8036 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
8037                     struct lpfc_iocbq *piocb, uint32_t flag)
8038 {
8039         struct lpfc_iocbq *nextiocb;
8040         IOCB_t *iocb;
8041         struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
8042
8043         lockdep_assert_held(&phba->hbalock);
8044
8045         if (piocb->iocb_cmpl && (!piocb->vport) &&
8046            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
8047            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
8048                 lpfc_printf_log(phba, KERN_ERR,
8049                                 LOG_SLI | LOG_VPORT,
8050                                 "1807 IOCB x%x failed. No vport\n",
8051                                 piocb->iocb.ulpCommand);
8052                 dump_stack();
8053                 return IOCB_ERROR;
8054         }
8055
8057         /* If the PCI channel is in offline state, do not post iocbs. */
8058         if (unlikely(pci_channel_offline(phba->pcidev)))
8059                 return IOCB_ERROR;
8060
8061         /* If HBA has a deferred error attention, fail the iocb. */
8062         if (unlikely(phba->hba_flag & DEFER_ERATT))
8063                 return IOCB_ERROR;
8064
8065         /*
8066          * We should never get an IOCB if we are in a < LINK_DOWN state
8067          */
8068         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
8069                 return IOCB_ERROR;
8070
8071         /*
8072          * Check to see if we are blocking IOCB processing because of an
8073          * outstanding event.
8074          */
8075         if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
8076                 goto iocb_busy;
8077
8078         if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
8079                 /*
8080                  * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
8081                  * can be issued if the link is not up.
8082                  */
8083                 switch (piocb->iocb.ulpCommand) {
8084                 case CMD_GEN_REQUEST64_CR:
8085                 case CMD_GEN_REQUEST64_CX:
8086                         if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
8087                                 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
8088                                         FC_RCTL_DD_UNSOL_CMD) ||
8089                                 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
8090                                         MENLO_TRANSPORT_TYPE))
8092                                 goto iocb_busy;
8093                         break;
8094                 case CMD_QUE_RING_BUF_CN:
8095                 case CMD_QUE_RING_BUF64_CN:
8096                         /*
8097                          * For IOCBs, like QUE_RING_BUF, that have no rsp ring
8098                          * completion, iocb_cmpl MUST be 0.
8099                          */
8100                         if (piocb->iocb_cmpl)
8101                                 piocb->iocb_cmpl = NULL;
8102                         /*FALLTHROUGH*/
8103                 case CMD_CREATE_XRI_CR:
8104                 case CMD_CLOSE_XRI_CN:
8105                 case CMD_CLOSE_XRI_CX:
8106                         break;
8107                 default:
8108                         goto iocb_busy;
8109                 }
8110
8111         /*
8112          * For FCP commands, we must be in a state where we can process link
8113          * attention events.
8114          */
8115         } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
8116                             !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
8117                 goto iocb_busy;
8118         }
8119
8120         while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
8121                (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
8122                 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
8123
8124         if (iocb)
8125                 lpfc_sli_update_ring(phba, pring);
8126         else
8127                 lpfc_sli_update_full_ring(phba, pring);
8128
8129         if (!piocb)
8130                 return IOCB_SUCCESS;
8131
8132         goto out_busy;
8133
8134  iocb_busy:
8135         pring->stats.iocb_cmd_delay++;
8136
8137  out_busy:
8138
8139         if (!(flag & SLI_IOCB_RET_IOCB)) {
8140                 __lpfc_sli_ringtx_put(phba, pring, piocb);
8141                 return IOCB_SUCCESS;
8142         }
8143
8144         return IOCB_BUSY;
8145 }
8146
8147 /**
8148  * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
8149  * @phba: Pointer to HBA context object.
8150  * @piocbq: Pointer to command iocb.
8151  * @sglq: Pointer to the scatter gather queue object.
8152  *
8153  * This routine converts the bpl or bde that is in the IOCB
8154  * to a sgl list for the sli4 hardware. The physical address
8155  * of the bpl/bde is converted back to a virtual address.
8156  * If the IOCB contains a BPL then the list of BDEs is
8157  * converted to sli4_sge's. If the IOCB contains a single
8158  * BDE then it is converted to a single sli4_sge.
8159  * The IOCB is still in CPU endianness so the contents of
8160  * the bpl can be used without byte swapping.
8161  *
8162  * Returns valid XRI = Success, NO_XRI = Failure.
8163  **/
8164 static uint16_t
8165 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
8166                 struct lpfc_sglq *sglq)
8167 {
8168         uint16_t xritag = NO_XRI;
8169         struct ulp_bde64 *bpl = NULL;
8170         struct ulp_bde64 bde;
8171         struct sli4_sge *sgl  = NULL;
8172         struct lpfc_dmabuf *dmabuf;
8173         IOCB_t *icmd;
8174         int numBdes = 0;
8175         int i = 0;
8176         uint32_t offset = 0; /* accumulated offset in the sg request list */
8177         int inbound = 0; /* number of sg reply entries inbound from firmware */
8178
8179         if (!piocbq || !sglq)
8180                 return xritag;
8181
8182         sgl  = (struct sli4_sge *)sglq->sgl;
8183         icmd = &piocbq->iocb;
8184         if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
8185                 return sglq->sli4_xritag;
8186         if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8187                 numBdes = icmd->un.genreq64.bdl.bdeSize /
8188                                 sizeof(struct ulp_bde64);
8189                 /* The addrHigh and addrLow fields within the IOCB
8190                  * have not been byteswapped yet so there is no
8191                  * need to swap them back.
8192                  */
8193                 if (piocbq->context3)
8194                         dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
8195                 else
8196                         return xritag;
8197
8198                 bpl  = (struct ulp_bde64 *)dmabuf->virt;
8199                 if (!bpl)
8200                         return xritag;
8201
8202                 for (i = 0; i < numBdes; i++) {
8203                         /* Should already be byte swapped. */
8204                         sgl->addr_hi = bpl->addrHigh;
8205                         sgl->addr_lo = bpl->addrLow;
8206
8207                         sgl->word2 = le32_to_cpu(sgl->word2);
8208                         if ((i+1) == numBdes)
8209                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
8210                         else
8211                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
8212                         /* swap the size field back to the cpu so we
8213                          * can assign it to the sgl.
8214                          */
8215                         bde.tus.w = le32_to_cpu(bpl->tus.w);
8216                         sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
8217                         /* The offsets in the sgl need to be accumulated
8218                          * separately for the request and reply lists.
8219                          * The request is always first, the reply follows.
8220                          */
8221                         if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
8222                                 /* add up the reply sg entries */
8223                                 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
8224                                         inbound++;
8225                                 /* first inbound? reset the offset */
8226                                 if (inbound == 1)
8227                                         offset = 0;
8228                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
8229                                 bf_set(lpfc_sli4_sge_type, sgl,
8230                                         LPFC_SGE_TYPE_DATA);
8231                                 offset += bde.tus.f.bdeSize;
8232                         }
8233                         sgl->word2 = cpu_to_le32(sgl->word2);
8234                         bpl++;
8235                         sgl++;
8236                 }
8237         } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
8238                         /* The addrHigh and addrLow fields of the BDE have not
8239                          * been byteswapped yet so they need to be swapped
8240                          * before putting them in the sgl.
8241                          */
8242                         sgl->addr_hi =
8243                                 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
8244                         sgl->addr_lo =
8245                                 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
8246                         sgl->word2 = le32_to_cpu(sgl->word2);
8247                         bf_set(lpfc_sli4_sge_last, sgl, 1);
8248                         sgl->word2 = cpu_to_le32(sgl->word2);
8249                         sgl->sge_len =
8250                                 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
8251         }
8252         return sglq->sli4_xritag;
8253 }
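
/*
 * Editorial note: the word2 handling above is the standard
 * read-modify-write pattern for a little-endian descriptor word --
 * convert to CPU order, set the bitfields, convert back before the
 * port sees it:
 *
 *	sgl->word2 = le32_to_cpu(sgl->word2);	// LE -> CPU order
 *	bf_set(lpfc_sli4_sge_last, sgl, 1);	// modify in CPU order
 *	sgl->word2 = cpu_to_le32(sgl->word2);	// back to LE for the port
 */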
8254
8255 /**
8256  * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
8257  * @phba: Pointer to HBA context object.
8258  * @iocbq: Pointer to command iocb.
8259  * @wqe: Pointer to the work queue entry.
8260  *
8261  * This routine converts the iocb command to its Work Queue Entry
8262  * equivalent. The wqe pointer should not have any fields set when
8263  * this routine is called because it will memcpy over them.
8264  * This routine does not set the CQ_ID or the WQEC bits in the
8265  * wqe.
8266  *
8267  * Returns: 0 = Success, IOCB_ERROR = Failure.
8268  **/
8269 static int
8270 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8271                 union lpfc_wqe *wqe)
8272 {
8273         uint32_t xmit_len = 0, total_len = 0;
8274         uint8_t ct = 0;
8275         uint32_t fip;
8276         uint32_t abort_tag;
8277         uint8_t command_type = ELS_COMMAND_NON_FIP;
8278         uint8_t cmnd;
8279         uint16_t xritag;
8280         uint16_t abrt_iotag;
8281         struct lpfc_iocbq *abrtiocbq;
8282         struct ulp_bde64 *bpl = NULL;
8283         uint32_t els_id = LPFC_ELS_ID_DEFAULT;
8284         int numBdes, i;
8285         struct ulp_bde64 bde;
8286         struct lpfc_nodelist *ndlp;
8287         uint32_t *pcmd;
8288         uint32_t if_type;
8289
8290         fip = phba->hba_flag & HBA_FIP_SUPPORT;
8291         /* The fcp commands will set command type */
8292         if (iocbq->iocb_flag &  LPFC_IO_FCP)
8293                 command_type = FCP_COMMAND;
8294         else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
8295                 command_type = ELS_COMMAND_FIP;
8296         else
8297                 command_type = ELS_COMMAND_NON_FIP;
8298
8299         if (phba->fcp_embed_io)
8300                 memset(wqe, 0, sizeof(union lpfc_wqe128));
8301         /* Some of the fields are in the right position already */
8302         memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
8303         wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
8304         wqe->generic.wqe_com.word10 = 0;
8305
8306         abort_tag = (uint32_t) iocbq->iotag;
8307         xritag = iocbq->sli4_xritag;
8308         /* words0-2 bpl convert bde */
8309         if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8310                 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8311                                 sizeof(struct ulp_bde64);
8312                 bpl  = (struct ulp_bde64 *)
8313                         ((struct lpfc_dmabuf *)iocbq->context3)->virt;
8314                 if (!bpl)
8315                         return IOCB_ERROR;
8316
8317                 /* Should already be byte swapped. */
8318                 wqe->generic.bde.addrHigh =  le32_to_cpu(bpl->addrHigh);
8319                 wqe->generic.bde.addrLow =  le32_to_cpu(bpl->addrLow);
8320                 /* swap the size field back to the cpu so we
8321                  * can assign it to the sgl.
8322                  */
8323                 wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
8324                 xmit_len = wqe->generic.bde.tus.f.bdeSize;
8325                 total_len = 0;
8326                 for (i = 0; i < numBdes; i++) {
8327                         bde.tus.w  = le32_to_cpu(bpl[i].tus.w);
8328                         total_len += bde.tus.f.bdeSize;
8329                 }
8330         } else
8331                 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
8332
8333         iocbq->iocb.ulpIoTag = iocbq->iotag;
8334         cmnd = iocbq->iocb.ulpCommand;
8335
8336         switch (iocbq->iocb.ulpCommand) {
8337         case CMD_ELS_REQUEST64_CR:
8338                 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
8339                         ndlp = iocbq->context_un.ndlp;
8340                 else
8341                         ndlp = (struct lpfc_nodelist *)iocbq->context1;
8342                 if (!iocbq->iocb.ulpLe) {
8343                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8344                                 "2007 Only Limited Edition cmd Format"
8345                                 " supported 0x%x\n",
8346                                 iocbq->iocb.ulpCommand);
8347                         return IOCB_ERROR;
8348                 }
8349
8350                 wqe->els_req.payload_len = xmit_len;
8351                 /* Els_request64 has a TMO */
8352                 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
8353                         iocbq->iocb.ulpTimeout);
8354                 /* Need a VF for word 4; set the vf bit */
8355                 bf_set(els_req64_vf, &wqe->els_req, 0);
8356                 /* And a VFID for word 12 */
8357                 bf_set(els_req64_vfid, &wqe->els_req, 0);
8358                 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8359                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8360                        iocbq->iocb.ulpContext);
8361                 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
8362                 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
8363                 /* CCP CCPE PV PRI in word10 were set in the memcpy */
8364                 if (command_type == ELS_COMMAND_FIP)
8365                         els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
8366                                         >> LPFC_FIP_ELS_ID_SHIFT);
8367                 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8368                                         iocbq->context2)->virt);
8369                 if_type = bf_get(lpfc_sli_intf_if_type,
8370                                         &phba->sli4_hba.sli_intf);
8371                 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8372                         if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
8373                                 *pcmd == ELS_CMD_SCR ||
8374                                 *pcmd == ELS_CMD_FDISC ||
8375                                 *pcmd == ELS_CMD_LOGO ||
8376                                 *pcmd == ELS_CMD_PLOGI)) {
8377                                 bf_set(els_req64_sp, &wqe->els_req, 1);
8378                                 bf_set(els_req64_sid, &wqe->els_req,
8379                                         iocbq->vport->fc_myDID);
8380                                 if ((*pcmd == ELS_CMD_FLOGI) &&
8381                                         !(phba->fc_topology ==
8382                                                 LPFC_TOPOLOGY_LOOP))
8383                                         bf_set(els_req64_sid, &wqe->els_req, 0);
8384                                 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
8385                                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8386                                         phba->vpi_ids[iocbq->vport->vpi]);
8387                         } else if (pcmd && iocbq->context1) {
8388                                 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
8389                                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8390                                         phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8391                         }
8392                 }
8393                 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
8394                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8395                 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
8396                 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
8397                 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
8398                 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
8399                 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8400                 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
8401                 wqe->els_req.max_response_payload_len = total_len - xmit_len;
8402                 break;
8403         case CMD_XMIT_SEQUENCE64_CX:
8404                 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
8405                        iocbq->iocb.un.ulpWord[3]);
8406                 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
8407                        iocbq->iocb.unsli3.rcvsli3.ox_id);
8408                 /* The entire sequence is transmitted for this IOCB */
8409                 xmit_len = total_len;
8410                 cmnd = CMD_XMIT_SEQUENCE64_CR;
8411                 if (phba->link_flag & LS_LOOPBACK_MODE)
8412                         bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
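                        /*FALLTHROUGH*/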
8413         case CMD_XMIT_SEQUENCE64_CR:
8414                 /* word3 iocb=io_tag32 wqe=reserved */
8415                 wqe->xmit_sequence.rsvd3 = 0;
8416                 /* word4 relative_offset memcpy */
8417                 /* word5 r_ctl/df_ctl memcpy */
8418                 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
8419                 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
8420                 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
8421                        LPFC_WQE_IOD_WRITE);
8422                 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
8423                        LPFC_WQE_LENLOC_WORD12);
8424                 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
8425                 wqe->xmit_sequence.xmit_len = xmit_len;
8426                 command_type = OTHER_COMMAND;
8427                 break;
8428         case CMD_XMIT_BCAST64_CN:
8429                 /* word3 iocb=iotag32 wqe=seq_payload_len */
8430                 wqe->xmit_bcast64.seq_payload_len = xmit_len;
8431                 /* word4 iocb=rsvd wqe=rsvd */
8432                 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
8433                 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
8434                 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
8435                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8436                 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
8437                 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
8438                 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
8439                        LPFC_WQE_LENLOC_WORD3);
8440                 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
8441                 break;
8442         case CMD_FCP_IWRITE64_CR:
8443                 command_type = FCP_COMMAND_DATA_OUT;
8444                 /* word3 iocb=iotag wqe=payload_offset_len */
8445                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8446                 bf_set(payload_offset_len, &wqe->fcp_iwrite,
8447                        xmit_len + sizeof(struct fcp_rsp));
8448                 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
8449                        0);
8450                 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8451                 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8452                 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
8453                        iocbq->iocb.ulpFCP2Rcvy);
8454                 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
8455                 /* Always open the exchange */
8456                 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
8457                 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
8458                        LPFC_WQE_LENLOC_WORD4);
8459                 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
8460                 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
8461                 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8462                         bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
8463                         bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
8464                         if (iocbq->priority) {
8465                                 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8466                                        (iocbq->priority << 1));
8467                         } else {
8468                                 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8469                                        (phba->cfg_XLanePriority << 1));
8470                         }
8471                 }
8472                 /* Note, word 10 is already initialized to 0 */
8473
8474                 if (phba->fcp_embed_io) {
8475                         struct lpfc_scsi_buf *lpfc_cmd;
8476                         struct sli4_sge *sgl;
8477                         union lpfc_wqe128 *wqe128;
8478                         struct fcp_cmnd *fcp_cmnd;
8479                         uint32_t *ptr;
8480
8481                         /* 128 byte wqe support here */
8482                         wqe128 = (union lpfc_wqe128 *)wqe;
8483
8484                         lpfc_cmd = iocbq->context1;
8485                         sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8486                         fcp_cmnd = lpfc_cmd->fcp_cmnd;
8487
8488                         /* Word 0-2 - FCP_CMND */
8489                         wqe128->generic.bde.tus.f.bdeFlags =
8490                                 BUFF_TYPE_BDE_IMMED;
8491                         wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8492                         wqe128->generic.bde.addrHigh = 0;
8493                         wqe128->generic.bde.addrLow =  88;  /* Word 22 */
8494
8495                         bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1);
8496
8497                         /* Word 22-29  FCP CMND Payload */
8498                         ptr = &wqe128->words[22];
8499                         memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8500                 }
8501                 break;
8502         case CMD_FCP_IREAD64_CR:
8503                 /* word3 iocb=iotag wqe=payload_offset_len */
8504                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8505                 bf_set(payload_offset_len, &wqe->fcp_iread,
8506                        xmit_len + sizeof(struct fcp_rsp));
8507                 bf_set(cmd_buff_len, &wqe->fcp_iread,
8508                        0);
8509                 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8510                 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8511                 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
8512                        iocbq->iocb.ulpFCP2Rcvy);
8513                 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
8514                 /* Always open the exchange */
8515                 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
8516                 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
8517                        LPFC_WQE_LENLOC_WORD4);
8518                 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
8519                 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
8520                 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8521                         bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
8522                         bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
8523                         if (iocbq->priority) {
8524                                 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8525                                        (iocbq->priority << 1));
8526                         } else {
8527                                 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8528                                        (phba->cfg_XLanePriority << 1));
8529                         }
8530                 }
8531                 /* Note, word 10 is already initialized to 0 */
8532
8533                 if (phba->fcp_embed_io) {
8534                         struct lpfc_scsi_buf *lpfc_cmd;
8535                         struct sli4_sge *sgl;
8536                         union lpfc_wqe128 *wqe128;
8537                         struct fcp_cmnd *fcp_cmnd;
8538                         uint32_t *ptr;
8539
8540                         /* 128 byte wqe support here */
8541                         wqe128 = (union lpfc_wqe128 *)wqe;
8542
8543                         lpfc_cmd = iocbq->context1;
8544                         sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8545                         fcp_cmnd = lpfc_cmd->fcp_cmnd;
8546
8547                         /* Word 0-2 - FCP_CMND */
8548                         wqe128->generic.bde.tus.f.bdeFlags =
8549                                 BUFF_TYPE_BDE_IMMED;
8550                         wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8551                         wqe128->generic.bde.addrHigh = 0;
8552                         wqe128->generic.bde.addrLow =  88;  /* Word 22 */
8553
8554                         bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1);
8555
8556                         /* Word 22-29  FCP CMND Payload */
8557                         ptr = &wqe128->words[22];
8558                         memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8559                 }
8560                 break;
8561         case CMD_FCP_ICMND64_CR:
8562                 /* word3 iocb=iotag wqe=payload_offset_len */
8563                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8564                 bf_set(payload_offset_len, &wqe->fcp_icmd,
8565                        xmit_len + sizeof(struct fcp_rsp));
8566                 bf_set(cmd_buff_len, &wqe->fcp_icmd,
8567                        0);
8568                 /* word3 iocb=IO_TAG wqe=reserved */
8569                 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
8570                 /* Always open the exchange */
8571                 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
8572                 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
8573                 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
8574                 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
8575                        LPFC_WQE_LENLOC_NONE);
8576                 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
8577                        iocbq->iocb.ulpFCP2Rcvy);
8578                 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8579                         bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
8580                         bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
8581                         if (iocbq->priority) {
8582                                 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8583                                        (iocbq->priority << 1));
8584                         } else {
8585                                 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8586                                        (phba->cfg_XLanePriority << 1));
8587                         }
8588                 }
8589                 /* Note, word 10 is already initialized to 0 */
8590
8591                 if (phba->fcp_embed_io) {
8592                         struct lpfc_scsi_buf *lpfc_cmd;
8593                         struct sli4_sge *sgl;
8594                         union lpfc_wqe128 *wqe128;
8595                         struct fcp_cmnd *fcp_cmnd;
8596                         uint32_t *ptr;
8597
8598                         /* 128 byte wqe support here */
8599                         wqe128 = (union lpfc_wqe128 *)wqe;
8600
8601                         lpfc_cmd = iocbq->context1;
8602                         sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8603                         fcp_cmnd = lpfc_cmd->fcp_cmnd;
8604
8605                         /* Word 0-2 - FCP_CMND */
8606                         wqe128->generic.bde.tus.f.bdeFlags =
8607                                 BUFF_TYPE_BDE_IMMED;
8608                         wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8609                         wqe128->generic.bde.addrHigh = 0;
8610                         wqe128->generic.bde.addrLow =  88;  /* Word 22 */
8611
8612                         bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1);
8613
8614                         /* Word 22-29  FCP CMND Payload */
8615                         ptr = &wqe128->words[22];
8616                         memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8617                 }
8618                 break;
8619         case CMD_GEN_REQUEST64_CR:
8620                 /* For this command calculate the xmit length of the
8621                  * request bde.
8622                  */
8623                 xmit_len = 0;
8624                 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8625                         sizeof(struct ulp_bde64);
8626                 for (i = 0; i < numBdes; i++) {
8627                         bde.tus.w = le32_to_cpu(bpl[i].tus.w);
8628                         if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
8629                                 break;
8630                         xmit_len += bde.tus.f.bdeSize;
8631                 }
8632                 /* word3 iocb=IO_TAG wqe=request_payload_len */
8633                 wqe->gen_req.request_payload_len = xmit_len;
8634                 /* word4 iocb=parameter wqe=relative_offset memcpy */
8635                 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
8636                 /* word6 context tag copied in memcpy */
8637                 if (iocbq->iocb.ulpCt_h  || iocbq->iocb.ulpCt_l) {
8638                         ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8639                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8640                                 "2015 Invalid CT %x command 0x%x\n",
8641                                 ct, iocbq->iocb.ulpCommand);
8642                         return IOCB_ERROR;
8643                 }
8644                 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
8645                 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
8646                 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
8647                 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
8648                 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
8649                 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
8650                 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8651                 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
8652                 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
8653                 command_type = OTHER_COMMAND;
8654                 break;
8655         case CMD_XMIT_ELS_RSP64_CX:
8656                 ndlp = (struct lpfc_nodelist *)iocbq->context1;
8657                 /* words0-2 BDE memcpy */
8658                 /* word3 iocb=iotag32 wqe=response_payload_len */
8659                 wqe->xmit_els_rsp.response_payload_len = xmit_len;
8660                 /* word4 */
8661                 wqe->xmit_els_rsp.word4 = 0;
8662                 /* word5 iocb=rsvd wqe=did */
8663                 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
8664                          iocbq->iocb.un.xseq64.xmit_els_remoteID);
8665
8666                 if_type = bf_get(lpfc_sli_intf_if_type,
8667                                         &phba->sli4_hba.sli_intf);
8668                 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8669                         if (iocbq->vport->fc_flag & FC_PT2PT) {
8670                                 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8671                                 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8672                                         iocbq->vport->fc_myDID);
8673                                 if (iocbq->vport->fc_myDID == Fabric_DID) {
8674                                         bf_set(wqe_els_did,
8675                                                 &wqe->xmit_els_rsp.wqe_dest, 0);
8676                                 }
8677                         }
8678                 }
8679                 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
8680                        ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8681                 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
8682                 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
8683                        iocbq->iocb.unsli3.rcvsli3.ox_id);
8684                 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
8685                         bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
8686                                phba->vpi_ids[iocbq->vport->vpi]);
8687                 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
8688                 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
8689                 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
8690                 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
8691                        LPFC_WQE_LENLOC_WORD3);
8692                 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
8693                 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
8694                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8695                 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8696                                         iocbq->context2)->virt);
8697                 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
8698                                 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8699                                 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8700                                         iocbq->vport->fc_myDID);
8701                                 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
8702                                 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
8703                                         phba->vpi_ids[phba->pport->vpi]);
8704                 }
8705                 command_type = OTHER_COMMAND;
8706                 break;
8707         case CMD_CLOSE_XRI_CN:
8708         case CMD_ABORT_XRI_CN:
8709         case CMD_ABORT_XRI_CX:
8710                 /* words 0-2 are reserved; the memcpy should leave them 0 */
8711                 /* port will send abts */
8712                 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
8713                 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
8714                         abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
8715                         fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
8716                 } else
8717                         fip = 0;
8718
8719                 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
8720                         /*
8721                          * The link is down, or the command was ELS_FIP
8722                          * so the fw does not need to send abts
8723                          * on the wire.
8724                          */
8725                         bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
8726                 else
8727                         bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
8728                 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
8729                 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
8730                 wqe->abort_cmd.rsrvd5 = 0;
8731                 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
8732                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8733                 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
8734                 /*
8735                  * The abort handler will send us CMD_ABORT_XRI_CN or
8736                  * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
8737                  */
8738                 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
8739                 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
8740                 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
8741                        LPFC_WQE_LENLOC_NONE);
8742                 cmnd = CMD_ABORT_XRI_CX;
8743                 command_type = OTHER_COMMAND;
8744                 xritag = 0;
8745                 break;
8746         case CMD_XMIT_BLS_RSP64_CX:
8747                 ndlp = (struct lpfc_nodelist *)iocbq->context1;
8748                 /* As the BLS ABTS RSP WQE is very different from other WQEs,
8749                  * we reconstruct this WQE here from scratch, based on the
8750                  * information in the iocbq.
8751                  */
8752                 memset(wqe, 0, sizeof(union lpfc_wqe));
8753                 /* OX_ID is invariant to who sent the ABTS to the CT exchange */
8754                 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
8755                        bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
8756                 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
8757                     LPFC_ABTS_UNSOL_INT) {
8758                         /* ABTS sent by initiator to CT exchange, the
8759                          * RX_ID field will be filled with the newly
8760                          * allocated responder XRI.
8761                          */
8762                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
8763                                iocbq->sli4_xritag);
8764                 } else {
8765                         /* ABTS sent by responder to CT exchange, the
8766                          * RX_ID field will be filled with the responder
8767                          * RX_ID from ABTS.
8768                          */
8769                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
8770                                bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
8771                 }
8772                 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
8773                 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
8774
8775                 /* Use CT=VPI */
8776                 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
8777                         ndlp->nlp_DID);
8778                 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
8779                         iocbq->iocb.ulpContext);
8780                 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
8781                 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
8782                         phba->vpi_ids[phba->pport->vpi]);
8783                 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
8784                 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
8785                        LPFC_WQE_LENLOC_NONE);
8786                 /* Overwrite the pre-set command type with OTHER_COMMAND */
8787                 command_type = OTHER_COMMAND;
8788                 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
8789                         bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
8790                                bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
8791                         bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
8792                                bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
8793                         bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
8794                                bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
8795                 }
8796
8797                 break;
8798         case CMD_XRI_ABORTED_CX:
8799         case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
8800         case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
8801         case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
8802         case CMD_FCP_TRSP64_CX: /* Target mode rcv */
8803         case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
8804         default:
8805                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8806                                 "2014 Invalid command 0x%x\n",
8807                                 iocbq->iocb.ulpCommand);
8808                 return IOCB_ERROR;
8810         }
8811
8812         if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
8813                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
8814         else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
8815                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
8816         else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
8817                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
8818         iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
8819                               LPFC_IO_DIF_INSERT);
8820         bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
8821         bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
8822         wqe->generic.wqe_com.abort_tag = abort_tag;
8823         bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
8824         bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
8825         bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
8826         bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
8827         return 0;
8828 }
8829
8830 /**
8831  * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
8832  * @phba: Pointer to HBA context object.
8833  * @ring_number: SLI ring number to issue iocb on.
8834  * @piocb: Pointer to command iocb.
8835  * @flag: Flag indicating if this command can be put into txq.
8836  *
8837  * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
8838  * an iocb command to an HBA with SLI-4 interface spec.
8839  *
8840  * This function is called with hbalock held. The function will return success
8841  * after it successfully submits the iocb to the firmware or after adding
8842  * it to the txq.
8843  **/
8844 static int
8845 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8846                          struct lpfc_iocbq *piocb, uint32_t flag)
8847 {
8848         struct lpfc_sglq *sglq;
8849         union lpfc_wqe *wqe;
8850         union lpfc_wqe128 wqe128;
8851         struct lpfc_queue *wq;
8852         struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
8853
8854         lockdep_assert_held(&phba->hbalock);
8855
8856         /*
8857          * The WQE can be either 64 or 128 bytes,
8858          * so allocate space on the stack assuming the largest.
8859          */
8860         wqe = (union lpfc_wqe *)&wqe128;
8861
8862         if (piocb->sli4_xritag == NO_XRI) {
8863                 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
8864                     piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
8865                         sglq = NULL;
8866                 else {
8867                         if (!list_empty(&pring->txq)) {
8868                                 if (!(flag & SLI_IOCB_RET_IOCB)) {
8869                                         __lpfc_sli_ringtx_put(phba,
8870                                                 pring, piocb);
8871                                         return IOCB_SUCCESS;
8872                                 } else {
8873                                         return IOCB_BUSY;
8874                                 }
8875                         } else {
8876                                 sglq = __lpfc_sli_get_sglq(phba, piocb);
8877                                 if (!sglq) {
8878                                         if (!(flag & SLI_IOCB_RET_IOCB)) {
8879                                                 __lpfc_sli_ringtx_put(phba,
8880                                                                 pring,
8881                                                                 piocb);
8882                                                 return IOCB_SUCCESS;
8883                                         } else
8884                                                 return IOCB_BUSY;
8885                                 }
8886                         }
8887                 }
8888         } else if (piocb->iocb_flag & LPFC_IO_FCP) {
8889                 /* These IOs already have an XRI and a mapped sgl. */
8890                 sglq = NULL;
8891         } else {
8892                 /*
8893                  * This is the continuation of a command (CX), so this
8894                  * sglq is on the active list.
8895                  */
8896                 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
8897                 if (!sglq)
8898                         return IOCB_ERROR;
8899         }
8900
8901         if (sglq) {
8902                 piocb->sli4_lxritag = sglq->sli4_lxritag;
8903                 piocb->sli4_xritag = sglq->sli4_xritag;
8904                 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
8905                         return IOCB_ERROR;
8906         }
8907
8908         if (lpfc_sli4_iocb2wqe(phba, piocb, wqe))
8909                 return IOCB_ERROR;
8910
8911         if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8912             (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
8913                 if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) {
8914                         wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
8915                 } else {
8916                         wq = phba->sli4_hba.oas_wq;
8917                 }
8918                 if (lpfc_sli4_wq_put(wq, wqe))
8919                         return IOCB_ERROR;
8920         } else {
8921                 if (unlikely(!phba->sli4_hba.els_wq))
8922                         return IOCB_ERROR;
8923                 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
8924                         return IOCB_ERROR;
8925         }
8926         lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
8927
8928         return 0;
8929 }
8930
8931 /**
8932  * __lpfc_sli_issue_iocb - Wrapper for the lockless version of issuing an iocb
8933  *
8934  * This routine wraps the actual lockless issue-IOCB routine through the
8935  * function pointer in the lpfc_hba struct.
8936  *
8937  * Return codes:
8938  * IOCB_ERROR - Error
8939  * IOCB_SUCCESS - Success
8940  * IOCB_BUSY - Busy
8941  **/
8942 int
8943 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8944                 struct lpfc_iocbq *piocb, uint32_t flag)
8945 {
8946         return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8947 }
8948
8949 /**
8950  * lpfc_sli_api_table_setup - Set up sli api function jump table
8951  * @phba: The hba struct for which this call is being executed.
8952  * @dev_grp: The HBA PCI-Device group number.
8953  *
8954  * This routine sets up the SLI interface API function jump table in @phba
8955  * struct.
8956  * Returns: 0 - success, -ENODEV - failure.
8957  **/
8958 int
8959 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8960 {
8961
8962         switch (dev_grp) {
8963         case LPFC_PCI_DEV_LP:
8964                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
8965                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
8966                 break;
8967         case LPFC_PCI_DEV_OC:
8968                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
8969                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
8970                 break;
8971         default:
8972                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8973                                 "1419 Invalid HBA PCI-device group: 0x%x\n",
8974                                 dev_grp);
8975                 return -ENODEV;
8977         }
8978         phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
8979         return 0;
8980 }
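
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * probe path derives dev_grp from the PCI device and installs the jump
 * table once, after which __lpfc_sli_issue_iocb() transparently
 * dispatches to the SLI-3 or SLI-4 routine.
 */
static inline int
lpfc_example_select_sli_api(struct lpfc_hba *phba, bool sli4)
{
        /* LPFC_PCI_DEV_LP: SLI-3 parts; LPFC_PCI_DEV_OC: SLI-4 parts */
        return lpfc_sli_api_table_setup(phba,
                        sli4 ? LPFC_PCI_DEV_OC : LPFC_PCI_DEV_LP);
}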
8981
8982 /**
8983  * lpfc_sli_calc_ring - Calculates which ring to use
8984  * @phba: Pointer to HBA context object.
8985  * @ring_number: Initial ring
8986  * @piocb: Pointer to command iocb.
8987  *
8988  * For SLI4, FCP IO can be deferred to one of many WQs, based on
8989  * fcp_wqidx, thus we need to calculate the corresponding ring.
8990  * Since ABORTs must go on the same WQ as the command they are
8991  * aborting, we use the command's fcp_wqidx.
8992  */
8993 static int
8994 lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
8995                     struct lpfc_iocbq *piocb)
8996 {
8997         if (phba->sli_rev < LPFC_SLI_REV4)
8998                 return ring_number;
8999
9000         if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
9001                 if (!(phba->cfg_fof) ||
9002                                 (!(piocb->iocb_flag & LPFC_IO_FOF))) {
9003                         if (unlikely(!phba->sli4_hba.fcp_wq))
9004                                 return LPFC_HBA_ERROR;
9005                         /*
9006                          * for abort iocb fcp_wqidx should already
9007                          * be setup based on what work queue we used.
9008                          */
9009                         if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
9010                                 piocb->fcp_wqidx =
9011                                         lpfc_sli4_scmd_to_wqidx_distr(phba,
9012                                                               piocb->context1);
9013                         ring_number = MAX_SLI3_CONFIGURED_RINGS +
9014                                 piocb->fcp_wqidx;
9015                 } else {
9016                         if (unlikely(!phba->sli4_hba.oas_wq))
9017                                 return LPFC_HBA_ERROR;
9018                         piocb->fcp_wqidx = 0;
9019                         ring_number = LPFC_FCP_OAS_RING;
9020                 }
9021         }
9022         return ring_number;
9023 }
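
/*
 * Illustrative sketch (hypothetical helper): for SLI4 the per-WQ FCP
 * "rings" are mapped behind the SLI-3 ring numbers, so an FCP command
 * on work queue index n is issued on ring MAX_SLI3_CONFIGURED_RINGS + n.
 * This mirrors the common (non-OAS) case in lpfc_sli_calc_ring().
 */
static inline uint32_t
lpfc_example_fcp_ring_number(struct lpfc_iocbq *piocb)
{
        return MAX_SLI3_CONFIGURED_RINGS + piocb->fcp_wqidx;
}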
9024
9025 /**
9026  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
9027  * @phba: Pointer to HBA context object.
9028  * @ring_number: SLI ring number to issue the iocb on.
9029  * @piocb: Pointer to command iocb.
9030  * @flag: Flag indicating if this command can be put into txq.
9031  *
9032  * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
9033  * function. This function gets the hbalock and calls
9034  * __lpfc_sli_issue_iocb function and will return the error returned
9035  * by __lpfc_sli_issue_iocb function. This wrapper is used by
9036  * functions which do not hold hbalock.
9037  **/
9038 int
9039 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9040                     struct lpfc_iocbq *piocb, uint32_t flag)
9041 {
9042         struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
9043         struct lpfc_sli_ring *pring;
9044         struct lpfc_queue *fpeq;
9045         struct lpfc_eqe *eqe;
9046         unsigned long iflags;
9047         int rc, idx;
9048
9049         if (phba->sli_rev == LPFC_SLI_REV4) {
9050                 ring_number = lpfc_sli_calc_ring(phba, ring_number, piocb);
9051                 if (unlikely(ring_number == LPFC_HBA_ERROR))
9052                         return IOCB_ERROR;
9053                 idx = piocb->fcp_wqidx;
9054
9055                 pring = &phba->sli.ring[ring_number];
9056                 spin_lock_irqsave(&pring->ring_lock, iflags);
9057                 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9058                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
9059
9060                 if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
9061                         fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
9062
9063                         if (atomic_dec_and_test(&fcp_eq_hdl->
9064                                 fcp_eq_in_use)) {
9065
9066                                 /* Get associated EQ with this index */
9067                                 fpeq = phba->sli4_hba.hba_eq[idx];
9068
9069                                 /* Turn off interrupts from this EQ */
9070                                 lpfc_sli4_eq_clr_intr(fpeq);
9071
9072                                 /*
9073                                  * Process all the events on FCP EQ
9074                                  */
9075                                 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9076                                         lpfc_sli4_hba_handle_eqe(phba,
9077                                                 eqe, idx);
9078                                         fpeq->EQ_processed++;
9079                                 }
9080
9081                                 /* Always clear and re-arm the EQ */
9082                                 lpfc_sli4_eq_release(fpeq,
9083                                         LPFC_QUEUE_REARM);
9084                         }
9085                         atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
9086                 }
9087         } else {
9088                 /* For now, SLI2/3 will still use hbalock */
9089                 spin_lock_irqsave(&phba->hbalock, iflags);
9090                 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9091                 spin_unlock_irqrestore(&phba->hbalock, iflags);
9092         }
9093         return rc;
9094 }
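
/*
 * Illustrative sketch (hypothetical caller, not part of the driver):
 * code that does not hold hbalock issues an iocb through this locking
 * wrapper. With flag 0 the iocb may be queued on the txq, so only
 * IOCB_ERROR requires the caller to reclaim the iocb.
 */
static inline int
lpfc_example_issue_els(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
{
        int rc;

        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
        if (rc == IOCB_ERROR)
                return -EIO;    /* caller still owns and must free elsiocb */
        return 0;               /* submitted to firmware or queued on txq */
}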
9095
9096 /**
9097  * lpfc_extra_ring_setup - Extra ring setup function
9098  * @phba: Pointer to HBA context object.
9099  *
9100  * This function is called while the driver attaches to the
9101  * HBA to set up the extra ring. The extra ring is used
9102  * only when the driver needs to support target mode
9103  * or IP over FC functionality.
9104  *
9105  * This function is called with no lock held.
9106  **/
9107 static int
9108 lpfc_extra_ring_setup(struct lpfc_hba *phba)
9109 {
9110         struct lpfc_sli *psli;
9111         struct lpfc_sli_ring *pring;
9112
9113         psli = &phba->sli;
9114
9115         /* Adjust cmd/rsp ring iocb entries more evenly */
9116
9117         /* Take some away from the FCP ring */
9118         pring = &psli->ring[psli->fcp_ring];
9119         pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9120         pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9121         pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9122         pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9123
9124         /* and give them to the extra ring */
9125         pring = &psli->ring[psli->extra_ring];
9126
9127         pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9128         pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9129         pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9130         pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9131
9132         /* Setup default profile for this ring */
9133         pring->iotag_max = 4096;
9134         pring->num_mask = 1;
9135         pring->prt[0].profile = 0;      /* Mask 0 */
9136         pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
9137         pring->prt[0].type = phba->cfg_multi_ring_type;
9138         pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
9139         return 0;
9140 }
9141
9142 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
9143  * @phba: Pointer to HBA context object.
9144  * @iocbq: Pointer to iocb object.
9145  *
9146  * The async_event handler calls this routine when it receives
9147  * an ASYNC_STATUS_CN event from the port.  The port generates
9148  * this event when an Abort Sequence request to an rport fails
9149  * twice in succession.  The abort could be originated by the
9150  * driver or by the port.  The ABTS could have been for an ELS
9151  * or FCP IO.  The port only generates this event when an ABTS
9152  * fails to complete after one retry.
9153  */
9154 static void
9155 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
9156                           struct lpfc_iocbq *iocbq)
9157 {
9158         struct lpfc_nodelist *ndlp = NULL;
9159         uint16_t rpi = 0, vpi = 0;
9160         struct lpfc_vport *vport = NULL;
9161
9162         /* The rpi in the ulpContext is vport-sensitive. */
9163         vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
9164         rpi = iocbq->iocb.ulpContext;
9165
9166         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9167                         "3092 Port generated ABTS async event "
9168                         "on vpi %d rpi %d status 0x%x\n",
9169                         vpi, rpi, iocbq->iocb.ulpStatus);
9170
9171         vport = lpfc_find_vport_by_vpid(phba, vpi);
9172         if (!vport)
9173                 goto err_exit;
9174         ndlp = lpfc_findnode_rpi(vport, rpi);
9175         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
9176                 goto err_exit;
9177
9178         if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
9179                 lpfc_sli_abts_recover_port(vport, ndlp);
9180         return;
9181
9182  err_exit:
9183         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9184                         "3095 Event Context not found, no "
9185                         "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
9186                         vpi, rpi, iocbq->iocb.ulpStatus,
9187                         iocbq->iocb.ulpContext);
9188 }
9189
9190 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
9191  * @phba: pointer to HBA context object.
9192  * @ndlp: nodelist pointer for the impacted rport.
9193  * @axri: pointer to the wcqe containing the failed exchange.
9194  *
9195  * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
9196  * port.  The port generates this event when an abort exchange request to an
9197  * rport fails twice in succession with no reply.  The abort could be originated
9198  * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
9199  */
9200 void
9201 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
9202                            struct lpfc_nodelist *ndlp,
9203                            struct sli4_wcqe_xri_aborted *axri)
9204 {
9205         struct lpfc_vport *vport;
9206         uint32_t ext_status = 0;
9207
9208         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
9209                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9210                                 "3115 Node Context not found, driver "
9211                                 "ignoring abts err event\n");
9212                 return;
9213         }
9214
9215         vport = ndlp->vport;
9216         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9217                         "3116 Port generated FCP XRI ABORT event on "
9218                         "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
9219                         ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
9220                         bf_get(lpfc_wcqe_xa_xri, axri),
9221                         bf_get(lpfc_wcqe_xa_status, axri),
9222                         axri->parameter);
9223
9224         /*
9225          * Catch the ABTS protocol failure case.  Older OCe FW releases returned
9226          * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
9227          * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
9228          */
9229         ext_status = axri->parameter & IOERR_PARAM_MASK;
9230         if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
9231             ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
9232                 lpfc_sli_abts_recover_port(vport, ndlp);
9233 }
9234
9235 /**
9236  * lpfc_sli_async_event_handler - ASYNC iocb handler function
9237  * @phba: Pointer to HBA context object.
9238  * @pring: Pointer to driver SLI ring object.
9239  * @iocbq: Pointer to iocb object.
9240  *
9241  * This function is called by the slow ring event handler
9242  * function when there is an ASYNC event iocb in the ring.
9243  * This function is called with no lock held.
9244  * Currently this function handles only temperature related
9245  * ASYNC events. The function decodes the temperature sensor
9246  * event message and posts events for the management applications.
9247  **/
9248 static void
9249 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
9250         struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
9251 {
9252         IOCB_t *icmd;
9253         uint16_t evt_code;
9254         struct temp_event temp_event_data;
9255         struct Scsi_Host *shost;
9256         uint32_t *iocb_w;
9257
9258         icmd = &iocbq->iocb;
9259         evt_code = icmd->un.asyncstat.evt_code;
9260
9261         switch (evt_code) {
9262         case ASYNC_TEMP_WARN:
9263         case ASYNC_TEMP_SAFE:
9264                 temp_event_data.data = (uint32_t) icmd->ulpContext;
9265                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
9266                 if (evt_code == ASYNC_TEMP_WARN) {
9267                         temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
9268                         lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9269                                 "0347 Adapter is very hot, please take "
9270                                 "corrective action. temperature : %d Celsius\n",
9271                                 (uint32_t) icmd->ulpContext);
9272                 } else {
9273                         temp_event_data.event_code = LPFC_NORMAL_TEMP;
9274                         lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9275                                 "0340 Adapter temperature is OK now. "
9276                                 "temperature : %d Celsius\n",
9277                                 (uint32_t) icmd->ulpContext);
9278                 }
9279
9280                 /* Send temperature change event to applications */
9281                 shost = lpfc_shost_from_vport(phba->pport);
9282                 fc_host_post_vendor_event(shost, fc_get_event_number(),
9283                         sizeof(temp_event_data), (char *) &temp_event_data,
9284                         LPFC_NL_VENDOR_ID);
9285                 break;
9286         case ASYNC_STATUS_CN:
9287                 lpfc_sli_abts_err_handler(phba, iocbq);
9288                 break;
9289         default:
9290                 iocb_w = (uint32_t *) icmd;
9291                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9292                         "0346 Ring %d handler: unexpected ASYNC_STATUS"
9293                         " evt_code 0x%x\n"
9294                         "W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
9295                         "W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
9296                         "W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
9297                         "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
9298                         pring->ringno, icmd->un.asyncstat.evt_code,
9299                         iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
9300                         iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
9301                         iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
9302                         iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
9303
9304                 break;
9305         }
9306 }
9307
9308
9309 /**
9310  * lpfc_sli_setup - SLI ring setup function
9311  * @phba: Pointer to HBA context object.
9312  *
9313  * lpfc_sli_setup sets up rings of the SLI interface with
9314  * number of iocbs per ring and iotags. This function is
9315  * called while the driver attaches to the HBA and before
9316  * interrupts are enabled, so there is no need for locking.
9317  *
9318  * This function always returns 0.
9319  **/
9320 int
9321 lpfc_sli_setup(struct lpfc_hba *phba)
9322 {
9323         int i, totiocbsize = 0;
9324         struct lpfc_sli *psli = &phba->sli;
9325         struct lpfc_sli_ring *pring;
9326
9327         psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
9328         if (phba->sli_rev == LPFC_SLI_REV4)
9329                 psli->num_rings += phba->cfg_fcp_io_channel;
9330         psli->sli_flag = 0;
9331         psli->fcp_ring = LPFC_FCP_RING;
9332         psli->next_ring = LPFC_FCP_NEXT_RING;
9333         psli->extra_ring = LPFC_EXTRA_RING;
9334
9335         psli->iocbq_lookup = NULL;
9336         psli->iocbq_lookup_len = 0;
9337         psli->last_iotag = 0;
9338
9339         for (i = 0; i < psli->num_rings; i++) {
9340                 pring = &psli->ring[i];
9341                 switch (i) {
9342                 case LPFC_FCP_RING:     /* ring 0 - FCP */
9343                         /* numCiocb and numRiocb are used in config_port */
9344                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
9345                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
9346                         pring->sli.sli3.numCiocb +=
9347                                 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9348                         pring->sli.sli3.numRiocb +=
9349                                 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9350                         pring->sli.sli3.numCiocb +=
9351                                 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9352                         pring->sli.sli3.numRiocb +=
9353                                 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9354                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
9355                                                         SLI3_IOCB_CMD_SIZE :
9356                                                         SLI2_IOCB_CMD_SIZE;
9357                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
9358                                                         SLI3_IOCB_RSP_SIZE :
9359                                                         SLI2_IOCB_RSP_SIZE;
9360                         pring->iotag_ctr = 0;
9361                         pring->iotag_max =
9362                             (phba->cfg_hba_queue_depth * 2);
9363                         pring->fast_iotag = pring->iotag_max;
9364                         pring->num_mask = 0;
9365                         break;
9366                 case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
9367                         /* numCiocb and numRiocb are used in config_port */
9368                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
9369                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
9370                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
9371                                                         SLI3_IOCB_CMD_SIZE :
9372                                                         SLI2_IOCB_CMD_SIZE;
9373                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
9374                                                         SLI3_IOCB_RSP_SIZE :
9375                                                         SLI2_IOCB_RSP_SIZE;
9376                         pring->iotag_max = phba->cfg_hba_queue_depth;
9377                         pring->num_mask = 0;
9378                         break;
9379                 case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
9380                         /* numCiocb and numRiocb are used in config_port */
9381                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
9382                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
9383                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
9384                                                         SLI3_IOCB_CMD_SIZE :
9385                                                         SLI2_IOCB_CMD_SIZE;
9386                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
9387                                                         SLI3_IOCB_RSP_SIZE :
9388                                                         SLI2_IOCB_RSP_SIZE;
9389                         pring->fast_iotag = 0;
9390                         pring->iotag_ctr = 0;
9391                         pring->iotag_max = 4096;
9392                         pring->lpfc_sli_rcv_async_status =
9393                                 lpfc_sli_async_event_handler;
9394                         pring->num_mask = LPFC_MAX_RING_MASK;
9395                         pring->prt[0].profile = 0;      /* Mask 0 */
9396                         pring->prt[0].rctl = FC_RCTL_ELS_REQ;
9397                         pring->prt[0].type = FC_TYPE_ELS;
9398                         pring->prt[0].lpfc_sli_rcv_unsol_event =
9399                             lpfc_els_unsol_event;
9400                         pring->prt[1].profile = 0;      /* Mask 1 */
9401                         pring->prt[1].rctl = FC_RCTL_ELS_REP;
9402                         pring->prt[1].type = FC_TYPE_ELS;
9403                         pring->prt[1].lpfc_sli_rcv_unsol_event =
9404                             lpfc_els_unsol_event;
9405                         pring->prt[2].profile = 0;      /* Mask 2 */
9406                         /* NameServer Inquiry */
9407                         pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
9408                         /* NameServer */
9409                         pring->prt[2].type = FC_TYPE_CT;
9410                         pring->prt[2].lpfc_sli_rcv_unsol_event =
9411                             lpfc_ct_unsol_event;
9412                         pring->prt[3].profile = 0;      /* Mask 3 */
9413                         /* NameServer response */
9414                         pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
9415                         /* NameServer */
9416                         pring->prt[3].type = FC_TYPE_CT;
9417                         pring->prt[3].lpfc_sli_rcv_unsol_event =
9418                             lpfc_ct_unsol_event;
9419                         break;
9420                 }
9421                 totiocbsize += (pring->sli.sli3.numCiocb *
9422                         pring->sli.sli3.sizeCiocb) +
9423                         (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
9424         }
9425         if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
9426                 /* Too many cmd / rsp ring entries in SLI2 SLIM */
9427                 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
9428                        "SLI2 SLIM Data: x%x x%lx\n",
9429                        phba->brd_no, totiocbsize,
9430                        (unsigned long) MAX_SLIM_IOCB_SIZE);
9431         }
9432         if (phba->cfg_multi_ring_support == 2)
9433                 lpfc_extra_ring_setup(phba);
9434
9435         return 0;
9436 }
9437
9438 /**
9439  * lpfc_sli_queue_setup - Queue initialization function
9440  * @phba: Pointer to HBA context object.
9441  *
9442  * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
9443  * ring. This function also initializes ring indices of each ring.
9444  * This function is called during the initialization of the SLI
9445  * interface of an HBA.
9446  * This function is called with no lock held and always returns
9447  * 1.
9448  **/
9449 int
9450 lpfc_sli_queue_setup(struct lpfc_hba *phba)
9451 {
9452         struct lpfc_sli *psli;
9453         struct lpfc_sli_ring *pring;
9454         int i;
9455
9456         psli = &phba->sli;
9457         spin_lock_irq(&phba->hbalock);
9458         INIT_LIST_HEAD(&psli->mboxq);
9459         INIT_LIST_HEAD(&psli->mboxq_cmpl);
9460         /* Initialize list headers for txq and txcmplq as doubly linked lists */
9461         for (i = 0; i < psli->num_rings; i++) {
9462                 pring = &psli->ring[i];
9463                 pring->ringno = i;
9464                 pring->sli.sli3.next_cmdidx  = 0;
9465                 pring->sli.sli3.local_getidx = 0;
9466                 pring->sli.sli3.cmdidx = 0;
9467                 pring->flag = 0;
9468                 INIT_LIST_HEAD(&pring->txq);
9469                 INIT_LIST_HEAD(&pring->txcmplq);
9470                 INIT_LIST_HEAD(&pring->iocb_continueq);
9471                 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
9472                 INIT_LIST_HEAD(&pring->postbufq);
9473                 spin_lock_init(&pring->ring_lock);
9474         }
9475         spin_unlock_irq(&phba->hbalock);
9476         return 1;
9477 }
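
/*
 * Illustrative sketch (hypothetical, mirrors the attach path): ring
 * geometry is fixed by lpfc_sli_setup() before the ring lists and
 * indices are initialized by lpfc_sli_queue_setup().
 */
static inline void
lpfc_example_init_rings(struct lpfc_hba *phba)
{
        lpfc_sli_setup(phba);
        lpfc_sli_queue_setup(phba);
}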
9478
9479 /**
9480  * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
9481  * @phba: Pointer to HBA context object.
9482  *
9483  * This routine flushes the mailbox command subsystem. It will unconditionally
9484  * flush all the mailbox commands in the three possible stages in the mailbox
9485  * command sub-system: pending mailbox command queue; the outstanding mailbox
9486  * command; and completed mailbox command queue. It is the caller's responsibility
9487  * to make sure that the driver is in the proper state to flush the mailbox
9488  * command sub-system. Namely, the posting of mailbox commands into the
9489  * pending mailbox command queue from the various clients must be stopped;
9490  * either the HBA is in a state in which it will never work on the outstanding
9491  * mailbox command (such as in EEH or ERATT conditions) or the outstanding
9492  * mailbox command has been completed.
9493  **/
9494 static void
9495 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
9496 {
9497         LIST_HEAD(completions);
9498         struct lpfc_sli *psli = &phba->sli;
9499         LPFC_MBOXQ_t *pmb;
9500         unsigned long iflag;
9501
9502         /* Flush all the mailbox commands in the mbox system */
9503         spin_lock_irqsave(&phba->hbalock, iflag);
9504         /* The pending mailbox command queue */
9505         list_splice_init(&phba->sli.mboxq, &completions);
9506         /* The outstanding active mailbox command */
9507         if (psli->mbox_active) {
9508                 list_add_tail(&psli->mbox_active->list, &completions);
9509                 psli->mbox_active = NULL;
9510                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9511         }
9512         /* The completed mailbox command queue */
9513         list_splice_init(&phba->sli.mboxq_cmpl, &completions);
9514         spin_unlock_irqrestore(&phba->hbalock, iflag);
9515
9516         /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
9517         while (!list_empty(&completions)) {
9518                 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
9519                 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
9520                 if (pmb->mbox_cmpl)
9521                         pmb->mbox_cmpl(phba, pmb);
9522         }
9523 }
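
/*
 * Illustrative sketch (hypothetical completion handler): every mailbox
 * command flushed by lpfc_sli_mbox_sys_flush() completes with
 * MBX_NOT_FINISHED, so its mbox_cmpl routine must release resources
 * rather than retry.
 */
static inline void
lpfc_example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        if (pmb->u.mb.mbxStatus != MBX_SUCCESS)
                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX,
                                "(example) mbox flushed, status x%x\n",
                                pmb->u.mb.mbxStatus);
        mempool_free(pmb, phba->mbox_mem_pool);
}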
9524
9525 /**
9526  * lpfc_sli_host_down - Vport cleanup function
9527  * @vport: Pointer to virtual port object.
9528  *
9529  * lpfc_sli_host_down is called to clean up the resources
9530  * associated with a vport before destroying virtual
9531  * port data structures.
9532  * This function does the following operations:
9533  * - Free discovery resources associated with this virtual
9534  *   port.
9535  * - Free iocbs associated with this virtual port in
9536  *   the txq.
9537  * - Send abort for all iocb commands associated with this
9538  *   vport in txcmplq.
9539  *
9540  * This function is called with no lock held and always returns 1.
9541  **/
9542 int
9543 lpfc_sli_host_down(struct lpfc_vport *vport)
9544 {
9545         LIST_HEAD(completions);
9546         struct lpfc_hba *phba = vport->phba;
9547         struct lpfc_sli *psli = &phba->sli;
9548         struct lpfc_sli_ring *pring;
9549         struct lpfc_iocbq *iocb, *next_iocb;
9550         int i;
9551         unsigned long flags = 0;
9552         uint16_t prev_pring_flag;
9553
9554         lpfc_cleanup_discovery_resources(vport);
9555
9556         spin_lock_irqsave(&phba->hbalock, flags);
9557         for (i = 0; i < psli->num_rings; i++) {
9558                 pring = &psli->ring[i];
9559                 prev_pring_flag = pring->flag;
9560                 /* Only slow rings */
9561                 if (pring->ringno == LPFC_ELS_RING) {
9562                         pring->flag |= LPFC_DEFERRED_RING_EVENT;
9563                         /* Set the lpfc data pending flag */
9564                         set_bit(LPFC_DATA_READY, &phba->data_flags);
9565                 }
9566                 /*
9567                  * Error everything on the txq since these iocbs have not been
9568                  * given to the FW yet.
9569                  */
9570                 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
9571                         if (iocb->vport != vport)
9572                                 continue;
9573                         list_move_tail(&iocb->list, &completions);
9574                 }
9575
9576                 /* Next issue ABTS for everything on the txcmplq */
9577                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
9578                                                                         list) {
9579                         if (iocb->vport != vport)
9580                                 continue;
9581                         lpfc_sli_issue_abort_iotag(phba, pring, iocb);
9582                 }
9583
9584                 pring->flag = prev_pring_flag;
9585         }
9586
9587         spin_unlock_irqrestore(&phba->hbalock, flags);
9588
9589         /* Cancel all the IOCBs from the completions list */
9590         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9591                               IOERR_SLI_DOWN);
9592         return 1;
9593 }
9594
9595 /**
9596  * lpfc_sli_hba_down - Resource cleanup function for the HBA
9597  * @phba: Pointer to HBA context object.
9598  *
9599  * This function cleans up all iocb, buffers, mailbox commands
9600  * while shutting down the HBA. This function is called with no
9601  * lock held and always returns 1.
9602  * This function does the following to cleanup driver resources:
9603  * - Free discovery resources for each virtual port
9604  * - Cleanup any pending fabric iocbs
9605  * - Iterate through the iocb txq and free each entry
9606  *   in the list.
9607  * - Free up any buffer posted to the HBA
9608  * - Free mailbox commands in the mailbox queue.
9609  **/
9610 int
9611 lpfc_sli_hba_down(struct lpfc_hba *phba)
9612 {
9613         LIST_HEAD(completions);
9614         struct lpfc_sli *psli = &phba->sli;
9615         struct lpfc_sli_ring *pring;
9616         struct lpfc_dmabuf *buf_ptr;
9617         unsigned long flags = 0;
9618         int i;
9619
9620         /* Shutdown the mailbox command sub-system */
9621         lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
9622
9623         lpfc_hba_down_prep(phba);
9624
9625         lpfc_fabric_abort_hba(phba);
9626
9627         spin_lock_irqsave(&phba->hbalock, flags);
9628         for (i = 0; i < psli->num_rings; i++) {
9629                 pring = &psli->ring[i];
9630                 /* Only slow rings */
9631                 if (pring->ringno == LPFC_ELS_RING) {
9632                         pring->flag |= LPFC_DEFERRED_RING_EVENT;
9633                         /* Set the lpfc data pending flag */
9634                         set_bit(LPFC_DATA_READY, &phba->data_flags);
9635                 }
9636
9637                 /*
9638                  * Error everything on the txq since these iocbs have not been
9639                  * given to the FW yet.
9640                  */
9641                 list_splice_init(&pring->txq, &completions);
9642         }
9643         spin_unlock_irqrestore(&phba->hbalock, flags);
9644
9645         /* Cancel all the IOCBs from the completions list */
9646         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9647                               IOERR_SLI_DOWN);
9648
9649         spin_lock_irqsave(&phba->hbalock, flags);
9650         list_splice_init(&phba->elsbuf, &completions);
9651         phba->elsbuf_cnt = 0;
9652         phba->elsbuf_prev_cnt = 0;
9653         spin_unlock_irqrestore(&phba->hbalock, flags);
9654
9655         while (!list_empty(&completions)) {
9656                 list_remove_head(&completions, buf_ptr,
9657                         struct lpfc_dmabuf, list);
9658                 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
9659                 kfree(buf_ptr);
9660         }
9661
9662         /* Return any active mbox cmds */
9663         del_timer_sync(&psli->mbox_tmo);
9664
9665         spin_lock_irqsave(&phba->pport->work_port_lock, flags);
9666         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9667         spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
9668
9669         return 1;
9670 }
9671
9672 /**
9673  * lpfc_sli_pcimem_bcopy - SLI memory copy function
9674  * @srcp: Source memory pointer.
9675  * @destp: Destination memory pointer.
9676  * @cnt: Number of bytes to copy (a multiple of sizeof(uint32_t)).
9677  *
9678  * This function is used for copying data between driver memory
9679  * and the SLI memory. This function also changes the endianness
9680  * of each word if native endianness is different from SLI
9681  * endianness. This function can be called with or without
9682  * lock.
9683  **/
9684 void
9685 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
9686 {
9687         uint32_t *src = srcp;
9688         uint32_t *dest = destp;
9689         uint32_t ldata;
9690         int i;
9691
9692         for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
9693                 ldata = *src;
9694                 ldata = le32_to_cpu(ldata);
9695                 *dest = ldata;
9696                 src++;
9697                 dest++;
9698         }
9699 }
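
/*
 * Illustrative sketch (hypothetical helper): note that @cnt above is a
 * byte count, not a word count, so copying a single 32-bit word out of
 * a little-endian SLI region looks like this.
 */
static inline void
lpfc_example_copy_one_word(void *slim, uint32_t *out)
{
        lpfc_sli_pcimem_bcopy(slim, out, sizeof(uint32_t));
}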
9700
9701
9702 /**
9703  * lpfc_sli_bemem_bcopy - SLI memory copy function
9704  * @srcp: Source memory pointer.
9705  * @destp: Destination memory pointer.
9706  * @cnt: Number of bytes to copy (a multiple of sizeof(uint32_t)).
9707  *
9708  * This function is used for copying data between a data structure
9709  * with big endian representation to local endianness.
9710  * This function can be called with or without lock.
9711  **/
9712 void
9713 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
9714 {
9715         uint32_t *src = srcp;
9716         uint32_t *dest = destp;
9717         uint32_t ldata;
9718         int i;
9719
9720         for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
9721                 ldata = *src;
9722                 ldata = be32_to_cpu(ldata);
9723                 *dest = ldata;
9724                 src++;
9725                 dest++;
9726         }
9727 }
9728
9729 /**
9730  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
9731  * @phba: Pointer to HBA context object.
9732  * @pring: Pointer to driver SLI ring object.
9733  * @mp: Pointer to driver buffer object.
9734  *
9735  * This function is called with no lock held.
9736  * It always returns zero after adding the buffer to the postbufq
9737  * buffer list.
9738  **/
9739 int
9740 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9741                          struct lpfc_dmabuf *mp)
9742 {
9743         /* Stick struct lpfc_dmabuf at the end of postbufq so the
9744          * driver can look it up later. */
9745         spin_lock_irq(&phba->hbalock);
9746         list_add_tail(&mp->list, &pring->postbufq);
9747         pring->postbufq_cnt++;
9748         spin_unlock_irq(&phba->hbalock);
9749         return 0;
9750 }
9751
9752 /**
9753  * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
9754  * @phba: Pointer to HBA context object.
9755  *
9756  * When HBQ is enabled, buffers are searched based on tags. This function
9757  * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
9758  * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
9759  * does not conflict with tags of buffer posted for unsolicited events.
9760  * The function returns the allocated tag. The function is called with
9761  * no locks held.
9762  **/
9763 uint32_t
9764 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
9765 {
9766         spin_lock_irq(&phba->hbalock);
9767         phba->buffer_tag_count++;
9768         /*
9769          * Always set the QUE_BUFTAG_BIT to distinguish this tag
9770          * from a tag assigned by HBQ.
9771          */
9772         phba->buffer_tag_count |= QUE_BUFTAG_BIT;
9773         spin_unlock_irq(&phba->hbalock);
9774         return phba->buffer_tag_count;
9775 }
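
/*
 * Illustrative sketch (hypothetical, mirrors the QUE_XRI64_CX buffer
 * flow): a buffer is tagged before being posted, and the tag carried
 * back in the CMD_IOCB_RET_XRI64_CX completion is what
 * lpfc_sli_ring_taggedbuf_get() below matches on.
 */
static inline void
lpfc_example_post_tagged_buf(struct lpfc_hba *phba,
                             struct lpfc_sli_ring *pring,
                             struct lpfc_dmabuf *mp)
{
        mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
        lpfc_sli_ringpostbuf_put(phba, pring, mp);
}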
9776
9777 /**
9778  * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
9779  * @phba: Pointer to HBA context object.
9780  * @pring: Pointer to driver SLI ring object.
9781  * @tag: Buffer tag.
9782  *
9783  * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
9784  * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
9785  * iocb is posted to the response ring with the tag of the buffer.
9786  * This function searches the pring->postbufq list using the tag
9787  * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
9788  * iocb. If the buffer is found, then the lpfc_dmabuf object of the
9789  * buffer is returned to the caller, else NULL is returned.
9790  * This function is called with no lock held.
9791  **/
9792 struct lpfc_dmabuf *
9793 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9794                         uint32_t tag)
9795 {
9796         struct lpfc_dmabuf *mp, *next_mp;
9797         struct list_head *slp = &pring->postbufq;
9798
9799         /* Search postbufq, from the beginning, looking for a match on tag */
9800         spin_lock_irq(&phba->hbalock);
9801         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9802                 if (mp->buffer_tag == tag) {
9803                         list_del_init(&mp->list);
9804                         pring->postbufq_cnt--;
9805                         spin_unlock_irq(&phba->hbalock);
9806                         return mp;
9807                 }
9808         }
9809
9810         spin_unlock_irq(&phba->hbalock);
9811         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9812                         "0402 Cannot find virtual addr for buffer tag on "
9813                         "ring %d Data x%lx x%p x%p x%x\n",
9814                         pring->ringno, (unsigned long) tag,
9815                         slp->next, slp->prev, pring->postbufq_cnt);
9816
9817         return NULL;
9818 }
9819
9820 /**
9821  * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
9822  * @phba: Pointer to HBA context object.
9823  * @pring: Pointer to driver SLI ring object.
9824  * @phys: DMA address of the buffer.
9825  *
9826  * This function searches the buffer list using the dma_address
9827  * of the unsolicited event to find the driver's lpfc_dmabuf object
9828  * corresponding to the dma_address. The function returns the
9829  * lpfc_dmabuf object if a buffer is found, else it returns NULL.
9830  * This function is called by the ct and els unsolicited event
9831  * handlers to get the buffer associated with the unsolicited
9832  * event.
9833  *
9834  * This function is called with no lock held.
9835  **/
9836 struct lpfc_dmabuf *
9837 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9838                          dma_addr_t phys)
9839 {
9840         struct lpfc_dmabuf *mp, *next_mp;
9841         struct list_head *slp = &pring->postbufq;
9842
9843         /* Search postbufq, from the beginning, looking for a match on phys */
9844         spin_lock_irq(&phba->hbalock);
9845         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9846                 if (mp->phys == phys) {
9847                         list_del_init(&mp->list);
9848                         pring->postbufq_cnt--;
9849                         spin_unlock_irq(&phba->hbalock);
9850                         return mp;
9851                 }
9852         }
9853
9854         spin_unlock_irq(&phba->hbalock);
9855         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9856                         "0410 Cannot find virtual addr for mapped buf on "
9857                         "ring %d Data x%llx x%p x%p x%x\n",
9858                         pring->ringno, (unsigned long long)phys,
9859                         slp->next, slp->prev, pring->postbufq_cnt);
9860         return NULL;
9861 }
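
/*
 * Illustrative sketch (hypothetical, mirrors the CT/ELS unsolicited
 * handlers): the posted buffer behind an unsolicited iocb is recovered
 * from the DMA address carried in the iocb's first BDE.
 */
static inline struct lpfc_dmabuf *
lpfc_example_unsol_buf(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                       IOCB_t *icmd)
{
        return lpfc_sli_ringpostbuf_get(phba, pring,
                                        getPaddr(icmd->un.cont64[0].addrHigh,
                                                 icmd->un.cont64[0].addrLow));
}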
9862
9863 /**
9864  * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
9865  * @phba: Pointer to HBA context object.
9866  * @cmdiocb: Pointer to driver command iocb object.
9867  * @rspiocb: Pointer to driver response iocb object.
9868  *
9869  * This function is the completion handler for the abort iocbs for
9870  * ELS commands. This function is called from the ELS ring event
9871  * handler with no lock held. This function frees memory resources
9872  * associated with the abort iocb.
9873  **/
9874 static void
9875 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9876                         struct lpfc_iocbq *rspiocb)
9877 {
9878         IOCB_t *irsp = &rspiocb->iocb;
9879         uint16_t abort_iotag, abort_context;
9880         struct lpfc_iocbq *abort_iocb = NULL;
9881
9882         if (irsp->ulpStatus) {
9883
9884                 /*
9885                  * Assume that the port already completed and returned, or
9886                  * will return the iocb. Just log the message.
9887                  */
9888                 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
9889                 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
9890
9891                 spin_lock_irq(&phba->hbalock);
9892                 if (phba->sli_rev < LPFC_SLI_REV4) {
9893                         if (abort_iotag != 0 &&
9894                                 abort_iotag <= phba->sli.last_iotag)
9895                                 abort_iocb =
9896                                         phba->sli.iocbq_lookup[abort_iotag];
9897                 } else
9898                         /* For sli4 the abort_tag is the XRI,
9899                          * so the abort routine puts the iotag of the iocb
9900                          * being aborted in the context field of the abort
9901                          * IOCB.
9902                          */
9903                         abort_iocb = phba->sli.iocbq_lookup[abort_context];
9904
9905                 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
9906                                 "0327 Cannot abort els iocb %p "
9907                                 "with tag %x context %x, abort status %x, "
9908                                 "abort code %x\n",
9909                                 abort_iocb, abort_iotag, abort_context,
9910                                 irsp->ulpStatus, irsp->un.ulpWord[4]);
9911
9912                 spin_unlock_irq(&phba->hbalock);
9913         }
9914         lpfc_sli_release_iocbq(phba, cmdiocb);
9915         return;
9916 }
9917
9918 /**
9919  * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
9920  * @phba: Pointer to HBA context object.
9921  * @cmdiocb: Pointer to driver command iocb object.
9922  * @rspiocb: Pointer to driver response iocb object.
9923  *
9924  * The function is called from SLI ring event handler with no
9925  * lock held. This function is the completion handler for ELS commands
9926  * which are aborted. The function frees memory resources used for
9927  * the aborted ELS commands.
9928  **/
9929 static void
9930 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9931                      struct lpfc_iocbq *rspiocb)
9932 {
9933         IOCB_t *irsp = &rspiocb->iocb;
9934
9935         /* ELS cmd tag <ulpIoTag> completes */
9936         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
9937                         "0139 Ignoring ELS cmd tag x%x completion Data: "
9938                         "x%x x%x x%x\n",
9939                         irsp->ulpIoTag, irsp->ulpStatus,
9940                         irsp->un.ulpWord[4], irsp->ulpTimeout);
9941         if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
9942                 lpfc_ct_free_iocb(phba, cmdiocb);
9943         else
9944                 lpfc_els_free_iocb(phba, cmdiocb);
9945         return;
9946 }
9947
9948 /**
9949  * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
9950  * @phba: Pointer to HBA context object.
9951  * @pring: Pointer to driver SLI ring object.
9952  * @cmdiocb: Pointer to driver command iocb object.
9953  *
9954  * This function issues an abort iocb for the provided command iocb down to
9955  * the port. Other than the case the outstanding command iocb is an abort
9956  * the port. Unless the outstanding command iocb is itself an abort
9957  * request, this function issues the abort unconditionally. This function is
9958  * memory allocation failure or when the command iocb is an abort request.
9959  **/
9960 static int
9961 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9962                            struct lpfc_iocbq *cmdiocb)
9963 {
9964         struct lpfc_vport *vport = cmdiocb->vport;
9965         struct lpfc_iocbq *abtsiocbp;
9966         IOCB_t *icmd = NULL;
9967         IOCB_t *iabt = NULL;
9968         int ring_number;
9969         int retval;
9970         unsigned long iflags;
9971
9972         lockdep_assert_held(&phba->hbalock);
9973
9974         /*
9975          * There are certain command types we don't want to abort.  And we
9976          * don't want to abort commands that are already in the process of
9977          * being aborted.
9978          */
9979         icmd = &cmdiocb->iocb;
9980         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
9981             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9982             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
9983                 return 0;
9984
9985         /* issue ABTS for this IOCB based on iotag */
9986         abtsiocbp = __lpfc_sli_get_iocbq(phba);
9987         if (abtsiocbp == NULL)
9988                 return 0;
9989
9990         /* This signals the response to set the correct status
9991          * before calling the completion handler
9992          */
9993         cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
9994
9995         iabt = &abtsiocbp->iocb;
9996         iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
9997         iabt->un.acxri.abortContextTag = icmd->ulpContext;
9998         if (phba->sli_rev == LPFC_SLI_REV4) {
9999                 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
10000                 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
10001         } else {
10002                 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
10003         }
10004         iabt->ulpLe = 1;
10005         iabt->ulpClass = icmd->ulpClass;
10006
10007         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
10008         abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
10009         if (cmdiocb->iocb_flag & LPFC_IO_FCP)
10010                 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
10011         if (cmdiocb->iocb_flag & LPFC_IO_FOF)
10012                 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
10013
10014         if (phba->link_state >= LPFC_LINK_UP)
10015                 iabt->ulpCommand = CMD_ABORT_XRI_CN;
10016         else
10017                 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
10018
10019         abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
10020         abtsiocbp->vport = vport;
10021
10022         lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
10023                          "0339 Abort xri x%x, original iotag x%x, "
10024                          "abort cmd iotag x%x\n",
10025                          iabt->un.acxri.abortIoTag,
10026                          iabt->un.acxri.abortContextTag,
10027                          abtsiocbp->iotag);
10028
10029         if (phba->sli_rev == LPFC_SLI_REV4) {
10030                 ring_number =
10031                         lpfc_sli_calc_ring(phba, pring->ringno, abtsiocbp);
10032                 if (unlikely(ring_number == LPFC_HBA_ERROR))
10033                         return 0;
10034                 pring = &phba->sli.ring[ring_number];
10035                 /* Note: both hbalock and ring_lock need to be held here */
10036                 spin_lock_irqsave(&pring->ring_lock, iflags);
10037                 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10038                         abtsiocbp, 0);
10039                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10040         } else {
10041                 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10042                         abtsiocbp, 0);
10043         }
10044
10045         if (retval)
10046                 __lpfc_sli_release_iocbq(phba, abtsiocbp);
10047
10048         /*
10049          * Caller to this routine should check for IOCB_ERROR
10050          * and handle it properly.  This routine no longer removes
10051          * iocb off txcmplq and call compl in case of IOCB_ERROR.
10052          */
10053         return retval;
10054 }
10055
10056 /**
10057  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
10058  * @phba: Pointer to HBA context object.
10059  * @pring: Pointer to driver SLI ring object.
10060  * @cmdiocb: Pointer to driver command iocb object.
10061  *
10062  * This function issues an abort iocb for the provided command iocb. In case
10063  * of unloading, the abort iocb will not be issued to commands on the ELS
10064  * ring. Instead, the completion callback of those commands is changed
10065  * so that nothing happens when they finish. This function is called with
10066  * hbalock held. The function returns 0 when the command iocb is an abort
10067  * request.
10068  **/
10069 int
10070 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10071                            struct lpfc_iocbq *cmdiocb)
10072 {
10073         struct lpfc_vport *vport = cmdiocb->vport;
10074         int retval = IOCB_ERROR;
10075         IOCB_t *icmd = NULL;
10076
10077         lockdep_assert_held(&phba->hbalock);
10078
10079         /*
10080          * There are certain command types we don't want to abort.  And we
10081          * don't want to abort commands that are already in the process of
10082          * being aborted.
10083          */
10084         icmd = &cmdiocb->iocb;
10085         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
10086             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10087             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10088                 return 0;
10089
10090         /*
10091          * If we're unloading, don't abort iocb on the ELS ring, but change
10092          * the callback so that nothing happens when it finishes.
10093          */
10094         if ((vport->load_flag & FC_UNLOADING) &&
10095             (pring->ringno == LPFC_ELS_RING)) {
10096                 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
10097                         cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
10098                 else
10099                         cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
10100                 goto abort_iotag_exit;
10101         }
10102
10103         /* Now, we try to issue the abort for the cmdiocb */
10104         retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
10105
10106 abort_iotag_exit:
10107         /*
10108          * Caller to this routine should check for IOCB_ERROR
10109          * and handle it properly.  This routine no longer removes
10110          * iocb off txcmplq and call compl in case of IOCB_ERROR.
10111          */
10112         return retval;
10113 }
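
/*
 * Usage sketch (illustrative, not part of the driver): the routine above
 * must be called with hbalock held, and the caller is expected to check
 * for IOCB_ERROR; completion of the aborted iocb is still delivered
 * through that iocb's own handler.  The wrapper name is hypothetical.
 */
static int
lpfc_example_abort_one(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                       struct lpfc_iocbq *piocb)
{
        int ret;

        spin_lock_irq(&phba->hbalock);
        ret = lpfc_sli_issue_abort_iotag(phba, pring, piocb);
        spin_unlock_irq(&phba->hbalock);

        return (ret == IOCB_ERROR) ? -EIO : 0;
}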
10114
10115 /**
10116  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
10117  * @phba: pointer to lpfc HBA data structure.
10118  *
10119  * This routine will abort all pending and outstanding iocbs to an HBA.
10120  **/
10121 void
10122 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
10123 {
10124         struct lpfc_sli *psli = &phba->sli;
10125         struct lpfc_sli_ring *pring;
10126         int i;
10127
10128         for (i = 0; i < psli->num_rings; i++) {
10129                 pring = &psli->ring[i];
10130                 lpfc_sli_abort_iocb_ring(phba, pring);
10131         }
10132 }
10133
10134 /**
10135  * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
10136  * @iocbq: Pointer to driver iocb object.
10137  * @vport: Pointer to driver virtual port object.
10138  * @tgt_id: SCSI ID of the target.
10139  * @lun_id: LUN ID of the scsi device.
10140  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
10141  *
10142  * This function acts as an iocb filter for functions which abort or count
10143  * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
10144  * 0 if the filtering criteria are met for the given iocb and will return
10145  * 1 if the filtering criteria are not met.
10146  * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
10147  * given iocb is for the SCSI device specified by vport, tgt_id and
10148  * lun_id parameter.
10149  * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
10150  * given iocb is for the SCSI target specified by vport and tgt_id
10151  * parameters.
10152  * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
10153  * given iocb is for the SCSI host associated with the given vport.
10154  * This function is called with no locks held.
10155  **/
10156 static int
10157 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
10158                            uint16_t tgt_id, uint64_t lun_id,
10159                            lpfc_ctx_cmd ctx_cmd)
10160 {
10161         struct lpfc_scsi_buf *lpfc_cmd;
10162         int rc = 1;
10163
10164         if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
10165                 return rc;
10166
10167         if (iocbq->vport != vport)
10168                 return rc;
10169
10170         lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
10171
10172         if (lpfc_cmd->pCmd == NULL)
10173                 return rc;
10174
10175         switch (ctx_cmd) {
10176         case LPFC_CTX_LUN:
10177                 if ((lpfc_cmd->rdata->pnode) &&
10178                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
10179                     (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
10180                         rc = 0;
10181                 break;
10182         case LPFC_CTX_TGT:
10183                 if ((lpfc_cmd->rdata->pnode) &&
10184                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
10185                         rc = 0;
10186                 break;
10187         case LPFC_CTX_HOST:
10188                 rc = 0;
10189                 break;
10190         default:
10191                 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
10192                         __func__, ctx_cmd);
10193                 break;
10194         }
10195
10196         return rc;
10197 }
10198
10199 /**
10200  * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
10201  * @vport: Pointer to virtual port.
10202  * @tgt_id: SCSI ID of the target.
10203  * @lun_id: LUN ID of the scsi device.
10204  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10205  *
10206  * This function returns number of FCP commands pending for the vport.
10207  * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
10208  * commands pending on the vport associated with SCSI device specified
10209  * by tgt_id and lun_id parameters.
10210  * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
10211  * commands pending on the vport associated with SCSI target specified
10212  * by tgt_id parameter.
10213  * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
10214  * commands pending on the vport.
10215  * This function returns the number of iocbs which satisfy the filter.
10216  * This function is called without any lock held.
10217  **/
10218 int
10219 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
10220                   lpfc_ctx_cmd ctx_cmd)
10221 {
10222         struct lpfc_hba *phba = vport->phba;
10223         struct lpfc_iocbq *iocbq;
10224         int sum, i;
10225
10226         spin_lock_irq(&phba->hbalock);
10227         for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
10228                 iocbq = phba->sli.iocbq_lookup[i];
10229
10230                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
10231                                                 ctx_cmd) == 0)
10232                         sum++;
10233         }
10234         spin_unlock_irq(&phba->hbalock);
10235
10236         return sum;
10237 }
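
/*
 * Usage sketch (illustrative, not part of the driver): counting what is
 * still outstanding at each filter scope.  The caller holds no locks;
 * lpfc_sli_sum_iocb() takes hbalock internally.  The message text is an
 * example only.
 */
static void
lpfc_example_log_pending(struct lpfc_vport *vport, uint16_t tgt_id,
                         uint64_t lun_id)
{
        int lun_cnt, tgt_cnt, host_cnt;

        lun_cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
        tgt_cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_TGT);
        host_cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_HOST);

        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
                         "example: pending FCP iocbs: lun %d tgt %d host %d\n",
                         lun_cnt, tgt_cnt, host_cnt);
}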
10238
10239 /**
10240  * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
10241  * @phba: Pointer to HBA context object
10242  * @cmdiocb: Pointer to command iocb object.
10243  * @rspiocb: Pointer to response iocb object.
10244  *
10245  * This function is called when an aborted FCP iocb completes. This
10246  * function is called by the ring event handler with no lock held.
10247  * This function frees the iocb.
10248  **/
10249 void
10250 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10251                         struct lpfc_iocbq *rspiocb)
10252 {
10253         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10254                         "3096 ABORT_XRI_CN completing on rpi x%x "
10255                         "original iotag x%x, abort cmd iotag x%x "
10256                         "status 0x%x, reason 0x%x\n",
10257                         cmdiocb->iocb.un.acxri.abortContextTag,
10258                         cmdiocb->iocb.un.acxri.abortIoTag,
10259                         cmdiocb->iotag, rspiocb->iocb.ulpStatus,
10260                         rspiocb->iocb.un.ulpWord[4]);
10261         lpfc_sli_release_iocbq(phba, cmdiocb);
10262         return;
10263 }
10264
10265 /**
10266  * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
10267  * @vport: Pointer to virtual port.
10268  * @pring: Pointer to driver SLI ring object.
10269  * @tgt_id: SCSI ID of the target.
10270  * @lun_id: LUN ID of the scsi device.
10271  * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10272  *
10273  * This function sends an abort command for every SCSI command
10274  * associated with the given virtual port pending on the ring
10275  * filtered by lpfc_sli_validate_fcp_iocb function.
10276  * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
10277  * FCP iocbs associated with the lun specified by the tgt_id and lun_id
10278  * parameters.
10279  * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
10280  * FCP iocbs associated with the SCSI target specified by tgt_id.
10281  * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
10282  * FCP iocbs associated with the virtual port.
10283  * This function returns the number of iocbs it failed to abort.
10284  * This function is called with no locks held.
10285  **/
10286 int
10287 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10288                     uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
10289 {
10290         struct lpfc_hba *phba = vport->phba;
10291         struct lpfc_iocbq *iocbq;
10292         struct lpfc_iocbq *abtsiocb;
10293         IOCB_t *cmd = NULL;
10294         int errcnt = 0, ret_val = 0;
10295         int i;
10296
10297         for (i = 1; i <= phba->sli.last_iotag; i++) {
10298                 iocbq = phba->sli.iocbq_lookup[i];
10299
10300                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
10301                                                abort_cmd) != 0)
10302                         continue;
10303
10304                 /*
10305                  * If the iocbq is already being aborted, don't take a second
10306                  * action, but do count it.
10307                  */
10308                 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
10309                         continue;
10310
10311                 /* issue ABTS for this IOCB based on iotag */
10312                 abtsiocb = lpfc_sli_get_iocbq(phba);
10313                 if (abtsiocb == NULL) {
10314                         errcnt++;
10315                         continue;
10316                 }
10317
10318                 /* indicate the IO is being aborted by the driver. */
10319                 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
10320
10321                 cmd = &iocbq->iocb;
10322                 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
10323                 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
10324                 if (phba->sli_rev == LPFC_SLI_REV4)
10325                         abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
10326                 else
10327                         abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
10328                 abtsiocb->iocb.ulpLe = 1;
10329                 abtsiocb->iocb.ulpClass = cmd->ulpClass;
10330                 abtsiocb->vport = vport;
10331
10332                 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
10333                 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
10334                 if (iocbq->iocb_flag & LPFC_IO_FCP)
10335                         abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
10336                 if (iocbq->iocb_flag & LPFC_IO_FOF)
10337                         abtsiocb->iocb_flag |= LPFC_IO_FOF;
10338
10339                 if (lpfc_is_link_up(phba))
10340                         abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
10341                 else
10342                         abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
10343
10344                 /* Setup callback routine and issue the command. */
10345                 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
10346                 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
10347                                               abtsiocb, 0);
10348                 if (ret_val == IOCB_ERROR) {
10349                         lpfc_sli_release_iocbq(phba, abtsiocb);
10350                         errcnt++;
10351                         continue;
10352                 }
10353         }
10354
10355         return errcnt;
10356 }
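
/*
 * Usage sketch (illustrative, not part of the driver): a typical
 * error-recovery sequence issues aborts at target scope and then polls
 * lpfc_sli_sum_iocb() until the pending count drains.  The 10 second cap
 * and 20ms poll interval are assumptions of the example, not driver
 * constants.
 */
static int
lpfc_example_abort_and_drain(struct lpfc_vport *vport,
                             struct lpfc_sli_ring *pring, uint16_t tgt_id)
{
        int waited_ms = 0;

        /* lun_id is ignored for LPFC_CTX_TGT filtering */
        lpfc_sli_abort_iocb(vport, pring, tgt_id, 0, LPFC_CTX_TGT);

        while (lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT)) {
                if (waited_ms >= 10000)
                        return -ETIMEDOUT;
                msleep(20);
                waited_ms += 20;
        }
        return 0;
}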
10357
10358 /**
10359  * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
10360  * @vport: Pointer to virtual port.
10361  * @pring: Pointer to driver SLI ring object.
10362  * @tgt_id: SCSI ID of the target.
10363  * @lun_id: LUN ID of the scsi device.
10364  * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10365  *
10366  * This function sends an abort command for every SCSI command
10367  * associated with the given virtual port pending on the ring
10368  * filtered by lpfc_sli_validate_fcp_iocb function.
10369  * When cmd == LPFC_CTX_LUN, the function sends abort only to the
10370  * FCP iocbs associated with the lun specified by the tgt_id and lun_id
10371  * parameters.
10372  * When cmd == LPFC_CTX_TGT, the function sends abort only to the
10373  * FCP iocbs associated with the SCSI target specified by tgt_id.
10374  * When cmd == LPFC_CTX_HOST, the function sends abort to all
10375  * FCP iocbs associated with the virtual port.
10376  * This function returns the number of iocbs it aborted.
10377  * This function is called with no locks held right after a taskmgmt
10378  * command is sent.
10379  **/
10380 int
10381 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10382                         uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
10383 {
10384         struct lpfc_hba *phba = vport->phba;
10385         struct lpfc_scsi_buf *lpfc_cmd;
10386         struct lpfc_iocbq *abtsiocbq;
10387         struct lpfc_nodelist *ndlp;
10388         struct lpfc_iocbq *iocbq;
10389         IOCB_t *icmd;
10390         int sum, i, ret_val;
10391         unsigned long iflags;
10392         struct lpfc_sli_ring *pring_s4;
10393         uint32_t ring_number;
10394
10395         spin_lock_irq(&phba->hbalock);
10396
10397         /* all I/Os are in the process of being flushed */
10398         if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
10399                 spin_unlock_irq(&phba->hbalock);
10400                 return 0;
10401         }
10402         sum = 0;
10403
10404         for (i = 1; i <= phba->sli.last_iotag; i++) {
10405                 iocbq = phba->sli.iocbq_lookup[i];
10406
10407                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
10408                                                cmd) != 0)
10409                         continue;
10410
10411                 /*
10412                  * If the iocbq is already being aborted, don't take a second
10413                  * action, but do count it.
10414                  */
10415                 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
10416                         continue;
10417
10418                 /* issue ABTS for this IOCB based on iotag */
10419                 abtsiocbq = __lpfc_sli_get_iocbq(phba);
10420                 if (abtsiocbq == NULL)
10421                         continue;
10422
10423                 icmd = &iocbq->iocb;
10424                 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
10425                 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
10426                 if (phba->sli_rev == LPFC_SLI_REV4)
10427                         abtsiocbq->iocb.un.acxri.abortIoTag =
10428                                                          iocbq->sli4_xritag;
10429                 else
10430                         abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
10431                 abtsiocbq->iocb.ulpLe = 1;
10432                 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
10433                 abtsiocbq->vport = vport;
10434
10435                 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
10436                 abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx;
10437                 if (iocbq->iocb_flag & LPFC_IO_FCP)
10438                         abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
10439                 if (iocbq->iocb_flag & LPFC_IO_FOF)
10440                         abtsiocbq->iocb_flag |= LPFC_IO_FOF;
10441
10442                 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
10443                 ndlp = lpfc_cmd->rdata->pnode;
10444
10445                 if (lpfc_is_link_up(phba) &&
10446                     (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
10447                         abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
10448                 else
10449                         abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
10450
10451                 /* Setup callback routine and issue the command. */
10452                 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
10453
10454                 /*
10455                  * Indicate the IO is being aborted by the driver and set
10456                  * the caller's flag into the aborted IO.
10457                  */
10458                 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
10459
10460                 if (phba->sli_rev == LPFC_SLI_REV4) {
10461                         ring_number = MAX_SLI3_CONFIGURED_RINGS +
10462                                          iocbq->fcp_wqidx;
10463                         pring_s4 = &phba->sli.ring[ring_number];
10464                         /* Note: both hbalock and ring_lock must be held here */
10465                         spin_lock_irqsave(&pring_s4->ring_lock, iflags);
10466                         ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
10467                                                         abtsiocbq, 0);
10468                         spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
10469                 } else {
10470                         ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
10471                                                         abtsiocbq, 0);
10472                 }
10473
10475                 if (ret_val == IOCB_ERROR)
10476                         __lpfc_sli_release_iocbq(phba, abtsiocbq);
10477                 else
10478                         sum++;
10479         }
10480         spin_unlock_irq(&phba->hbalock);
10481         return sum;
10482 }
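
/*
 * Usage sketch (illustrative, not part of the driver): right after a task
 * management command such as a LUN reset has been sent, the remaining
 * iocbs for that LUN can be aborted with the routine above; the return
 * value is the count of aborts that were successfully issued.
 */
static void
lpfc_example_post_tmf_abort(struct lpfc_vport *vport,
                            struct lpfc_sli_ring *pring,
                            uint16_t tgt_id, uint64_t lun_id)
{
        int issued;

        issued = lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, lun_id,
                                         LPFC_CTX_LUN);
        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
                         "example: %d aborts issued after taskmgmt\n",
                         issued);
}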
10483
10484 /**
10485  * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
10486  * @phba: Pointer to HBA context object.
10487  * @cmdiocbq: Pointer to command iocb.
10488  * @rspiocbq: Pointer to response iocb.
10489  *
10490  * This function is the completion handler for iocbs issued using
10491  * lpfc_sli_issue_iocb_wait function. This function is called by the
10492  * ring event handler function without any lock held. This function
10493  * can be called from both worker thread context and interrupt
10494  * context. This function also can be called from other thread which
10495  * cleans up the SLI layer objects.
10496  * This function copies the contents of the response iocb to the
10497  * response iocb memory object provided by the caller of
10498  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
10499  * sleeps for the iocb completion.
10500  **/
10501 static void
10502 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
10503                         struct lpfc_iocbq *cmdiocbq,
10504                         struct lpfc_iocbq *rspiocbq)
10505 {
10506         wait_queue_head_t *pdone_q;
10507         unsigned long iflags;
10508         struct lpfc_scsi_buf *lpfc_cmd;
10509
10510         spin_lock_irqsave(&phba->hbalock, iflags);
10511         if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
10512
10513                 /*
10514                  * A time out has occurred for the iocb.  If a time out
10515                  * completion handler has been supplied, call it.  Otherwise,
10516                  * just free the iocbq.
10517                  */
10518
10519                 spin_unlock_irqrestore(&phba->hbalock, iflags);
10520                 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
10521                 cmdiocbq->wait_iocb_cmpl = NULL;
10522                 if (cmdiocbq->iocb_cmpl)
10523                         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
10524                 else
10525                         lpfc_sli_release_iocbq(phba, cmdiocbq);
10526                 return;
10527         }
10528
10529         cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
10530         if (cmdiocbq->context2 && rspiocbq)
10531                 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
10532                        &rspiocbq->iocb, sizeof(IOCB_t));
10533
10534         /* Set the exchange busy flag for task management commands */
10535         if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
10536                 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
10537                 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
10538                         cur_iocbq);
10539                 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
10540         }
10541
10542         pdone_q = cmdiocbq->context_un.wait_queue;
10543         if (pdone_q)
10544                 wake_up(pdone_q);
10545         spin_unlock_irqrestore(&phba->hbalock, iflags);
10546         return;
10547 }
10548
10549 /**
10550  * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
10551  * @phba: Pointer to HBA context object.
10552  * @piocbq: Pointer to command iocb.
10553  * @flag: Flag to test.
10554  *
10555  * This routine grabs the hbalock and then tests the iocb_flag to
10556  * see if the passed in flag is set.
10557  * Returns:
10558  * 1 if flag is set.
10559  * 0 if flag is not set.
10560  **/
10561 static int
10562 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
10563                  struct lpfc_iocbq *piocbq, uint32_t flag)
10564 {
10565         unsigned long iflags;
10566         int ret;
10567
10568         spin_lock_irqsave(&phba->hbalock, iflags);
10569         ret = piocbq->iocb_flag & flag;
10570         spin_unlock_irqrestore(&phba->hbalock, iflags);
10571         return ret;
10572
10573 }
10574
10575 /**
10576  * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
10577  * @phba: Pointer to HBA context object.
10578  * @ring_number: SLI ring number on which to issue the iocb.
10579  * @piocb: Pointer to command iocb.
10580  * @prspiocbq: Pointer to response iocb.
10581  * @timeout: Timeout in number of seconds.
10582  *
10583  * This function issues the iocb to firmware and waits for the
10584  * iocb to complete. The iocb_cmpl field of the iocb shall be used
10585  * to handle iocbs which time out. If the field is NULL, the
10586  * function shall free the iocbq structure.  If more clean up is
10587  * needed, the caller is expected to provide a completion function
10588  * that will provide the needed clean up.  If the iocb command is
10589  * not completed within timeout seconds, the function will either
10590  * free the iocbq structure (if iocb_cmpl == NULL) or execute the
10591  * completion function set in the iocb_cmpl field and then return
10592  * a status of IOCB_TIMEDOUT.  The caller should not free the iocb
10593  * resources if this function returns IOCB_TIMEDOUT.
10594  * The function waits for the iocb completion using a
10595  * non-interruptible wait.
10596  * This function will sleep while waiting for iocb completion.
10597  * So, this function should not be called from any context which
10598  * does not allow sleeping. Due to the same reason, this function
10599  * cannot be called with interrupt disabled.
10600  * This function assumes that the iocb completions occur while
10601  * this function sleeps. So, this function cannot be called from
10602  * the thread which processes iocb completions for this ring.
10603  * This function clears the iocb_flag of the iocb object before
10604  * issuing the iocb and the iocb completion handler sets this
10605  * flag and wakes this thread when the iocb completes.
10606  * The contents of the response iocb will be copied to prspiocbq
10607  * by the completion handler when the command completes.
10608  * This function returns IOCB_SUCCESS when success.
10609  * This function is called with no lock held.
10610  **/
10611 int
10612 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
10613                          uint32_t ring_number,
10614                          struct lpfc_iocbq *piocb,
10615                          struct lpfc_iocbq *prspiocbq,
10616                          uint32_t timeout)
10617 {
10618         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
10619         long timeleft, timeout_req = 0;
10620         int retval = IOCB_SUCCESS;
10621         uint32_t creg_val;
10622         struct lpfc_iocbq *iocb;
10623         int txq_cnt = 0;
10624         int txcmplq_cnt = 0;
10625         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
10626         unsigned long iflags;
10627         bool iocb_completed = true;
10628
10629         /*
10630          * If the caller has provided a response iocbq buffer, then context2
10631          * must be NULL, or it is an error.
10632          */
10633         if (prspiocbq) {
10634                 if (piocb->context2)
10635                         return IOCB_ERROR;
10636                 piocb->context2 = prspiocbq;
10637         }
10638
10639         piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
10640         piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
10641         piocb->context_un.wait_queue = &done_q;
10642         piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
10643
10644         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
10645                 if (lpfc_readl(phba->HCregaddr, &creg_val))
10646                         return IOCB_ERROR;
10647                 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
10648                 writel(creg_val, phba->HCregaddr);
10649                 readl(phba->HCregaddr); /* flush */
10650         }
10651
10652         retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
10653                                      SLI_IOCB_RET_IOCB);
10654         if (retval == IOCB_SUCCESS) {
10655                 timeout_req = msecs_to_jiffies(timeout * 1000);
10656                 timeleft = wait_event_timeout(done_q,
10657                                 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
10658                                 timeout_req);
10659                 spin_lock_irqsave(&phba->hbalock, iflags);
10660                 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
10661
10662                         /*
10663                          * IOCB timed out.  Inform the wake iocb wait
10664                          * completion function and set local status
10665                          */
10666
10667                         iocb_completed = false;
10668                         piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
10669                 }
10670                 spin_unlock_irqrestore(&phba->hbalock, iflags);
10671                 if (iocb_completed) {
10672                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10673                                         "0331 IOCB wake signaled\n");
10674                         /* Note: we are not indicating if the IOCB has a success
10675                          * status or not - that's for the caller to check.
10676                          * IOCB_SUCCESS means just that the command was sent and
10677                          * completed. Not that it completed successfully.
10678                          * */
10679                 } else if (timeleft == 0) {
10680                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10681                                         "0338 IOCB wait timeout error - no "
10682                                         "wake response Data x%x\n", timeout);
10683                         retval = IOCB_TIMEDOUT;
10684                 } else {
10685                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10686                                         "0330 IOCB wake NOT set, "
10687                                         "Data x%x x%lx\n",
10688                                         timeout, (timeleft / jiffies));
10689                         retval = IOCB_TIMEDOUT;
10690                 }
10691         } else if (retval == IOCB_BUSY) {
10692                 if (phba->cfg_log_verbose & LOG_SLI) {
10693                         list_for_each_entry(iocb, &pring->txq, list) {
10694                                 txq_cnt++;
10695                         }
10696                         list_for_each_entry(iocb, &pring->txcmplq, list) {
10697                                 txcmplq_cnt++;
10698                         }
10699                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10700                                 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
10701                                 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
10702                 }
10703                 return retval;
10704         } else {
10705                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10706                                 "0332 IOCB wait issue failed, Data x%x\n",
10707                                 retval);
10708                 retval = IOCB_ERROR;
10709         }
10710
10711         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
10712                 if (lpfc_readl(phba->HCregaddr, &creg_val))
10713                         return IOCB_ERROR;
10714                 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
10715                 writel(creg_val, phba->HCregaddr);
10716                 readl(phba->HCregaddr); /* flush */
10717         }
10718
10719         if (prspiocbq)
10720                 piocb->context2 = NULL;
10721
10722         piocb->context_un.wait_queue = NULL;
10723         piocb->iocb_cmpl = NULL;
10724         return retval;
10725 }
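
/*
 * Usage sketch (illustrative, not part of the driver): synchronous issue
 * on the ELS ring with a caller-supplied response iocbq and a 30 second
 * timeout (an arbitrary value for the example).  Per the contract above,
 * nothing is freed here on IOCB_TIMEDOUT; the deferred completion path
 * owns the cleanup in that case, a simplification this sketch relies on.
 */
static int
lpfc_example_issue_sync(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
        struct lpfc_iocbq *rspiocbq;
        int rc;

        rspiocbq = lpfc_sli_get_iocbq(phba);
        if (!rspiocbq)
                return IOCB_ERROR;

        rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb,
                                      rspiocbq, 30);
        if (rc == IOCB_SUCCESS && rspiocbq->iocb.ulpStatus)
                /* sent and completed, but with a non-zero ulpStatus */
                rc = IOCB_ERROR;

        if (rc != IOCB_TIMEDOUT)
                lpfc_sli_release_iocbq(phba, rspiocbq);
        return rc;
}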
10726
10727 /**
10728  * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
10729  * @phba: Pointer to HBA context object.
10730  * @pmboxq: Pointer to driver mailbox object.
10731  * @timeout: Timeout in number of seconds.
10732  *
10733  * This function issues the mailbox to firmware and waits for the
10734  * mailbox command to complete. If the mailbox command is not
10735  * completed within timeout seconds, it returns MBX_TIMEOUT.
10736  * The function waits for the mailbox completion using an
10737  * interruptible wait. If the thread is woken up due to a
10738  * signal, MBX_TIMEOUT error is returned to the caller. Caller
10739  * should not free the mailbox resources, if this function returns
10740  * MBX_TIMEOUT.
10741  * This function will sleep while waiting for mailbox completion.
10742  * So, this function should not be called from any context which
10743  * does not allow sleeping. Due to the same reason, this function
10744  * cannot be called with interrupt disabled.
10745  * This function assumes that the mailbox completion occurs while
10746  * this function sleeps. So, this function cannot be called from
10747  * the worker thread which processes mailbox completion.
10748  * This function is called in the context of HBA management
10749  * applications.
10750  * This function returns MBX_SUCCESS when successful.
10751  * This function is called with no lock held.
10752  **/
10753 int
10754 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
10755                          uint32_t timeout)
10756 {
10757         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
10758         MAILBOX_t *mb = NULL;
10759         int retval;
10760         unsigned long flag;
10761
10762         /* The caller might set context1 for extended buffer */
10763         if (pmboxq->context1)
10764                 mb = (MAILBOX_t *)pmboxq->context1;
10765
10766         pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
10767         /* setup wake call as mailbox callback */
10768         pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
10769         /* setup context field to pass wait_queue pointer to wake function  */
10770         pmboxq->context1 = &done_q;
10771
10772         /* now issue the command */
10773         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
10774         if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
10775                 wait_event_interruptible_timeout(done_q,
10776                                 pmboxq->mbox_flag & LPFC_MBX_WAKE,
10777                                 msecs_to_jiffies(timeout * 1000));
10778
10779                 spin_lock_irqsave(&phba->hbalock, flag);
10780                 /* restore the possible extended buffer for free resource */
10781                 pmboxq->context1 = (uint8_t *)mb;
10782                 /*
10783                  * if LPFC_MBX_WAKE flag is set the mailbox is completed
10784                  * else do not free the resources.
10785                  */
10786                 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
10787                         retval = MBX_SUCCESS;
10788                 } else {
10789                         retval = MBX_TIMEOUT;
10790                         pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10791                 }
10792                 spin_unlock_irqrestore(&phba->hbalock, flag);
10793         } else {
10794                 /* restore the possible extended buffer for free resource */
10795                 pmboxq->context1 = (uint8_t *)mb;
10796         }
10797
10798         return retval;
10799 }
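
/*
 * Usage sketch (illustrative, not part of the driver): issuing a mailbox
 * command synchronously.  On MBX_TIMEOUT the mailbox must not be freed;
 * mbox_cmpl was reset to lpfc_sli_def_mbox_cmpl above, so the late
 * completion reclaims it.  LPFC_MBOX_TMO serves as a representative
 * timeout in seconds, and mempool_free()/phba->mbox_mem_pool are assumed
 * to be the pool the mailbox was allocated from.
 */
static int
lpfc_example_mbox_sync(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        int rc;

        rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
        if (rc == MBX_TIMEOUT)
                return rc;      /* ownership stays with the late completion */

        if (rc == MBX_SUCCESS && pmboxq->u.mb.mbxStatus)
                lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
                                "example: mailbox completed with status x%x\n",
                                pmboxq->u.mb.mbxStatus);
        mempool_free(pmboxq, phba->mbox_mem_pool);
        return rc;
}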
10800
10801 /**
10802  * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
10803  * @phba: Pointer to HBA context.
10804  * @mbx_action: LPFC_MBX_WAIT or LPFC_MBX_NO_WAIT.
10805  * This function is called to shut down the driver's mailbox sub-system.
10806  * It first marks the mailbox sub-system as blocked to prevent
10807  * asynchronous mailbox commands from being issued off the pending mailbox
10808  * command queue. If the mailbox command sub-system shutdown is due to
10809  * HBA error conditions such as EEH or ERATT, this routine shall invoke
10810  * the mailbox sub-system flush routine to forcefully bring down the
10811  * mailbox sub-system. Otherwise, if it is due to a normal condition (such
10812  * as with offline or HBA function reset), this routine will wait for the
10813  * outstanding mailbox command to complete before invoking the mailbox
10814  * sub-system flush routine to gracefully bring down mailbox sub-system.
10815  **/
10816 void
10817 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
10818 {
10819         struct lpfc_sli *psli = &phba->sli;
10820         unsigned long timeout;
10821
10822         if (mbx_action == LPFC_MBX_NO_WAIT) {
10823                 /* delay 100ms for port state */
10824                 msleep(100);
10825                 lpfc_sli_mbox_sys_flush(phba);
10826                 return;
10827         }
10828         timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
10829
10830         spin_lock_irq(&phba->hbalock);
10831         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
10832
10833         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
10834                 /* Determine how long we might wait for the active mailbox
10835                  * command to be gracefully completed by firmware.
10836                  */
10837                 if (phba->sli.mbox_active)
10838                         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
10839                                                 phba->sli.mbox_active) *
10840                                                 1000) + jiffies;
10841                 spin_unlock_irq(&phba->hbalock);
10842
10843                 while (phba->sli.mbox_active) {
10844                         /* Check active mailbox complete status every 2ms */
10845                         msleep(2);
10846                         if (time_after(jiffies, timeout))
10847                                 /* Timeout, let the mailbox flush routine
10848                                  * forcefully release the active mailbox command
10849                                  */
10850                                 break;
10851                 }
10852         } else
10853                 spin_unlock_irq(&phba->hbalock);
10854
10855         lpfc_sli_mbox_sys_flush(phba);
10856 }
10857
10858 /**
10859  * lpfc_sli_eratt_read - read sli-3 error attention events
10860  * @phba: Pointer to HBA context.
10861  *
10862  * This function is called to read the SLI3 device error attention registers
10863  * for possible error attention events. The caller must hold the hbalock
10864  * with spin_lock_irq().
10865  *
10866  * This function returns 1 when there is Error Attention in the Host Attention
10867  * Register and returns 0 otherwise.
10868  **/
10869 static int
10870 lpfc_sli_eratt_read(struct lpfc_hba *phba)
10871 {
10872         uint32_t ha_copy;
10873
10874         /* Read chip Host Attention (HA) register */
10875         if (lpfc_readl(phba->HAregaddr, &ha_copy))
10876                 goto unplug_err;
10877
10878         if (ha_copy & HA_ERATT) {
10879                 /* Read host status register to retrieve error event */
10880                 if (lpfc_sli_read_hs(phba))
10881                         goto unplug_err;
10882
10883                 /* Check if a deferred error condition is active */
10884                 if ((HS_FFER1 & phba->work_hs) &&
10885                     ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
10886                       HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
10887                         phba->hba_flag |= DEFER_ERATT;
10888                         /* Clear all interrupt enable conditions */
10889                         writel(0, phba->HCregaddr);
10890                         readl(phba->HCregaddr);
10891                 }
10892
10893                 /* Set the driver HA work bitmap */
10894                 phba->work_ha |= HA_ERATT;
10895                 /* Indicate polling handles this ERATT */
10896                 phba->hba_flag |= HBA_ERATT_HANDLED;
10897                 return 1;
10898         }
10899         return 0;
10900
10901 unplug_err:
10902         /* Set the driver HS work bitmap */
10903         phba->work_hs |= UNPLUG_ERR;
10904         /* Set the driver HA work bitmap */
10905         phba->work_ha |= HA_ERATT;
10906         /* Indicate polling handles this ERATT */
10907         phba->hba_flag |= HBA_ERATT_HANDLED;
10908         return 1;
10909 }
10910
10911 /**
10912  * lpfc_sli4_eratt_read - read sli-4 error attention events
10913  * @phba: Pointer to HBA context.
10914  *
10915  * This function is called to read the SLI4 device error attention registers
10916  * for possible error attention events. The caller must hold the hbalock
10917  * with spin_lock_irq().
10918  *
10919  * This function returns 1 when there is Error Attention in the Host Attention
10920  * Register and returns 0 otherwise.
10921  **/
10922 static int
10923 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
10924 {
10925         uint32_t uerr_sta_hi, uerr_sta_lo;
10926         uint32_t if_type, portsmphr;
10927         struct lpfc_register portstat_reg;
10928
10929         /*
10930          * For now, use the SLI4 device internal unrecoverable error
10931          * registers for error attention. This can be changed later.
10932          */
10933         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10934         switch (if_type) {
10935         case LPFC_SLI_INTF_IF_TYPE_0:
10936                 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
10937                         &uerr_sta_lo) ||
10938                         lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
10939                         &uerr_sta_hi)) {
10940                         phba->work_hs |= UNPLUG_ERR;
10941                         phba->work_ha |= HA_ERATT;
10942                         phba->hba_flag |= HBA_ERATT_HANDLED;
10943                         return 1;
10944                 }
10945                 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
10946                     (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
10947                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10948                                         "1423 HBA Unrecoverable error: "
10949                                         "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
10950                                         "ue_mask_lo_reg=0x%x, "
10951                                         "ue_mask_hi_reg=0x%x\n",
10952                                         uerr_sta_lo, uerr_sta_hi,
10953                                         phba->sli4_hba.ue_mask_lo,
10954                                         phba->sli4_hba.ue_mask_hi);
10955                         phba->work_status[0] = uerr_sta_lo;
10956                         phba->work_status[1] = uerr_sta_hi;
10957                         phba->work_ha |= HA_ERATT;
10958                         phba->hba_flag |= HBA_ERATT_HANDLED;
10959                         return 1;
10960                 }
10961                 break;
10962         case LPFC_SLI_INTF_IF_TYPE_2:
10963                 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
10964                         &portstat_reg.word0) ||
10965                         lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
10966                         &portsmphr)){
10967                         phba->work_hs |= UNPLUG_ERR;
10968                         phba->work_ha |= HA_ERATT;
10969                         phba->hba_flag |= HBA_ERATT_HANDLED;
10970                         return 1;
10971                 }
10972                 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
10973                         phba->work_status[0] =
10974                                 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
10975                         phba->work_status[1] =
10976                                 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
10977                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10978                                         "2885 Port Status Event: "
10979                                         "port status reg 0x%x, "
10980                                         "port smphr reg 0x%x, "
10981                                         "error 1=0x%x, error 2=0x%x\n",
10982                                         portstat_reg.word0,
10983                                         portsmphr,
10984                                         phba->work_status[0],
10985                                         phba->work_status[1]);
10986                         phba->work_ha |= HA_ERATT;
10987                         phba->hba_flag |= HBA_ERATT_HANDLED;
10988                         return 1;
10989                 }
10990                 break;
10991         case LPFC_SLI_INTF_IF_TYPE_1:
10992         default:
10993                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10994                                 "2886 HBA Error Attention on unsupported "
10995                                 "if type %d.\n", if_type);
10996                 return 1;
10997         }
10998
10999         return 0;
11000 }
11001
11002 /**
11003  * lpfc_sli_check_eratt - check error attention events
11004  * @phba: Pointer to HBA context.
11005  *
11006  * This function is called from timer soft interrupt context to check HBA's
11007  * error attention register bit for error attention events.
11008  *
11009  * This function returns 1 when there is Error Attention in the Host Attention
11010  * Register and returns 0 otherwise.
11011  **/
11012 int
11013 lpfc_sli_check_eratt(struct lpfc_hba *phba)
11014 {
11015         uint32_t ha_copy;
11016
11017         /* If somebody is waiting to handle an eratt, don't process it
11018          * here. The brdkill function will do this.
11019          */
11020         if (phba->link_flag & LS_IGNORE_ERATT)
11021                 return 0;
11022
11023         /* Check if interrupt handler handles this ERATT */
11024         spin_lock_irq(&phba->hbalock);
11025         if (phba->hba_flag & HBA_ERATT_HANDLED) {
11026                 /* Interrupt handler has handled ERATT */
11027                 spin_unlock_irq(&phba->hbalock);
11028                 return 0;
11029         }
11030
11031         /*
11032          * If there is deferred error attention, do not check for error
11033          * attention
11034          */
11035         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11036                 spin_unlock_irq(&phba->hbalock);
11037                 return 0;
11038         }
11039
11040         /* If PCI channel is offline, don't process it */
11041         if (unlikely(pci_channel_offline(phba->pcidev))) {
11042                 spin_unlock_irq(&phba->hbalock);
11043                 return 0;
11044         }
11045
11046         switch (phba->sli_rev) {
11047         case LPFC_SLI_REV2:
11048         case LPFC_SLI_REV3:
11049                 /* Read chip Host Attention (HA) register */
11050                 ha_copy = lpfc_sli_eratt_read(phba);
11051                 break;
11052         case LPFC_SLI_REV4:
11053                 /* Read device Unrecoverable Error (UERR) registers */
11054                 ha_copy = lpfc_sli4_eratt_read(phba);
11055                 break;
11056         default:
11057                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11058                                 "0299 Invalid SLI revision (%d)\n",
11059                                 phba->sli_rev);
11060                 ha_copy = 0;
11061                 break;
11062         }
11063         spin_unlock_irq(&phba->hbalock);
11064
11065         return ha_copy;
11066 }
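
/*
 * Usage sketch (illustrative, not part of the driver): the routine above
 * is intended for timer/poll context.  When it returns 1, the polling
 * path owns the ERATT and would typically wake the worker thread to act
 * on phba->work_ha; lpfc_worker_wake_up() is assumed to be the usual
 * wakeup used elsewhere in the driver.
 */
static void
lpfc_example_eratt_poll(struct lpfc_hba *phba)
{
        if (lpfc_sli_check_eratt(phba))
                lpfc_worker_wake_up(phba);
}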
11067
11068 /**
11069  * lpfc_intr_state_check - Check device state for interrupt handling
11070  * @phba: Pointer to HBA context.
11071  *
11072  * This inline routine checks whether a device or its PCI slot is in a
11073  * state in which the interrupt should be handled.
11074  *
11075  * This function returns 0 if the device or the PCI slot is in a state in
11076  * which the interrupt should be handled, otherwise -EIO.
11077  **/
11078 static inline int
11079 lpfc_intr_state_check(struct lpfc_hba *phba)
11080 {
11081         /* If the pci channel is offline, ignore all the interrupts */
11082         if (unlikely(pci_channel_offline(phba->pcidev)))
11083                 return -EIO;
11084
11085         /* Update device level interrupt statistics */
11086         phba->sli.slistat.sli_intr++;
11087
11088         /* Ignore all interrupts during initialization. */
11089         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
11090                 return -EIO;
11091
11092         return 0;
11093 }
11094
11095 /**
11096  * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
11097  * @irq: Interrupt number.
11098  * @dev_id: The device context pointer.
11099  *
11100  * This function is directly called from the PCI layer as an interrupt
11101  * service routine when device with SLI-3 interface spec is enabled with
11102  * MSI-X multi-message interrupt mode and there are slow-path events in
11103  * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
11104  * interrupt mode, this function is called as part of the device-level
11105  * interrupt handler. When the PCI slot is in error recovery or the HBA
11106  * is undergoing initialization, the interrupt handler will not process
11107  * the interrupt. The link attention and ELS ring attention events are
11108  * handled by the worker thread. The interrupt handler signals the worker
11109  * thread and returns for these events. This function is called without
11110  * any lock held. It gets the hbalock to access and update SLI data
11111  * structures.
11112  *
11113  * This function returns IRQ_HANDLED when interrupt is handled else it
11114  * returns IRQ_NONE.
11115  **/
11116 irqreturn_t
11117 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
11118 {
11119         struct lpfc_hba  *phba;
11120         uint32_t ha_copy, hc_copy;
11121         uint32_t work_ha_copy;
11122         unsigned long status;
11123         unsigned long iflag;
11124         uint32_t control;
11125
11126         MAILBOX_t *mbox, *pmbox;
11127         struct lpfc_vport *vport;
11128         struct lpfc_nodelist *ndlp;
11129         struct lpfc_dmabuf *mp;
11130         LPFC_MBOXQ_t *pmb;
11131         int rc;
11132
11133         /*
11134          * Get the driver's phba structure from the dev_id and
11135          * assume the HBA is not interrupting.
11136          */
11137         phba = (struct lpfc_hba *)dev_id;
11138
11139         if (unlikely(!phba))
11140                 return IRQ_NONE;
11141
11142         /*
11143          * Stuff needs to be attended to when this function is invoked as an
11144          * individual interrupt handler in MSI-X multi-message interrupt mode
11145          */
11146         if (phba->intr_type == MSIX) {
11147                 /* Check device state for handling interrupt */
11148                 if (lpfc_intr_state_check(phba))
11149                         return IRQ_NONE;
11150                 /* Need to read HA REG for slow-path events */
11151                 spin_lock_irqsave(&phba->hbalock, iflag);
11152                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
11153                         goto unplug_error;
11154                 /* If somebody is waiting to handle an eratt don't process it
11155                  * here. The brdkill function will do this.
11156                  */
11157                 if (phba->link_flag & LS_IGNORE_ERATT)
11158                         ha_copy &= ~HA_ERATT;
11159                 /* Check the need for handling ERATT in interrupt handler */
11160                 if (ha_copy & HA_ERATT) {
11161                         if (phba->hba_flag & HBA_ERATT_HANDLED)
11162                                 /* ERATT polling has handled ERATT */
11163                                 ha_copy &= ~HA_ERATT;
11164                         else
11165                                 /* Indicate interrupt handler handles ERATT */
11166                                 phba->hba_flag |= HBA_ERATT_HANDLED;
11167                 }
11168
11169                 /*
11170                  * If there is deferred error attention, do not check for any
11171                  * interrupt.
11172                  */
11173                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11174                         spin_unlock_irqrestore(&phba->hbalock, iflag);
11175                         return IRQ_NONE;
11176                 }
11177
11178                 /* Clear up only attention source related to slow-path */
11179                 if (lpfc_readl(phba->HCregaddr, &hc_copy))
11180                         goto unplug_error;
11181
11182                 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
11183                         HC_LAINT_ENA | HC_ERINT_ENA),
11184                         phba->HCregaddr);
11185                 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
11186                         phba->HAregaddr);
11187                 writel(hc_copy, phba->HCregaddr);
11188                 readl(phba->HAregaddr); /* flush */
11189                 spin_unlock_irqrestore(&phba->hbalock, iflag);
11190         } else
11191                 ha_copy = phba->ha_copy;
11192
11193         work_ha_copy = ha_copy & phba->work_ha_mask;
11194
11195         if (work_ha_copy) {
11196                 if (work_ha_copy & HA_LATT) {
11197                         if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
11198                                 /*
11199                                  * Turn off Link Attention interrupts
11200                                  * until CLEAR_LA done
11201                                  */
11202                                 spin_lock_irqsave(&phba->hbalock, iflag);
11203                                 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
11204                                 if (lpfc_readl(phba->HCregaddr, &control))
11205                                         goto unplug_error;
11206                                 control &= ~HC_LAINT_ENA;
11207                                 writel(control, phba->HCregaddr);
11208                                 readl(phba->HCregaddr); /* flush */
11209                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
11210                         }
11211                         else
11212                                 work_ha_copy &= ~HA_LATT;
11213                 }
11214
11215                 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
11216                         /*
11217                          * Turn off Slow Rings interrupts, LPFC_ELS_RING is
11218                          * the only slow ring.
11219                          */
11220                         status = (work_ha_copy &
11221                                 (HA_RXMASK  << (4*LPFC_ELS_RING)));
11222                         status >>= (4*LPFC_ELS_RING);
11223                         if (status & HA_RXMASK) {
11224                                 spin_lock_irqsave(&phba->hbalock, iflag);
11225                                 if (lpfc_readl(phba->HCregaddr, &control))
11226                                         goto unplug_error;
11227
11228                                 lpfc_debugfs_slow_ring_trc(phba,
11229                                 "ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
11230                                 control, status,
11231                                 (uint32_t)phba->sli.slistat.sli_intr);
11232
11233                                 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
11234                                         lpfc_debugfs_slow_ring_trc(phba,
11235                                                 "ISR Disable ring:"
11236                                                 "pwork:x%x hawork:x%x wait:x%x",
11237                                                 phba->work_ha, work_ha_copy,
11238                                                 (uint32_t)((unsigned long)
11239                                                 &phba->work_waitq));
11240
11241                                         control &=
11242                                             ~(HC_R0INT_ENA << LPFC_ELS_RING);
11243                                         writel(control, phba->HCregaddr);
11244                                         readl(phba->HCregaddr); /* flush */
11245                                 }
11246                                 else {
11247                                         lpfc_debugfs_slow_ring_trc(phba,
11248                                                 "ISR slow ring:   pwork:"
11249                                                 "x%x hawork:x%x wait:x%x",
11250                                                 phba->work_ha, work_ha_copy,
11251                                                 (uint32_t)((unsigned long)
11252                                                 &phba->work_waitq));
11253                                 }
11254                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
11255                         }
11256                 }
11257                 spin_lock_irqsave(&phba->hbalock, iflag);
11258                 if (work_ha_copy & HA_ERATT) {
11259                         if (lpfc_sli_read_hs(phba))
11260                                 goto unplug_error;
11261                         /*
11262                          * Check if a deferred error condition
11263                          * is active
11264                          */
11265                         if ((HS_FFER1 & phba->work_hs) &&
11266                                 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
11267                                   HS_FFER6 | HS_FFER7 | HS_FFER8) &
11268                                   phba->work_hs)) {
11269                                 phba->hba_flag |= DEFER_ERATT;
11270                                 /* Clear all interrupt enable conditions */
11271                                 writel(0, phba->HCregaddr);
11272                                 readl(phba->HCregaddr);
11273                         }
11274                 }
11275
11276                 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
11277                         pmb = phba->sli.mbox_active;
11278                         pmbox = &pmb->u.mb;
11279                         mbox = phba->mbox;
11280                         vport = pmb->vport;
11281
11282                         /* First check out the status word */
11283                         lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
11284                         if (pmbox->mbxOwner != OWN_HOST) {
11285                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
11286                                 /*
11287                                  * Stray Mailbox Interrupt, mbxCommand <cmd>
11288                                  * mbxStatus <status>
11289                                  */
11290                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11291                                                 LOG_SLI,
11292                                                 "(%d):0304 Stray Mailbox "
11293                                                 "Interrupt mbxCommand x%x "
11294                                                 "mbxStatus x%x\n",
11295                                                 (vport ? vport->vpi : 0),
11296                                                 pmbox->mbxCommand,
11297                                                 pmbox->mbxStatus);
11298                                 /* clear mailbox attention bit */
11299                                 work_ha_copy &= ~HA_MBATT;
11300                         } else {
11301                                 phba->sli.mbox_active = NULL;
11302                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
11303                                 phba->last_completion_time = jiffies;
11304                                 del_timer(&phba->sli.mbox_tmo);
11305                                 if (pmb->mbox_cmpl) {
11306                                         lpfc_sli_pcimem_bcopy(mbox, pmbox,
11307                                                         MAILBOX_CMD_SIZE);
11308                                         if (pmb->out_ext_byte_len &&
11309                                                 pmb->context2)
11310                                                 lpfc_sli_pcimem_bcopy(
11311                                                 phba->mbox_ext,
11312                                                 pmb->context2,
11313                                                 pmb->out_ext_byte_len);
11314                                 }
11315                                 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
11316                                         pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
11317
11318                                         lpfc_debugfs_disc_trc(vport,
11319                                                 LPFC_DISC_TRC_MBOX_VPORT,
11320                                                 "MBOX dflt rpi: "
11321                                                 "status:x%x rpi:x%x",
11322                                                 (uint32_t)pmbox->mbxStatus,
11323                                                 pmbox->un.varWords[0], 0);
11324
11325                                         if (!pmbox->mbxStatus) {
11326                                                 mp = (struct lpfc_dmabuf *)
11327                                                         (pmb->context1);
11328                                                 ndlp = (struct lpfc_nodelist *)
11329                                                         pmb->context2;
11330
11331                                                 /* Reg_LOGIN of dflt RPI was
11332                                                  * successful. Now let's get
11333                                                  * rid of the RPI using the
11334                                                  * same mbox buffer.
11335                                                  */
11336                                                 lpfc_unreg_login(phba,
11337                                                         vport->vpi,
11338                                                         pmbox->un.varWords[0],
11339                                                         pmb);
11340                                                 pmb->mbox_cmpl =
11341                                                         lpfc_mbx_cmpl_dflt_rpi;
11342                                                 pmb->context1 = mp;
11343                                                 pmb->context2 = ndlp;
11344                                                 pmb->vport = vport;
11345                                                 rc = lpfc_sli_issue_mbox(phba,
11346                                                                 pmb,
11347                                                                 MBX_NOWAIT);
11348                                                 if (rc != MBX_BUSY)
11349                                                         lpfc_printf_log(phba,
11350                                                         KERN_ERR,
11351                                                         LOG_MBOX | LOG_SLI,
11352                                                         "0350 rc should have "
11353                                                         "been MBX_BUSY\n");
11354                                                 if (rc != MBX_NOT_FINISHED)
11355                                                         goto send_current_mbox;
11356                                         }
11357                                 }
11358                                 spin_lock_irqsave(
11359                                                 &phba->pport->work_port_lock,
11360                                                 iflag);
11361                                 phba->pport->work_port_events &=
11362                                         ~WORKER_MBOX_TMO;
11363                                 spin_unlock_irqrestore(
11364                                                 &phba->pport->work_port_lock,
11365                                                 iflag);
11366                                 lpfc_mbox_cmpl_put(phba, pmb);
11367                         }
11368                 } else
11369                         spin_unlock_irqrestore(&phba->hbalock, iflag);
11370
11371                 if ((work_ha_copy & HA_MBATT) &&
11372                     (phba->sli.mbox_active == NULL)) {
11373 send_current_mbox:
11374                         /* Process next mailbox command if there is one */
11375                         do {
11376                                 rc = lpfc_sli_issue_mbox(phba, NULL,
11377                                                          MBX_NOWAIT);
11378                         } while (rc == MBX_NOT_FINISHED);
11379                         if (rc != MBX_SUCCESS)
11380                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11381                                                 LOG_SLI, "0349 rc should be "
11382                                                 "MBX_SUCCESS\n");
11383                 }
11384
11385                 spin_lock_irqsave(&phba->hbalock, iflag);
11386                 phba->work_ha |= work_ha_copy;
11387                 spin_unlock_irqrestore(&phba->hbalock, iflag);
11388                 lpfc_worker_wake_up(phba);
11389         }
11390         return IRQ_HANDLED;
11391 unplug_error:
11392         spin_unlock_irqrestore(&phba->hbalock, iflag);
11393         return IRQ_HANDLED;
11394
11395 } /* lpfc_sli_sp_intr_handler */
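
/*
 * Illustrative sketch, not part of the driver: how a slow-path handler
 * like the one above is typically wired to its own MSI-X vector with
 * request_irq().  The vector number and the "lpfc:sp" name here are
 * hypothetical; lpfc's real MSI-X setup lives elsewhere in the driver.
 * dev_id carries the phba so the handler can recover its context.
 */
static int __maybe_unused
example_setup_sp_vector(struct lpfc_hba *phba, int sp_irq)
{
	return request_irq(sp_irq, lpfc_sli_sp_intr_handler, 0,
			   "lpfc:sp", phba);
}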
11396
11397 /**
11398  * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
11399  * @irq: Interrupt number.
11400  * @dev_id: The device context pointer.
11401  *
11402  * This function is directly called from the PCI layer as an interrupt
11403  * service routine when device with SLI-3 interface spec is enabled with
11404  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
11405  * ring event in the HBA. However, when the device is enabled with either
11406  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
11407  * device-level interrupt handler. When the PCI slot is in error recovery
11408  * or the HBA is undergoing initialization, the interrupt handler will not
11409  * process the interrupt. SCSI FCP fast-path ring events are handled in
11410  * interrupt context. This function is called without any lock held.
11411  * It gets the hbalock to access and update SLI data structures.
11412  *
11413  * This function returns IRQ_HANDLED when the interrupt is handled, else it
11414  * returns IRQ_NONE.
11415  **/
11416 irqreturn_t
11417 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
11418 {
11419         struct lpfc_hba  *phba;
11420         uint32_t ha_copy;
11421         unsigned long status;
11422         unsigned long iflag;
11423
11424         /* Get the driver's phba structure from the dev_id and
11425          * assume the HBA is not interrupting.
11426          */
11427         phba = (struct lpfc_hba *) dev_id;
11428
11429         if (unlikely(!phba))
11430                 return IRQ_NONE;
11431
11432         /*
11433          * Stuff needs to be attended to when this function is invoked as an
11434          * individual interrupt handler in MSI-X multi-message interrupt mode.
11435          */
11436         if (phba->intr_type == MSIX) {
11437                 /* Check device state for handling interrupt */
11438                 if (lpfc_intr_state_check(phba))
11439                         return IRQ_NONE;
11440                 /* Need to read HA REG for FCP ring and other ring events */
11441                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
11442                         return IRQ_HANDLED;
11443                 /* Clear up only attention source related to fast-path */
11444                 spin_lock_irqsave(&phba->hbalock, iflag);
11445                 /*
11446                  * If there is deferred error attention, do not check for
11447                  * any interrupt.
11448                  */
11449                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11450                         spin_unlock_irqrestore(&phba->hbalock, iflag);
11451                         return IRQ_NONE;
11452                 }
11453                 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
11454                         phba->HAregaddr);
11455                 readl(phba->HAregaddr); /* flush */
11456                 spin_unlock_irqrestore(&phba->hbalock, iflag);
11457         } else
11458                 ha_copy = phba->ha_copy;
11459
11460         /*
11461          * Process all events on FCP ring. Take the optimized path for FCP IO.
11462          */
11463         ha_copy &= ~(phba->work_ha_mask);
11464
11465         status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
11466         status >>= (4*LPFC_FCP_RING);
11467         if (status & HA_RXMASK)
11468                 lpfc_sli_handle_fast_ring_event(phba,
11469                                                 &phba->sli.ring[LPFC_FCP_RING],
11470                                                 status);
11471
11472         if (phba->cfg_multi_ring_support == 2) {
11473                 /*
11474                  * Process all events on extra ring. Take the optimized path
11475                  * for extra ring IO.
11476                  */
11477                 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
11478                 status >>= (4*LPFC_EXTRA_RING);
11479                 if (status & HA_RXMASK) {
11480                         lpfc_sli_handle_fast_ring_event(phba,
11481                                         &phba->sli.ring[LPFC_EXTRA_RING],
11482                                         status);
11483                 }
11484         }
11485         return IRQ_HANDLED;
11486 }  /* lpfc_sli_fp_intr_handler */
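
/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * HA register packs per-ring attention bits into 4-bit nibbles, so a
 * ring's RX status is recovered by shifting down 4 * ring and masking
 * with HA_RXMASK, the same arithmetic both handlers above perform inline.
 */
static inline unsigned long __maybe_unused
example_ring_rx_status(uint32_t ha_copy, int ring)
{
	return (ha_copy >> (4 * ring)) & HA_RXMASK;
}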
11487
11488 /**
11489  * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
11490  * @irq: Interrupt number.
11491  * @dev_id: The device context pointer.
11492  *
11493  * This function is the HBA device-level interrupt handler to device with
11494  * SLI-3 interface spec, called from the PCI layer when either MSI or
11495  * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
11496  * requires driver attention. This function invokes the slow-path interrupt
11497  * attention handling function and fast-path interrupt attention handling
11498  * function in turn to process the relevant HBA attention events. This
11499  * function is called without any lock held. It gets the hbalock to access
11500  * and update SLI data structures.
11501  *
11502  * This function returns IRQ_HANDLED when interrupt is handled, else it
11503  * returns IRQ_NONE.
11504  **/
11505 irqreturn_t
11506 lpfc_sli_intr_handler(int irq, void *dev_id)
11507 {
11508         struct lpfc_hba  *phba;
11509         irqreturn_t sp_irq_rc, fp_irq_rc;
11510         unsigned long status1, status2;
11511         uint32_t hc_copy;
11512
11513         /*
11514          * Get the driver's phba structure from the dev_id and
11515          * assume the HBA is not interrupting.
11516          */
11517         phba = (struct lpfc_hba *) dev_id;
11518
11519         if (unlikely(!phba))
11520                 return IRQ_NONE;
11521
11522         /* Check device state for handling interrupt */
11523         if (lpfc_intr_state_check(phba))
11524                 return IRQ_NONE;
11525
11526         spin_lock(&phba->hbalock);
11527         if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
11528                 spin_unlock(&phba->hbalock);
11529                 return IRQ_HANDLED;
11530         }
11531
11532         if (unlikely(!phba->ha_copy)) {
11533                 spin_unlock(&phba->hbalock);
11534                 return IRQ_NONE;
11535         } else if (phba->ha_copy & HA_ERATT) {
11536                 if (phba->hba_flag & HBA_ERATT_HANDLED)
11537                         /* ERATT polling has handled ERATT */
11538                         phba->ha_copy &= ~HA_ERATT;
11539                 else
11540                         /* Indicate interrupt handler handles ERATT */
11541                         phba->hba_flag |= HBA_ERATT_HANDLED;
11542         }
11543
11544         /*
11545          * If there is deferred error attention, do not check for any interrupt.
11546          */
11547         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11548                 spin_unlock(&phba->hbalock);
11549                 return IRQ_NONE;
11550         }
11551
11552         /* Clear attention sources except link and error attentions */
11553         if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
11554                 spin_unlock(&phba->hbalock);
11555                 return IRQ_HANDLED;
11556         }
11557         writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
11558                 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
11559                 phba->HCregaddr);
11560         writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
11561         writel(hc_copy, phba->HCregaddr);
11562         readl(phba->HAregaddr); /* flush */
11563         spin_unlock(&phba->hbalock);
11564
11565         /*
11566          * Invokes slow-path host attention interrupt handling as appropriate.
11567          */
11568
11569         /* status of events with mailbox and link attention */
11570         status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
11571
11572         /* status of events with ELS ring */
11573         status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
11574         status2 >>= (4*LPFC_ELS_RING);
11575
11576         if (status1 || (status2 & HA_RXMASK))
11577                 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
11578         else
11579                 sp_irq_rc = IRQ_NONE;
11580
11581         /*
11582          * Invoke fast-path host attention interrupt handling as appropriate.
11583          */
11584
11585         /* status of events with FCP ring */
11586         status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
11587         status1 >>= (4*LPFC_FCP_RING);
11588
11589         /* status of events with extra ring */
11590         if (phba->cfg_multi_ring_support == 2) {
11591                 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
11592                 status2 >>= (4*LPFC_EXTRA_RING);
11593         } else
11594                 status2 = 0;
11595
11596         if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
11597                 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
11598         else
11599                 fp_irq_rc = IRQ_NONE;
11600
11601         /* Return device-level interrupt handling status */
11602         return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
11603 }  /* lpfc_sli_intr_handler */
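
/*
 * Illustrative sketch (hypothetical helper): the device-level handler
 * above credits the interrupt whenever either sub-handler consumed work,
 * i.e. IRQ_HANDLED from the slow path wins over the fast-path result.
 */
static inline irqreturn_t __maybe_unused
example_combine_irq_rc(irqreturn_t sp_rc, irqreturn_t fp_rc)
{
	return (sp_rc == IRQ_HANDLED) ? sp_rc : fp_rc;
}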
11604
11605 /**
11606  * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
11607  * @phba: pointer to lpfc hba data structure.
11608  *
11609  * This routine is invoked by the worker thread to process all the pending
11610  * SLI4 FCP abort XRI events.
11611  **/
11612 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
11613 {
11614         struct lpfc_cq_event *cq_event;
11615
11616         /* First, declare the fcp xri abort event has been handled */
11617         spin_lock_irq(&phba->hbalock);
11618         phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
11619         spin_unlock_irq(&phba->hbalock);
11620         /* Now, handle all the fcp xri abort events */
11621         while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
11622                 /* Get the first event from the head of the event queue */
11623                 spin_lock_irq(&phba->hbalock);
11624                 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
11625                                  cq_event, struct lpfc_cq_event, list);
11626                 spin_unlock_irq(&phba->hbalock);
11627                 /* Notify aborted XRI for FCP work queue */
11628                 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
11629                 /* Free the event processed back to the free pool */
11630                 lpfc_sli4_cq_event_release(phba, cq_event);
11631         }
11632 }
11633
11634 /**
11635  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
11636  * @phba: pointer to lpfc hba data structure.
11637  *
11638  * This routine is invoked by the worker thread to process all the pending
11639  * SLI4 els abort xri events.
11640  **/
11641 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
11642 {
11643         struct lpfc_cq_event *cq_event;
11644
11645         /* First, declare the els xri abort event has been handled */
11646         spin_lock_irq(&phba->hbalock);
11647         phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
11648         spin_unlock_irq(&phba->hbalock);
11649         /* Now, handle all the els xri abort events */
11650         while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
11651                 /* Get the first event from the head of the event queue */
11652                 spin_lock_irq(&phba->hbalock);
11653                 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11654                                  cq_event, struct lpfc_cq_event, list);
11655                 spin_unlock_irq(&phba->hbalock);
11656                 /* Notify aborted XRI for ELS work queue */
11657                 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
11658                 /* Free the event processed back to the free pool */
11659                 lpfc_sli4_cq_event_release(phba, cq_event);
11660         }
11661 }
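
/*
 * Illustrative sketch (hypothetical helper) of the drain pattern shared
 * by the two routines above: hold the hbalock only while unlinking an
 * event, then notify and release with the lock dropped so the callback
 * is free to sleep or take other locks.
 */
static void __maybe_unused
example_drain_xri_events(struct lpfc_hba *phba, struct list_head *wq,
			 void (*notify)(struct lpfc_hba *,
					struct sli4_wcqe_xri_aborted *))
{
	struct lpfc_cq_event *cq_event;

	while (!list_empty(wq)) {
		spin_lock_irq(&phba->hbalock);
		list_remove_head(wq, cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		notify(phba, &cq_event->cqe.wcqe_axri);
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}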
11662
11663 /**
11664  * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
11665  * @phba: pointer to lpfc hba data structure
11666  * @pIocbIn: pointer to the rspiocbq
11667  * @pIocbOut: pointer to the cmdiocbq
11668  * @wcqe: pointer to the complete wcqe
11669  *
11670  * This routine transfers the fields of a command iocbq to a response iocbq
11671  * by copying all the IOCB fields from command iocbq and transferring the
11672  * completion status information from the complete wcqe.
11673  **/
11674 static void
11675 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
11676                               struct lpfc_iocbq *pIocbIn,
11677                               struct lpfc_iocbq *pIocbOut,
11678                               struct lpfc_wcqe_complete *wcqe)
11679 {
11680         int numBdes, i;
11681         unsigned long iflags;
11682         uint32_t status, max_response;
11683         struct lpfc_dmabuf *dmabuf;
11684         struct ulp_bde64 *bpl, bde;
11685         size_t offset = offsetof(struct lpfc_iocbq, iocb);
11686
11687         memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
11688                sizeof(struct lpfc_iocbq) - offset);
11689         /* Map WCQE parameters into irspiocb parameters */
11690         status = bf_get(lpfc_wcqe_c_status, wcqe);
11691         pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
11692         if (pIocbOut->iocb_flag & LPFC_IO_FCP) {
11693                 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
11694                         pIocbIn->iocb.un.fcpi.fcpi_parm =
11695                                         pIocbOut->iocb.un.fcpi.fcpi_parm -
11696                                         wcqe->total_data_placed;
11697                 else
11698                         pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
11699         } else {
11700                 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
11701                 switch (pIocbOut->iocb.ulpCommand) {
11702                 case CMD_ELS_REQUEST64_CR:
11703                         dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
11704                         bpl  = (struct ulp_bde64 *)dmabuf->virt;
11705                         bde.tus.w = le32_to_cpu(bpl[1].tus.w);
11706                         max_response = bde.tus.f.bdeSize;
11707                         break;
11708                 case CMD_GEN_REQUEST64_CR:
11709                         max_response = 0;
11710                         if (!pIocbOut->context3)
11711                                 break;
11712                         numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
11713                                         sizeof(struct ulp_bde64);
11714                         dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
11715                         bpl = (struct ulp_bde64 *)dmabuf->virt;
11716                         for (i = 0; i < numBdes; i++) {
11717                                 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
11718                                 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
11719                                         max_response += bde.tus.f.bdeSize;
11720                         }
11721                         break;
11722                 default:
11723                         max_response = wcqe->total_data_placed;
11724                         break;
11725                 }
11726                 if (max_response < wcqe->total_data_placed)
11727                         pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
11728                 else
11729                         pIocbIn->iocb.un.genreq64.bdl.bdeSize =
11730                                 wcqe->total_data_placed;
11731         }
11732
11733         /* Convert BG errors for completion status */
11734         if (status == CQE_STATUS_DI_ERROR) {
11735                 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
11736
11737                 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
11738                         pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
11739                 else
11740                         pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
11741
11742                 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
11743                 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
11744                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11745                                 BGS_GUARD_ERR_MASK;
11746                 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
11747                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11748                                 BGS_APPTAG_ERR_MASK;
11749                 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
11750                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11751                                 BGS_REFTAG_ERR_MASK;
11752
11753                 /* Check to see if there was any good data before the error */
11754                 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
11755                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11756                                 BGS_HI_WATER_MARK_PRESENT_MASK;
11757                         pIocbIn->iocb.unsli3.sli3_bg.bghm =
11758                                 wcqe->total_data_placed;
11759                 }
11760
11761                 /*
11762                  * Set ALL the error bits to indicate we don't know what
11763                  * type of error it is.
11764                  */
11765                 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
11766                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11767                                 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
11768                                 BGS_GUARD_ERR_MASK);
11769         }
11770
11771         /* Pick up HBA exchange busy condition */
11772         if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
11773                 spin_lock_irqsave(&phba->hbalock, iflags);
11774                 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
11775                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11776         }
11777 }
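
/*
 * Illustrative sketch (hypothetical helper): once the transfer above has
 * run, completion handlers read everything from the pseudo response
 * IOCB; e.g. the WCQE's XB bit has been latched into iocb_flag.
 */
static inline bool __maybe_unused
example_rsp_exch_busy(struct lpfc_iocbq *rspiocbq)
{
	return (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY) != 0;
}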
11778
11779 /**
11780  * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
11781  * @phba: Pointer to HBA context object.
11782  * @irspiocbq: Pointer to the response IOCBQ carrying the ELS WCQE.
11783  *
11784  * This routine handles an ELS work-queue completion event and constructs
11785  * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
11786  * discovery engine to handle.
11787  *
11788  * Return: Pointer to the response IOCBQ, NULL otherwise.
11789  **/
11790 static struct lpfc_iocbq *
11791 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
11792                                struct lpfc_iocbq *irspiocbq)
11793 {
11794         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
11795         struct lpfc_iocbq *cmdiocbq;
11796         struct lpfc_wcqe_complete *wcqe;
11797         unsigned long iflags;
11798
11799         wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
11800         spin_lock_irqsave(&pring->ring_lock, iflags);
11801         pring->stats.iocb_event++;
11802         /* Look up the ELS command IOCB and create pseudo response IOCB */
11803         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11804                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11805         spin_unlock_irqrestore(&pring->ring_lock, iflags);
11806
11807         if (unlikely(!cmdiocbq)) {
11808                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11809                                 "0386 ELS complete with no corresponding "
11810                                 "cmdiocb: iotag (%d)\n",
11811                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11812                 lpfc_sli_release_iocbq(phba, irspiocbq);
11813                 return NULL;
11814         }
11815
11816         /* Fake the irspiocbq and copy necessary response information */
11817         lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
11818
11819         return irspiocbq;
11820 }
11821
11822 /**
11823  * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
11824  * @phba: Pointer to HBA context object.
11825  * @mcqe: Pointer to mailbox completion queue entry.
11826  *
11827  * This routine processes a mailbox completion queue entry carrying an
11828  * asynchronous event.
11829  *
11830  * Return: true if work posted to worker thread, otherwise false.
11831  **/
11832 static bool
11833 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
11834 {
11835         struct lpfc_cq_event *cq_event;
11836         unsigned long iflags;
11837
11838         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11839                         "0392 Async Event: word0:x%x, word1:x%x, "
11840                         "word2:x%x, word3:x%x\n", mcqe->word0,
11841                         mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
11842
11843         /* Allocate a new internal CQ_EVENT entry */
11844         cq_event = lpfc_sli4_cq_event_alloc(phba);
11845         if (!cq_event) {
11846                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11847                                 "0394 Failed to allocate CQ_EVENT entry\n");
11848                 return false;
11849         }
11850
11851         /* Move the CQE into an asynchronous event entry */
11852         memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
11853         spin_lock_irqsave(&phba->hbalock, iflags);
11854         list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
11855         /* Set the async event flag */
11856         phba->hba_flag |= ASYNC_EVENT;
11857         spin_unlock_irqrestore(&phba->hbalock, iflags);
11858
11859         return true;
11860 }
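
/*
 * Illustrative sketch (hypothetical caller): the routine above only
 * queues the event; its return value tells the caller whether the
 * worker thread now has something to do.
 */
static void __maybe_unused
example_post_async_mcqe(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	if (lpfc_sli4_sp_handle_async_event(phba, mcqe))
		lpfc_worker_wake_up(phba);	/* work was posted */
}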
11861
11862 /**
11863  * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
11864  * @phba: Pointer to HBA context object.
11865  * @mcqe: Pointer to mailbox completion queue entry.
11866  *
11867  * This routine processes a mailbox completion queue entry carrying a
11868  * mailbox completion event.
11869  *
11870  * Return: true if work posted to worker thread, otherwise false.
11871  **/
11872 static bool
11873 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
11874 {
11875         uint32_t mcqe_status;
11876         MAILBOX_t *mbox, *pmbox;
11877         struct lpfc_mqe *mqe;
11878         struct lpfc_vport *vport;
11879         struct lpfc_nodelist *ndlp;
11880         struct lpfc_dmabuf *mp;
11881         unsigned long iflags;
11882         LPFC_MBOXQ_t *pmb;
11883         bool workposted = false;
11884         int rc;
11885
11886         /* Not a mailbox complete MCQE: bail out after handling the consumed bit */
11887         if (!bf_get(lpfc_trailer_completed, mcqe))
11888                 goto out_no_mqe_complete;
11889
11890         /* Get the reference to the active mbox command */
11891         spin_lock_irqsave(&phba->hbalock, iflags);
11892         pmb = phba->sli.mbox_active;
11893         if (unlikely(!pmb)) {
11894                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11895                                 "1832 No pending MBOX command to handle\n");
11896                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11897                 goto out_no_mqe_complete;
11898         }
11899         spin_unlock_irqrestore(&phba->hbalock, iflags);
11900         mqe = &pmb->u.mqe;
11901         pmbox = (MAILBOX_t *)&pmb->u.mqe;
11902         mbox = phba->mbox;
11903         vport = pmb->vport;
11904
11905         /* Reset heartbeat timer */
11906         phba->last_completion_time = jiffies;
11907         del_timer(&phba->sli.mbox_tmo);
11908
11909         /* Move mbox data to caller's mailbox region, do endian swapping */
11910         if (pmb->mbox_cmpl && mbox)
11911                 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
11912
11913         /*
11914          * For mcqe errors, conditionally move a modified error code to
11915          * the mbox so that the error will not be missed.
11916          */
11917         mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
11918         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
11919                 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
11920                         bf_set(lpfc_mqe_status, mqe,
11921                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
11922         }
11923         if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
11924                 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
11925                 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
11926                                       "MBOX dflt rpi: status:x%x rpi:x%x",
11927                                       mcqe_status,
11928                                       pmbox->un.varWords[0], 0);
11929                 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
11930                         mp = (struct lpfc_dmabuf *)(pmb->context1);
11931                         ndlp = (struct lpfc_nodelist *)pmb->context2;
11932                         /* Reg_LOGIN of dflt RPI was successful. Now let's get
11933                          * rid of the RPI using the same mbox buffer.
11934                          */
11935                         lpfc_unreg_login(phba, vport->vpi,
11936                                          pmbox->un.varWords[0], pmb);
11937                         pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
11938                         pmb->context1 = mp;
11939                         pmb->context2 = ndlp;
11940                         pmb->vport = vport;
11941                         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
11942                         if (rc != MBX_BUSY)
11943                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11944                                                 LOG_SLI, "0385 rc should "
11945                                                 "have been MBX_BUSY\n");
11946                         if (rc != MBX_NOT_FINISHED)
11947                                 goto send_current_mbox;
11948                 }
11949         }
11950         spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
11951         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11952         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
11953
11954         /* There is mailbox completion work to do */
11955         spin_lock_irqsave(&phba->hbalock, iflags);
11956         __lpfc_mbox_cmpl_put(phba, pmb);
11957         phba->work_ha |= HA_MBATT;
11958         spin_unlock_irqrestore(&phba->hbalock, iflags);
11959         workposted = true;
11960
11961 send_current_mbox:
11962         spin_lock_irqsave(&phba->hbalock, iflags);
11963         /* Release the mailbox command posting token */
11964         phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11965         /* Setting active mailbox pointer needs to be in sync with flag clear */
11966         phba->sli.mbox_active = NULL;
11967         if (bf_get(lpfc_trailer_consumed, mcqe))
11968                 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
11969         spin_unlock_irqrestore(&phba->hbalock, iflags);
11970         /* Wake up worker thread to post the next pending mailbox command */
11971         lpfc_worker_wake_up(phba);
11972         return workposted;
11973
11974 out_no_mqe_complete:
11975         spin_lock_irqsave(&phba->hbalock, iflags);
11976         if (bf_get(lpfc_trailer_consumed, mcqe))
11977                 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
11978         spin_unlock_irqrestore(&phba->hbalock, iflags);
11979         return false;
11980 }
11981
11982 /**
11983  * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
11984  * @phba: Pointer to HBA context object.
11985  * @cqe: Pointer to mailbox completion queue entry.
11986  *
11987  * This routine processes a mailbox completion queue entry. It invokes the
11988  * proper mailbox completion or asynchronous event handling routine
11989  * according to the MCQE's async bit.
11990  *
11991  * Return: true if work posted to worker thread, otherwise false.
11992  **/
11993 static bool
11994 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
11995 {
11996         struct lpfc_mcqe mcqe;
11997         bool workposted;
11998
11999         /* Copy the mailbox MCQE and convert endian order as needed */
12000         lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
12001
12002         /* Invoke the proper event handling routine */
12003         if (!bf_get(lpfc_trailer_async, &mcqe))
12004                 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
12005         else
12006                 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
12007         return workposted;
12008 }
12009
12010 /**
12011  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
12012  * @phba: Pointer to HBA context object.
12013  * @cq: Pointer to associated CQ
12014  * @wcqe: Pointer to work-queue completion queue entry.
12015  *
12016  * This routine handles an ELS work-queue completion event.
12017  *
12018  * Return: true if work posted to worker thread, otherwise false.
12019  **/
12020 static bool
12021 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12022                              struct lpfc_wcqe_complete *wcqe)
12023 {
12024         struct lpfc_iocbq *irspiocbq;
12025         unsigned long iflags;
12026         struct lpfc_sli_ring *pring = cq->pring;
12027         int txq_cnt = 0;
12028         int txcmplq_cnt = 0;
12029         int fcp_txcmplq_cnt = 0;
12030
12031         /* Get an irspiocbq for later ELS response processing use */
12032         irspiocbq = lpfc_sli_get_iocbq(phba);
12033         if (!irspiocbq) {
12034                 if (!list_empty(&pring->txq))
12035                         txq_cnt++;
12036                 if (!list_empty(&pring->txcmplq))
12037                         txcmplq_cnt++;
12038                 if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
12039                         fcp_txcmplq_cnt++;
12040                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12041                         "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
12042                         "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
12043                         txq_cnt, phba->iocb_cnt,
12044                         fcp_txcmplq_cnt,
12045                         txcmplq_cnt);
12046                 return false;
12047         }
12048
12049         /* Save off the slow-path queue event for work thread to process */
12050         memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
12051         spin_lock_irqsave(&phba->hbalock, iflags);
12052         list_add_tail(&irspiocbq->cq_event.list,
12053                       &phba->sli4_hba.sp_queue_event);
12054         phba->hba_flag |= HBA_SP_QUEUE_EVT;
12055         spin_unlock_irqrestore(&phba->hbalock, iflags);
12056
12057         return true;
12058 }
12059
12060 /**
12061  * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
12062  * @phba: Pointer to HBA context object.
12063  * @wcqe: Pointer to work-queue completion queue entry.
12064  *
12065  * This routine handles a slow-path WQ entry consumed event by invoking the
12066  * proper WQ release routine to the slow-path WQ.
12067  **/
12068 static void
12069 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
12070                              struct lpfc_wcqe_release *wcqe)
12071 {
12072         /* sanity check on queue memory */
12073         if (unlikely(!phba->sli4_hba.els_wq))
12074                 return;
12075         /* Check for the slow-path ELS work queue */
12076         if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
12077                 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
12078                                      bf_get(lpfc_wcqe_r_wqe_index, wcqe));
12079         else
12080                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12081                                 "2579 Slow-path wqe consume event carries "
12082                                 "mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
12083                                 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
12084                                 phba->sli4_hba.els_wq->queue_id);
12085 }
12086
12087 /**
12088  * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event
12089  * @phba: Pointer to HBA context object.
12090  * @cq: Pointer to a WQ completion queue.
12091  * @wcqe: Pointer to work-queue completion queue entry.
12092  *
12093  * This routine handles an XRI abort event.
12094  *
12095  * Return: true if work posted to worker thread, otherwise false.
12096  **/
12097 static bool
12098 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
12099                                    struct lpfc_queue *cq,
12100                                    struct sli4_wcqe_xri_aborted *wcqe)
12101 {
12102         bool workposted = false;
12103         struct lpfc_cq_event *cq_event;
12104         unsigned long iflags;
12105
12106         /* Allocate a new internal CQ_EVENT entry */
12107         cq_event = lpfc_sli4_cq_event_alloc(phba);
12108         if (!cq_event) {
12109                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12110                                 "0602 Failed to allocate CQ_EVENT entry\n");
12111                 return false;
12112         }
12113
12114         /* Move the CQE into the proper xri abort event list */
12115         memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
12116         switch (cq->subtype) {
12117         case LPFC_FCP:
12118                 spin_lock_irqsave(&phba->hbalock, iflags);
12119                 list_add_tail(&cq_event->list,
12120                               &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
12121                 /* Set the fcp xri abort event flag */
12122                 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
12123                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12124                 workposted = true;
12125                 break;
12126         case LPFC_ELS:
12127                 spin_lock_irqsave(&phba->hbalock, iflags);
12128                 list_add_tail(&cq_event->list,
12129                               &phba->sli4_hba.sp_els_xri_aborted_work_queue);
12130                 /* Set the els xri abort event flag */
12131                 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
12132                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12133                 workposted = true;
12134                 break;
12135         default:
12136                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12137                                 "0603 Invalid work queue CQE subtype (x%x)\n",
12138                                 cq->subtype);
12139                 workposted = false;
12140                 break;
12141         }
12142         return workposted;
12143 }
12144
12145 /**
12146  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
12147  * @phba: Pointer to HBA context object.
12148  * @rcqe: Pointer to receive-queue completion queue entry.
12149  *
12150  * This routine processes a receive-queue completion queue entry.
12151  *
12152  * Return: true if work posted to worker thread, otherwise false.
12153  **/
12154 static bool
12155 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
12156 {
12157         bool workposted = false;
12158         struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
12159         struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
12160         struct hbq_dmabuf *dma_buf;
12161         uint32_t status, rq_id;
12162         unsigned long iflags;
12163
12164         /* sanity check on queue memory */
12165         if (unlikely(!hrq) || unlikely(!drq))
12166                 return workposted;
12167
12168         if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
12169                 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
12170         else
12171                 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
12172         if (rq_id != hrq->queue_id)
12173                 goto out;
12174
12175         status = bf_get(lpfc_rcqe_status, rcqe);
12176         switch (status) {
12177         case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
12178                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12179                                 "2537 Receive Frame Truncated!!\n");
12180                 hrq->RQ_buf_trunc++;
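                /* fall through - a truncated frame is still released and queued */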
12181         case FC_STATUS_RQ_SUCCESS:
12182                 lpfc_sli4_rq_release(hrq, drq);
12183                 spin_lock_irqsave(&phba->hbalock, iflags);
12184                 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
12185                 if (!dma_buf) {
12186                         hrq->RQ_no_buf_found++;
12187                         spin_unlock_irqrestore(&phba->hbalock, iflags);
12188                         goto out;
12189                 }
12190                 hrq->RQ_rcv_buf++;
12191                 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
12192                 /* save off the frame for the worker thread to process */
12193                 list_add_tail(&dma_buf->cq_event.list,
12194                               &phba->sli4_hba.sp_queue_event);
12195                 /* Frame received */
12196                 phba->hba_flag |= HBA_SP_QUEUE_EVT;
12197                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12198                 workposted = true;
12199                 break;
12200         case FC_STATUS_INSUFF_BUF_NEED_BUF:
12201         case FC_STATUS_INSUFF_BUF_FRM_DISC:
12202                 hrq->RQ_no_posted_buf++;
12203                 /* Post more buffers if possible */
12204                 spin_lock_irqsave(&phba->hbalock, iflags);
12205                 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
12206                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12207                 workposted = true;
12208                 break;
12209         }
12210 out:
12211         return workposted;
12212 }
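
/*
 * Illustrative sketch (hypothetical helper): RECEIVE_V1 CQEs report the
 * RQ id in a different field than the original layout, so consumers
 * select the accessor by CQE code exactly as the routine above does.
 */
static uint32_t __maybe_unused
example_rcqe_rq_id(struct lpfc_rcqe *rcqe)
{
	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		return bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	return bf_get(lpfc_rcqe_rq_id, rcqe);
}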
12213
12214 /**
12215  * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
12216  * @phba: Pointer to HBA context object.
12217  * @cq: Pointer to the completion queue.
12218  * @wcqe: Pointer to a completion queue entry.
12219  *
12220  * This routine processes a slow-path work-queue or receive-queue completion
12221  * queue entry.
12222  *
12223  * Return: true if work posted to worker thread, otherwise false.
12224  **/
12225 static bool
12226 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12227                          struct lpfc_cqe *cqe)
12228 {
12229         struct lpfc_cqe cqevt;
12230         bool workposted = false;
12231
12232         /* Copy the work queue CQE and convert endian order if needed */
12233         lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
12234
12235         /* Check for the different types of WCQE and dispatch accordingly */
12236         switch (bf_get(lpfc_cqe_code, &cqevt)) {
12237         case CQE_CODE_COMPL_WQE:
12238                 /* Process the WQ/RQ complete event */
12239                 phba->last_completion_time = jiffies;
12240                 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
12241                                 (struct lpfc_wcqe_complete *)&cqevt);
12242                 break;
12243         case CQE_CODE_RELEASE_WQE:
12244                 /* Process the WQ release event */
12245                 lpfc_sli4_sp_handle_rel_wcqe(phba,
12246                                 (struct lpfc_wcqe_release *)&cqevt);
12247                 break;
12248         case CQE_CODE_XRI_ABORTED:
12249                 /* Process the WQ XRI abort event */
12250                 phba->last_completion_time = jiffies;
12251                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
12252                                 (struct sli4_wcqe_xri_aborted *)&cqevt);
12253                 break;
12254         case CQE_CODE_RECEIVE:
12255         case CQE_CODE_RECEIVE_V1:
12256                 /* Process the RQ event */
12257                 phba->last_completion_time = jiffies;
12258                 workposted = lpfc_sli4_sp_handle_rcqe(phba,
12259                                 (struct lpfc_rcqe *)&cqevt);
12260                 break;
12261         default:
12262                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12263                                 "0388 Not a valid WCQE code: x%x\n",
12264                                 bf_get(lpfc_cqe_code, &cqevt));
12265                 break;
12266         }
12267         return workposted;
12268 }
12269
12270 /**
12271  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
12272  * @phba: Pointer to HBA context object.
12273  * @eqe: Pointer to slow-path event queue entry.
12274  *
12275  * This routine processes an event queue entry from the slow-path event queue.
12276  * It will check the MajorCode and MinorCode to determine whether this is for a
12277  * completion event on a completion queue; if not, an error shall be logged
12278  * and just return. Otherwise, it will get to the corresponding completion
12279  * queue and process all the entries on that completion queue, rearm the
12280  * completion queue, and then return.
12281  *
12282  **/
12283 static void
12284 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
12285         struct lpfc_queue *speq)
12286 {
12287         struct lpfc_queue *cq = NULL, *childq;
12288         struct lpfc_cqe *cqe;
12289         bool workposted = false;
12290         int ecount = 0;
12291         uint16_t cqid;
12292
12293         /* Get the reference to the corresponding CQ */
12294         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
12295
12296         list_for_each_entry(childq, &speq->child_list, list) {
12297                 if (childq->queue_id == cqid) {
12298                         cq = childq;
12299                         break;
12300                 }
12301         }
12302         if (unlikely(!cq)) {
12303                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
12304                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12305                                         "0365 Slow-path CQ identifier "
12306                                         "(%d) does not exist\n", cqid);
12307                 return;
12308         }
12309
12310         /* Process all the entries to the CQ */
12311         switch (cq->type) {
12312         case LPFC_MCQ:
12313                 while ((cqe = lpfc_sli4_cq_get(cq))) {
12314                         workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
12315                         if (!(++ecount % cq->entry_repost))
12316                                 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12317                         cq->CQ_mbox++;
12318                 }
12319                 break;
12320         case LPFC_WCQ:
12321                 while ((cqe = lpfc_sli4_cq_get(cq))) {
12322                         if (cq->subtype == LPFC_FCP)
12323                                 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
12324                                                                        cqe);
12325                         else
12326                                 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
12327                                                                       cqe);
12328                         if (!(++ecount % cq->entry_repost))
12329                                 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12330                 }
12331
12332                 /* Track the max number of CQEs processed in 1 EQ */
12333                 if (ecount > cq->CQ_max_cqe)
12334                         cq->CQ_max_cqe = ecount;
12335                 break;
12336         default:
12337                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12338                                 "0370 Invalid completion queue type (%d)\n",
12339                                 cq->type);
12340                 return;
12341         }
12342
12343         /* Catch the no cq entry condition, log an error */
12344         if (unlikely(ecount == 0))
12345                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12346                                 "0371 No entry from the CQ: identifier "
12347                                 "(x%x), type (%d)\n", cq->queue_id, cq->type);
12348
12349         /* In any case, flush and re-arm the CQ */
12350         lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
12351
12352         /* wake up worker thread if there is work to be done */
12353         if (workposted)
12354                 lpfc_worker_wake_up(phba);
12355 }
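
/*
 * A minimal sketch (not driver code) of the CQ polling pattern used by the
 * handlers above: entries are consumed until the CQ is empty, the CQ is
 * released without re-arming every cq->entry_repost entries so the port can
 * recycle the consumed slots, and a final release re-arms the CQ to raise a
 * new interrupt.  The handle_one_cqe() helper is hypothetical, named only
 * to show the shape of the loop:
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq))) {
 *		workposted |= handle_one_cqe(phba, cq, cqe);
 *		if (!(++ecount % cq->entry_repost))
 *			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 *	}
 *	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
 */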
12356
12357 /**
12358  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
12359  * @phba: Pointer to HBA context object.
12360  * @cq: Pointer to associated CQ
12361  * @wcqe: Pointer to work-queue completion queue entry.
12362  *
12363  * This routine processes a fast-path work queue completion entry from the
12364  * fast-path event queue for an FCP command response completion.
12365  **/
12366 static void
12367 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12368                              struct lpfc_wcqe_complete *wcqe)
12369 {
12370         struct lpfc_sli_ring *pring = cq->pring;
12371         struct lpfc_iocbq *cmdiocbq;
12372         struct lpfc_iocbq irspiocbq;
12373         unsigned long iflags;
12374
12375         /* Check for response status */
12376         if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
12377                 /* If resource errors reported from HBA, reduce queue
12378                  * depth of the SCSI device.
12379                  */
12380                 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
12381                      IOSTAT_LOCAL_REJECT)) &&
12382                     ((wcqe->parameter & IOERR_PARAM_MASK) ==
12383                      IOERR_NO_RESOURCES))
12384                         phba->lpfc_rampdown_queue_depth(phba);
12385
12386                 /* Log the error status */
12387                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12388                                 "0373 FCP complete error: status=x%x, "
12389                                 "hw_status=x%x, total_data_specified=%d, "
12390                                 "parameter=x%x, word3=x%x\n",
12391                                 bf_get(lpfc_wcqe_c_status, wcqe),
12392                                 bf_get(lpfc_wcqe_c_hw_status, wcqe),
12393                                 wcqe->total_data_placed, wcqe->parameter,
12394                                 wcqe->word3);
12395         }
12396
12397         /* Look up the FCP command IOCB and create pseudo response IOCB */
12398         spin_lock_irqsave(&pring->ring_lock, iflags);
12399         pring->stats.iocb_event++;
12400         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
12401                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12402         spin_unlock_irqrestore(&pring->ring_lock, iflags);
12403         if (unlikely(!cmdiocbq)) {
12404                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12405                                 "0374 FCP complete with no corresponding "
12406                                 "cmdiocb: iotag (%d)\n",
12407                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12408                 return;
12409         }
12410         if (unlikely(!cmdiocbq->iocb_cmpl)) {
12411                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12412                                 "0375 FCP cmdiocb not callback function "
12413                                 "iotag: (%d)\n",
12414                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12415                 return;
12416         }
12417
12418         /* Fake the irspiocb and copy necessary response information */
12419         lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
12420
12421         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
12422                 spin_lock_irqsave(&phba->hbalock, iflags);
12423                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
12424                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12425         }
12426
12427         /* Pass the cmd_iocb and the rsp state to the upper layer */
12428         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
12429 }
12430
12431 /**
12432  * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
12433  * @phba: Pointer to HBA context object.
12434  * @cq: Pointer to completion queue.
12435  * @wcqe: Pointer to work-queue completion queue entry.
12436  *
12437  * This routine handles a fast-path WQ entry consumed event by invoking the
12438  * proper WQ release routine on the matching fast-path work queue.
12439  **/
12440 static void
12441 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12442                              struct lpfc_wcqe_release *wcqe)
12443 {
12444         struct lpfc_queue *childwq;
12445         bool wqid_matched = false;
12446         uint16_t fcp_wqid;
12447
12448         /* Check for fast-path FCP work queue release */
12449         fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
12450         list_for_each_entry(childwq, &cq->child_list, list) {
12451                 if (childwq->queue_id == fcp_wqid) {
12452                         lpfc_sli4_wq_release(childwq,
12453                                         bf_get(lpfc_wcqe_r_wqe_index, wcqe));
12454                         wqid_matched = true;
12455                         break;
12456                 }
12457         }
12458         /* Report warning log message if no match found */
12459         if (!wqid_matched)
12460                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12461                                 "2580 Fast-path wqe consume event carries "
12462                                 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
12463 }
12464
12465 /**
12466  * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
12467  * @phba: Pointer to HBA context object.
12468  * @cq: Pointer to the completion queue.
12469  * @cqe: Pointer to the fast-path completion queue entry.
12470  * This routine processes a fast-path work queue completion entry from a
12471  * fast-path event queue for FCP command response completion.
12472  **/
12473 static int
12474 lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12475                          struct lpfc_cqe *cqe)
12476 {
12477         struct lpfc_wcqe_release wcqe;
12478         bool workposted = false;
12479
12480         /* Copy the work queue CQE and convert endian order if needed */
12481         lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
12482
12483         /* Check the WCQE type and dispatch accordingly */
12484         switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
12485         case CQE_CODE_COMPL_WQE:
12486                 cq->CQ_wq++;
12487                 /* Process the WQ complete event */
12488                 phba->last_completion_time = jiffies;
12489                 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
12490                                 (struct lpfc_wcqe_complete *)&wcqe);
12491                 break;
12492         case CQE_CODE_RELEASE_WQE:
12493                 cq->CQ_release_wqe++;
12494                 /* Process the WQ release event */
12495                 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
12496                                 (struct lpfc_wcqe_release *)&wcqe);
12497                 break;
12498         case CQE_CODE_XRI_ABORTED:
12499                 cq->CQ_xri_aborted++;
12500                 /* Process the WQ XRI abort event */
12501                 phba->last_completion_time = jiffies;
12502                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
12503                                 (struct sli4_wcqe_xri_aborted *)&wcqe);
12504                 break;
12505         default:
12506                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12507                                 "0144 Not a valid WCQE code: x%x\n",
12508                                 bf_get(lpfc_wcqe_c_code, &wcqe));
12509                 break;
12510         }
12511         return workposted;
12512 }
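
/*
 * Note on the dispatch above: the CQE is first copied out of the DMA ring
 * with lpfc_sli_pcimem_bcopy(), which also converts endian order where
 * needed, and only the local copy is decoded; the hardware slot can then
 * be recycled independently of how long the individual handler runs.
 */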
12513
12514 /**
12515  * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
12516  * @phba: Pointer to HBA context object.
12517  * @eqe: Pointer to fast-path event queue entry.
12518  * @qidx: Index into the set of fast-path queues.
12519  *
12520  * This routine processes an event queue entry from the fast-path event
12521  * queue. It checks the MajorCode and MinorCode to determine whether this
12522  * is a completion event on a completion queue; if not, an error is logged
12523  * and the routine returns. Otherwise, it processes all the entries on the
12524  * corresponding completion queue, rearms it, and returns.
12525  **/
12526 static void
12527 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
12528                         uint32_t qidx)
12529 {
12530         struct lpfc_queue *cq;
12531         struct lpfc_cqe *cqe;
12532         bool workposted = false;
12533         uint16_t cqid;
12534         int ecount = 0;
12535
12536         if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
12537                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12538                                 "0366 Not a valid completion "
12539                                 "event: majorcode=x%x, minorcode=x%x\n",
12540                                 bf_get_le32(lpfc_eqe_major_code, eqe),
12541                                 bf_get_le32(lpfc_eqe_minor_code, eqe));
12542                 return;
12543         }
12544
12545         /* Get the reference to the corresponding CQ */
12546         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
12547
12548         /* Check if this is a Slow path event */
12549         if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
12550                 lpfc_sli4_sp_handle_eqe(phba, eqe,
12551                         phba->sli4_hba.hba_eq[qidx]);
12552                 return;
12553         }
12554
12555         if (unlikely(!phba->sli4_hba.fcp_cq)) {
12556                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12557                                 "3146 Fast-path completion queues "
12558                                 "does not exist\n");
12559                 return;
12560         }
12561         cq = phba->sli4_hba.fcp_cq[qidx];
12562         if (unlikely(!cq)) {
12563                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
12564                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12565                                         "0367 Fast-path completion queue "
12566                                         "(%d) does not exist\n", qidx);
12567                 return;
12568         }
12569
12570         if (unlikely(cqid != cq->queue_id)) {
12571                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12572                                 "0368 Miss-matched fast-path completion "
12573                                 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
12574                                 cqid, cq->queue_id);
12575                 return;
12576         }
12577
12578         /* Process all the entries on the CQ */
12579         while ((cqe = lpfc_sli4_cq_get(cq))) {
12580                 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
12581                 if (!(++ecount % cq->entry_repost))
12582                         lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12583         }
12584
12585         /* Track the max number of CQEs processed in 1 EQ */
12586         if (ecount > cq->CQ_max_cqe)
12587                 cq->CQ_max_cqe = ecount;
12588
12589         /* Catch the no cq entry condition */
12590         if (unlikely(ecount == 0))
12591                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12592                                 "0369 No entry from fast-path completion "
12593                                 "queue fcpcqid=%d\n", cq->queue_id);
12594
12595         /* In any case, flush and re-arm the CQ */
12596         lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
12597
12598         /* wake up worker thread if there is work to be done */
12599         if (workposted)
12600                 lpfc_worker_wake_up(phba);
12601 }
12602
12603 static void
12604 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
12605 {
12606         struct lpfc_eqe *eqe;
12607
12608         /* walk all the EQ entries and drop on the floor */
12609         while ((eqe = lpfc_sli4_eq_get(eq)))
12610                 ;
12611
12612         /* Clear and re-arm the EQ */
12613         lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
12614 }
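
/*
 * A condensed sketch of how lpfc_sli4_eq_flush() is used by the interrupt
 * handlers below: when the device state check fails while the link is
 * down, the EQ is drained and re-armed under the hbalock so stale entries
 * do not keep the interrupt asserted:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflag);
 *	if (phba->link_state < LPFC_LINK_DOWN)
 *		lpfc_sli4_eq_flush(phba, eq);
 *	spin_unlock_irqrestore(&phba->hbalock, iflag);
 */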
12615
12616
12617 /**
12618  * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
12619  *                           entry
12620  * @phba: Pointer to HBA context object.
12621  * @eqe: Pointer to fast-path event queue entry.
12622  *
12623  * This routine processes an event queue entry from the Flash Optimized
12624  * Fabric event queue.  It checks the MajorCode and MinorCode to determine
12625  * whether this is a completion event on a completion queue; if not, an
12626  * error is logged and the routine returns. Otherwise, it processes all the
12627  * entries on the corresponding completion queue, rearms the completion
12628  * queue, and returns.
12629  **/
12630 static void
12631 lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
12632 {
12633         struct lpfc_queue *cq;
12634         struct lpfc_cqe *cqe;
12635         bool workposted = false;
12636         uint16_t cqid;
12637         int ecount = 0;
12638
12639         if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
12640                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12641                                 "9147 Not a valid completion "
12642                                 "event: majorcode=x%x, minorcode=x%x\n",
12643                                 bf_get_le32(lpfc_eqe_major_code, eqe),
12644                                 bf_get_le32(lpfc_eqe_minor_code, eqe));
12645                 return;
12646         }
12647
12648         /* Get the reference to the corresponding CQ */
12649         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
12650
12651         /* Next check for OAS */
12652         cq = phba->sli4_hba.oas_cq;
12653         if (unlikely(!cq)) {
12654                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
12655                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12656                                         "9148 OAS completion queue "
12657                                         "does not exist\n");
12658                 return;
12659         }
12660
12661         if (unlikely(cqid != cq->queue_id)) {
12662                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12663                                 "9149 Miss-matched fast-path compl "
12664                                 "queue id: eqcqid=%d, fcpcqid=%d\n",
12665                                 cqid, cq->queue_id);
12666                 return;
12667         }
12668
12669         /* Process all the entries on the OAS CQ */
12670         while ((cqe = lpfc_sli4_cq_get(cq))) {
12671                 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
12672                 if (!(++ecount % cq->entry_repost))
12673                         lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12674         }
12675
12676         /* Track the max number of CQEs processed in 1 EQ */
12677         if (ecount > cq->CQ_max_cqe)
12678                 cq->CQ_max_cqe = ecount;
12679
12680         /* Catch the no cq entry condition */
12681         if (unlikely(ecount == 0))
12682                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12683                                 "9153 No entry from fast-path completion "
12684                                 "queue fcpcqid=%d\n", cq->queue_id);
12685
12686         /* In any case, flush and re-arm the CQ */
12687         lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
12688
12689         /* wake up worker thread if there is work to be done */
12690         if (workposted)
12691                 lpfc_worker_wake_up(phba);
12692 }
12693
12694 /**
12695  * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
12696  * @irq: Interrupt number.
12697  * @dev_id: The device context pointer.
12698  *
12699  * This function is directly called from the PCI layer as an interrupt
12700  * service routine when device with SLI-4 interface spec is enabled with
12701  * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
12702  * IOCB ring event in the HBA. However, when the device is enabled with either
12703  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12704  * device-level interrupt handler. When the PCI slot is in error recovery
12705  * or the HBA is undergoing initialization, the interrupt handler will not
12706  * process the interrupt. The Flash Optimized Fabric ring event are handled in
12707  * the intrrupt context. This function is called without any lock held.
12708  * It gets the hbalock to access and update SLI data structures. Note that,
12709  * the EQ to CQ are one-to-one map such that the EQ index is
12710  * equal to that of CQ index.
12711  *
12712  * This function returns IRQ_HANDLED when interrupt is handled else it
12713  * returns IRQ_NONE.
12714  **/
12715 irqreturn_t
12716 lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
12717 {
12718         struct lpfc_hba *phba;
12719         struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
12720         struct lpfc_queue *eq;
12721         struct lpfc_eqe *eqe;
12722         unsigned long iflag;
12723         int ecount = 0;
12724
12725         /* Get the driver's phba structure from the dev_id */
12726         fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
12727         phba = fcp_eq_hdl->phba;
12728
12729         if (unlikely(!phba))
12730                 return IRQ_NONE;
12731
12732         /* Get to the EQ struct associated with this vector */
12733         eq = phba->sli4_hba.fof_eq;
12734         if (unlikely(!eq))
12735                 return IRQ_NONE;
12736
12737         /* Check device state for handling interrupt */
12738         if (unlikely(lpfc_intr_state_check(phba))) {
12739                 eq->EQ_badstate++;
12740                 /* Check again for link_state with lock held */
12741                 spin_lock_irqsave(&phba->hbalock, iflag);
12742                 if (phba->link_state < LPFC_LINK_DOWN)
12743                         /* Flush, clear interrupt, and rearm the EQ */
12744                         lpfc_sli4_eq_flush(phba, eq);
12745                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12746                 return IRQ_NONE;
12747         }
12748
12749         /*
12750          * Process all the events on the FCP fast-path EQ
12751          */
12752         while ((eqe = lpfc_sli4_eq_get(eq))) {
12753                 lpfc_sli4_fof_handle_eqe(phba, eqe);
12754                 if (!(++ecount % eq->entry_repost))
12755                         lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
12756                 eq->EQ_processed++;
12757         }
12758
12759         /* Track the max number of EQEs processed in 1 intr */
12760         if (ecount > eq->EQ_max_eqe)
12761                 eq->EQ_max_eqe = ecount;
12762
12763
12764         if (unlikely(ecount == 0)) {
12765                 eq->EQ_no_entry++;
12766
12767                 if (phba->intr_type == MSIX)
12768                         /* MSI-X vectors are not shared; warn on an empty EQ */
12769                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12770                                         "9145 MSI-X interrupt with no EQE\n");
12771                 else {
12772                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12773                                         "9146 ISR interrupt with no EQE\n");
12774                         /* Non MSI-X interrupts may be shared; not ours to handle */
12775                         return IRQ_NONE;
12776                 }
12777         }
12778         /* Always clear and re-arm the fast-path EQ */
12779         lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
12780         return IRQ_HANDLED;
12781 }
12782
12783 /**
12784  * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
12785  * @irq: Interrupt number.
12786  * @dev_id: The device context pointer.
12787  *
12788  * This function is directly called from the PCI layer as an interrupt
12789  * service routine when device with SLI-4 interface spec is enabled with
12790  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12791  * ring event in the HBA. However, when the device is enabled with either
12792  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12793  * device-level interrupt handler. When the PCI slot is in error recovery
12794  * or the HBA is undergoing initialization, the interrupt handler will not
12795  * process the interrupt. SCSI FCP fast-path ring events are handled in
12796  * interrupt context. This function is called without any lock held.
12797  * It takes the hbalock to access and update SLI data structures. Note that
12798  * the FCP EQ-to-CQ mapping is one-to-one, so the FCP EQ index is equal to
12799  * the corresponding FCP CQ index.
12800  *
12801  * The link attention and ELS ring attention events are handled
12802  * by the worker thread. The interrupt handler signals the worker thread
12803  * and returns for these events. This function is called without any lock
12804  * held. It gets the hbalock to access and update SLI data structures.
12805  *
12806  * This function returns IRQ_HANDLED when interrupt is handled else it
12807  * returns IRQ_NONE.
12808  **/
12809 irqreturn_t
12810 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
12811 {
12812         struct lpfc_hba *phba;
12813         struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
12814         struct lpfc_queue *fpeq;
12815         struct lpfc_eqe *eqe;
12816         unsigned long iflag;
12817         int ecount = 0;
12818         int fcp_eqidx;
12819
12820         /* Get the driver's phba structure from the dev_id */
12821         fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
12822         phba = fcp_eq_hdl->phba;
12823         fcp_eqidx = fcp_eq_hdl->idx;
12824
12825         if (unlikely(!phba))
12826                 return IRQ_NONE;
12827         if (unlikely(!phba->sli4_hba.hba_eq))
12828                 return IRQ_NONE;
12829
12830         /* Get to the EQ struct associated with this vector */
12831         fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
12832         if (unlikely(!fpeq))
12833                 return IRQ_NONE;
12834
12835         if (lpfc_fcp_look_ahead) {
12836                 if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
12837                         lpfc_sli4_eq_clr_intr(fpeq);
12838                 else {
12839                         atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12840                         return IRQ_NONE;
12841                 }
12842         }
12843
12844         /* Check device state for handling interrupt */
12845         if (unlikely(lpfc_intr_state_check(phba))) {
12846                 fpeq->EQ_badstate++;
12847                 /* Check again for link_state with lock held */
12848                 spin_lock_irqsave(&phba->hbalock, iflag);
12849                 if (phba->link_state < LPFC_LINK_DOWN)
12850                         /* Flush, clear interrupt, and rearm the EQ */
12851                         lpfc_sli4_eq_flush(phba, fpeq);
12852                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12853                 if (lpfc_fcp_look_ahead)
12854                         atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12855                 return IRQ_NONE;
12856         }
12857
12858         /*
12859          * Process all the events on the FCP fast-path EQ
12860          */
12861         while ((eqe = lpfc_sli4_eq_get(fpeq))) {
12865                 lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
12866                 if (!(++ecount % fpeq->entry_repost))
12867                         lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
12868                 fpeq->EQ_processed++;
12869         }
12870
12871         /* Track the max number of EQEs processed in 1 intr */
12872         if (ecount > fpeq->EQ_max_eqe)
12873                 fpeq->EQ_max_eqe = ecount;
12874
12875         /* Always clear and re-arm the fast-path EQ */
12876         lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
12877
12878         if (unlikely(ecount == 0)) {
12879                 fpeq->EQ_no_entry++;
12880
12881                 if (lpfc_fcp_look_ahead) {
12882                         atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12883                         return IRQ_NONE;
12884                 }
12885
12886                 if (phba->intr_type == MSIX)
12887                         /* MSI-X vectors are not shared; warn on an empty EQ */
12888                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12889                                         "0358 MSI-X interrupt with no EQE\n");
12890                 else
12891                         /* Non MSI-X interrupts may be shared; not ours to handle */
12892                         return IRQ_NONE;
12893         }
12894
12895         if (lpfc_fcp_look_ahead)
12896                 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12897         return IRQ_HANDLED;
12898 } /* lpfc_sli4_hba_intr_handler */
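
/*
 * A condensed sketch of the lpfc_fcp_look_ahead gating used above. The
 * per-EQ fcp_eq_in_use counter is held at 1 while the EQ is idle, so the
 * first context to enter the handler decrements it to zero, claims the EQ
 * and clears its interrupt; any concurrent entry restores the counter and
 * backs off with IRQ_NONE:
 *
 *	if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
 *		lpfc_sli4_eq_clr_intr(fpeq);
 *	else {
 *		atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
 *		return IRQ_NONE;
 *	}
 */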
12899
12900 /**
12901  * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
12902  * @irq: Interrupt number.
12903  * @dev_id: The device context pointer.
12904  *
12905  * This function is the device-level interrupt handler to device with SLI-4
12906  * interface spec, called from the PCI layer when either MSI or Pin-IRQ
12907  * interrupt mode is enabled and there is an event in the HBA which requires
12908  * driver attention. This function invokes the slow-path interrupt attention
12909  * handling function and fast-path interrupt attention handling function in
12910  * turn to process the relevant HBA attention events. This function is called
12911  * without any lock held. It gets the hbalock to access and update SLI data
12912  * structures.
12913  *
12914  * This function returns IRQ_HANDLED when interrupt is handled, else it
12915  * returns IRQ_NONE.
12916  **/
12917 irqreturn_t
12918 lpfc_sli4_intr_handler(int irq, void *dev_id)
12919 {
12920         struct lpfc_hba  *phba;
12921         irqreturn_t hba_irq_rc;
12922         bool hba_handled = false;
12923         int fcp_eqidx;
12924
12925         /* Get the driver's phba structure from the dev_id */
12926         phba = (struct lpfc_hba *)dev_id;
12927
12928         if (unlikely(!phba))
12929                 return IRQ_NONE;
12930
12931         /*
12932          * Invoke fast-path host attention interrupt handling as appropriate.
12933          */
12934         for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
12935                 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
12936                                         &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
12937                 if (hba_irq_rc == IRQ_HANDLED)
12938                         hba_handled |= true;
12939         }
12940
12941         if (phba->cfg_fof) {
12942                 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
12943                                         &phba->sli4_hba.fcp_eq_hdl[0]);
12944                 if (hba_irq_rc == IRQ_HANDLED)
12945                         hba_handled |= true;
12946         }
12947
12948         return hba_handled ? IRQ_HANDLED : IRQ_NONE;
12949 } /* lpfc_sli4_intr_handler */
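
/*
 * A minimal sketch of how this device-level handler is wired up when MSI or
 * INTx (Pin-IRQ) interrupt mode is in effect; the real registration is done
 * during driver setup (see lpfc_sli4_enable_intr() in lpfc_init.c), and the
 * phba pointer is handed back as dev_id.  The error handling here is
 * illustrative only:
 *
 *	retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
 *			     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 *	if (retval)
 *		return retval;
 */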
12950
12951 /**
12952  * lpfc_sli4_queue_free - free a queue structure and associated memory
12953  * @queue: The queue structure to free.
12954  *
12955  * This function frees a queue structure and the DMAable memory used for
12956  * the host resident queue. This function must be called after destroying the
12957  * queue on the HBA.
12958  **/
12959 void
12960 lpfc_sli4_queue_free(struct lpfc_queue *queue)
12961 {
12962         struct lpfc_dmabuf *dmabuf;
12963
12964         if (!queue)
12965                 return;
12966
12967         while (!list_empty(&queue->page_list)) {
12968                 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
12969                                  list);
12970                 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
12971                                   dmabuf->virt, dmabuf->phys);
12972                 kfree(dmabuf);
12973         }
12974         kfree(queue);
12975         return;
12976 }
12977
12978 /**
12979  * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
12980  * @phba: The HBA that this queue is being created on.
12981  * @entry_size: The size of each queue entry for this queue.
12982  * @entry_count: The number of entries that this queue will handle.
12983  *
12984  * This function allocates a queue structure and the DMAable memory used for
12985  * the host resident queue. This function must be called before creating the
12986  * queue on the HBA.
12987  **/
12988 struct lpfc_queue *
12989 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
12990                       uint32_t entry_count)
12991 {
12992         struct lpfc_queue *queue;
12993         struct lpfc_dmabuf *dmabuf;
12994         int x, total_qe_count;
12995         void *dma_pointer;
12996         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12997
12998         if (!phba->sli4_hba.pc_sli4_params.supported)
12999                 hw_page_size = SLI4_PAGE_SIZE;
13000
13001         queue = kzalloc(sizeof(struct lpfc_queue) +
13002                         (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
13003         if (!queue)
13004                 return NULL;
13005         queue->page_count = (ALIGN(entry_size * entry_count,
13006                         hw_page_size))/hw_page_size;
13007         INIT_LIST_HEAD(&queue->list);
13008         INIT_LIST_HEAD(&queue->page_list);
13009         INIT_LIST_HEAD(&queue->child_list);
13010         for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
13011                 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
13012                 if (!dmabuf)
13013                         goto out_fail;
13014                 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
13015                                                    hw_page_size, &dmabuf->phys,
13016                                                    GFP_KERNEL);
13017                 if (!dmabuf->virt) {
13018                         kfree(dmabuf);
13019                         goto out_fail;
13020                 }
13021                 dmabuf->buffer_tag = x;
13022                 list_add_tail(&dmabuf->list, &queue->page_list);
13023                 /* initialize queue's entry array */
13024                 dma_pointer = dmabuf->virt;
13025                 for (; total_qe_count < entry_count &&
13026                      dma_pointer < (hw_page_size + dmabuf->virt);
13027                      total_qe_count++, dma_pointer += entry_size) {
13028                         queue->qe[total_qe_count].address = dma_pointer;
13029                 }
13030         }
13031         queue->entry_size = entry_size;
13032         queue->entry_count = entry_count;
13033
13034         /*
13035          * entry_repost is calculated based on the number of entries in the
13036          * queue. This works out except for RQs. If buffers are NOT initially
13037          * posted for every RQE, entry_repost should be adjusted accordingly.
13038          */
13039         queue->entry_repost = (entry_count >> 3);
13040         if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
13041                 queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
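        /*
         * Worked example (illustrative): a 512-entry queue yields
         * entry_repost = 512 >> 3 = 64, i.e. a NOARM release roughly every
         * 64 consumed entries; a 32-entry queue would compute 4 and be
         * raised to LPFC_QUEUE_MIN_REPOST by the clamp above.
         */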
13042         queue->phba = phba;
13043
13044         return queue;
13045 out_fail:
13046         lpfc_sli4_queue_free(queue);
13047         return NULL;
13048 }
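
/*
 * A hedged usage sketch of the queue life cycle around this allocator (the
 * driver's real queue setup lives in lpfc_init.c); the size and count
 * arguments are illustrative:
 *
 *	struct lpfc_queue *eq;
 *	int rc;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
 *				   phba->sli4_hba.eq_ecount);
 *	if (!eq)
 *		return -ENOMEM;
 *	rc = lpfc_eq_create(phba, eq, phba->cfg_fcp_imax);
 *
 * lpfc_sli4_queue_free(eq) may only be called after the queue has been
 * destroyed on the HBA, as noted above lpfc_sli4_queue_free().
 */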
13049
13050 /**
13051  * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
13052  * @phba: HBA structure that indicates port to create a queue on.
13053  * @pci_barset: PCI BAR set flag.
13054  *
13055  * This function returns the host memory address to which the specified PCI
13056  * BAR set has been iomapped, if such a mapping has been set up. The returned
13057  * host memory address can be NULL.
13058  */
13059 static void __iomem *
13060 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
13061 {
13062         if (!phba->pcidev)
13063                 return NULL;
13064
13065         switch (pci_barset) {
13066         case WQ_PCI_BAR_0_AND_1:
13067                 return phba->pci_bar0_memmap_p;
13068         case WQ_PCI_BAR_2_AND_3:
13069                 return phba->pci_bar2_memmap_p;
13070         case WQ_PCI_BAR_4_AND_5:
13071                 return phba->pci_bar4_memmap_p;
13072         default:
13073                 break;
13074         }
13075         return NULL;
13076 }
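
/*
 * A short usage sketch (error handling illustrative): queue-create routines
 * that need doorbell access look up the iomapped BAR for a given BAR set
 * and bail out when it was never mapped, since the returned address can be
 * NULL:
 *
 *	void __iomem *bar_memmap_p;
 *
 *	bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
 *	if (!bar_memmap_p)
 *		return -ENOMEM;
 */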
13077
13078 /**
13079  * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
13080  * @phba: HBA structure that indicates port to create a queue on.
13081  * @startq: The starting FCP EQ to modify
13082  *
13083  * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
13084  *
13085  * The @phba struct is used to send mailbox command to HBA. The @startq
13086  * is used to get the starting FCP EQ to change.
13087  * This function is synchronous and polls for the mailbox
13088  * command to finish before returning.
13089  *
13090  * On success this function will return a zero. If unable to allocate enough
13091  * memory this function will return -ENOMEM. If the mailbox command
13092  * fails this function will return -ENXIO.
13093  **/
13094 int
13095 lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq)
13096 {
13097         struct lpfc_mbx_modify_eq_delay *eq_delay;
13098         LPFC_MBOXQ_t *mbox;
13099         struct lpfc_queue *eq;
13100         int cnt, rc, length, status = 0;
13101         uint32_t shdr_status, shdr_add_status;
13102         uint32_t result;
13103         int fcp_eqidx;
13104         union lpfc_sli4_cfg_shdr *shdr;
13105         uint16_t dmult;
13106
13107         if (startq >= phba->cfg_fcp_io_channel)
13108                 return 0;
13109
13110         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13111         if (!mbox)
13112                 return -ENOMEM;
13113         length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
13114                   sizeof(struct lpfc_sli4_cfg_mhdr));
13115         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13116                          LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
13117                          length, LPFC_SLI4_MBX_EMBED);
13118         eq_delay = &mbox->u.mqe.un.eq_delay;
13119
13120         /* Calculate delay multiplier from maximum interrupts per second */
13121         result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
13122         if (result > LPFC_DMULT_CONST)
13123                 dmult = 0;
13124         else
13125                 dmult = LPFC_DMULT_CONST/result - 1;
13126
13127         cnt = 0;
13128         for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
13129             fcp_eqidx++) {
13130                 eq = phba->sli4_hba.hba_eq[fcp_eqidx];
13131                 if (!eq)
13132                         continue;
13133                 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
13134                 eq_delay->u.request.eq[cnt].phase = 0;
13135                 eq_delay->u.request.eq[cnt].delay_multi = dmult;
13136                 cnt++;
13137                 if (cnt >= LPFC_MAX_EQ_DELAY)
13138                         break;
13139         }
13140         eq_delay->u.request.num_eq = cnt;
13141
13142         mbox->vport = phba->pport;
13143         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13144         mbox->context1 = NULL;
13145         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13146         shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
13147         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13148         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13149         if (shdr_status || shdr_add_status || rc) {
13150                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13151                                 "2512 MODIFY_EQ_DELAY mailbox failed with "
13152                                 "status x%x add_status x%x, mbx status x%x\n",
13153                                 shdr_status, shdr_add_status, rc);
13154                 status = -ENXIO;
13155         }
13156         mempool_free(mbox, phba->mbox_mem_pool);
13157         return status;
13158 }
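
/*
 * Worked example (illustrative numbers) for the delay-multiplier arithmetic
 * above: with cfg_fcp_imax = 40000 maximum interrupts per second spread
 * over cfg_fcp_io_channel = 4 channels, each EQ is budgeted
 * result = 40000 / 4 = 10000 interrupts per second, and the multiplier
 * written for each EQ is dmult = LPFC_DMULT_CONST / 10000 - 1; a result
 * larger than LPFC_DMULT_CONST turns the delay off (dmult = 0).
 */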
13159
13160 /**
13161  * lpfc_eq_create - Create an Event Queue on the HBA
13162  * @phba: HBA structure that indicates port to create a queue on.
13163  * @eq: The queue structure to use to create the event queue.
13164  * @imax: The maximum interrupt per second limit.
13165  *
13166  * This function creates an event queue, as detailed in @eq, on a port,
13167  * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
13168  *
13169  * The @phba struct is used to send mailbox command to HBA. The @eq struct
13170  * is used to get the entry count and entry size that are necessary to
13171  * determine the number of pages to allocate and use for this queue. This
13172  * function will send the EQ_CREATE mailbox command to the HBA to set up the
13173  * event queue. This function is synchronous and polls for the mailbox
13174  * command to finish before returning.
13175  *
13176  * On success this function will return a zero. If unable to allocate enough
13177  * memory this function will return -ENOMEM. If the queue create mailbox command
13178  * fails this function will return -ENXIO.
13179  **/
13180 int
13181 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
13182 {
13183         struct lpfc_mbx_eq_create *eq_create;
13184         LPFC_MBOXQ_t *mbox;
13185         int rc, length, status = 0;
13186         struct lpfc_dmabuf *dmabuf;
13187         uint32_t shdr_status, shdr_add_status;
13188         union lpfc_sli4_cfg_shdr *shdr;
13189         uint16_t dmult;
13190         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13191
13192         /* sanity check on queue memory */
13193         if (!eq)
13194                 return -ENODEV;
13195         if (!phba->sli4_hba.pc_sli4_params.supported)
13196                 hw_page_size = SLI4_PAGE_SIZE;
13197
13198         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13199         if (!mbox)
13200                 return -ENOMEM;
13201         length = (sizeof(struct lpfc_mbx_eq_create) -
13202                   sizeof(struct lpfc_sli4_cfg_mhdr));
13203         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13204                          LPFC_MBOX_OPCODE_EQ_CREATE,
13205                          length, LPFC_SLI4_MBX_EMBED);
13206         eq_create = &mbox->u.mqe.un.eq_create;
13207         bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
13208                eq->page_count);
13209         bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
13210                LPFC_EQE_SIZE);
13211         bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
13212         /* don't set up the delay multiplier using EQ_CREATE */
13213         dmult = 0;
13214         bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
13215                dmult);
13216         switch (eq->entry_count) {
13217         default:
13218                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13219                                 "0360 Unsupported EQ count. (%d)\n",
13220                                 eq->entry_count);
13221                 if (eq->entry_count < 256)
13222                         return -EINVAL;
13223                 /* otherwise default to smallest count (drop through) */
13224         case 256:
13225                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13226                        LPFC_EQ_CNT_256);
13227                 break;
13228         case 512:
13229                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13230                        LPFC_EQ_CNT_512);
13231                 break;
13232         case 1024:
13233                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13234                        LPFC_EQ_CNT_1024);
13235                 break;
13236         case 2048:
13237                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13238                        LPFC_EQ_CNT_2048);
13239                 break;
13240         case 4096:
13241                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13242                        LPFC_EQ_CNT_4096);
13243                 break;
13244         }
13245         list_for_each_entry(dmabuf, &eq->page_list, list) {
13246                 memset(dmabuf->virt, 0, hw_page_size);
13247                 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13248                                         putPaddrLow(dmabuf->phys);
13249                 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13250                                         putPaddrHigh(dmabuf->phys);
13251         }
13252         mbox->vport = phba->pport;
13253         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13254         mbox->context1 = NULL;
13255         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13256         shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
13257         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13258         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13259         if (shdr_status || shdr_add_status || rc) {
13260                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13261                                 "2500 EQ_CREATE mailbox failed with "
13262                                 "status x%x add_status x%x, mbx status x%x\n",
13263                                 shdr_status, shdr_add_status, rc);
13264                 status = -ENXIO;
13265         }
13266         eq->type = LPFC_EQ;
13267         eq->subtype = LPFC_NONE;
13268         eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
13269         if (eq->queue_id == 0xFFFF)
13270                 status = -ENXIO;
13271         eq->host_index = 0;
13272         eq->hba_index = 0;
13273
13274         mempool_free(mbox, phba->mbox_mem_pool);
13275         return status;
13276 }
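
/*
 * A condensed sketch of the SLI4 config-mailbox status check repeated by
 * the queue-create routines in this file: the IOCTL status is embedded in
 * the mailbox subheader, so the subheader status words must be checked in
 * addition to the mailbox return code before trusting the response:
 *
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 *	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 *	if (shdr_status || shdr_add_status || rc)
 *		status = -ENXIO;
 */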
13277
13278 /**
13279  * lpfc_cq_create - Create a Completion Queue on the HBA
13280  * @phba: HBA structure that indicates port to create a queue on.
13281  * @cq: The queue structure to use to create the completion queue.
13282  * @eq: The event queue to bind this completion queue to.
13283  *
13284  * This function creates a completion queue, as detailed in @cq, on a port,
13285  * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
13286  *
13287  * The @phba struct is used to send mailbox command to HBA. The @cq struct
13288  * is used to get the entry count and entry size that are necessary to
13289  * determine the number of pages to allocate and use for this queue. The @eq
13290  * is used to indicate which event queue to bind this completion queue to. This
13291  * function will send the CQ_CREATE mailbox command to the HBA to setup the
13292  * completion queue. This function is asynchronous and will wait for the mailbox
13293  * command to finish before continuing.
13294  *
13295  * On success this function will return a zero. If unable to allocate enough
13296  * memory this function will return -ENOMEM. If the queue create mailbox command
13297  * fails this function will return -ENXIO.
13298  **/
13299 int
13300 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
13301                struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
13302 {
13303         struct lpfc_mbx_cq_create *cq_create;
13304         struct lpfc_dmabuf *dmabuf;
13305         LPFC_MBOXQ_t *mbox;
13306         int rc, length, status = 0;
13307         uint32_t shdr_status, shdr_add_status;
13308         union lpfc_sli4_cfg_shdr *shdr;
13309         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13310
13311         /* sanity check on queue memory */
13312         if (!cq || !eq)
13313                 return -ENODEV;
13314         if (!phba->sli4_hba.pc_sli4_params.supported)
13315                 hw_page_size = SLI4_PAGE_SIZE;
13316
13317         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13318         if (!mbox)
13319                 return -ENOMEM;
13320         length = (sizeof(struct lpfc_mbx_cq_create) -
13321                   sizeof(struct lpfc_sli4_cfg_mhdr));
13322         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13323                          LPFC_MBOX_OPCODE_CQ_CREATE,
13324                          length, LPFC_SLI4_MBX_EMBED);
13325         cq_create = &mbox->u.mqe.un.cq_create;
13326         shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
13327         bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
13328                     cq->page_count);
13329         bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
13330         bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
13331         bf_set(lpfc_mbox_hdr_version, &shdr->request,
13332                phba->sli4_hba.pc_sli4_params.cqv);
13333         if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
13334                 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
13335                 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
13336                 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
13337                        eq->queue_id);
13338         } else {
13339                 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
13340                        eq->queue_id);
13341         }
13342         switch (cq->entry_count) {
13343         default:
13344                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13345                                 "0361 Unsupported CQ count. (%d)\n",
13346                                 cq->entry_count);
13347                 if (cq->entry_count < 256) {
13348                         status = -EINVAL;
13349                         goto out;
13350                 }
13351                 /* otherwise default to smallest count (drop through) */
13352         case 256:
13353                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
13354                        LPFC_CQ_CNT_256);
13355                 break;
13356         case 512:
13357                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
13358                        LPFC_CQ_CNT_512);
13359                 break;
13360         case 1024:
13361                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
13362                        LPFC_CQ_CNT_1024);
13363                 break;
13364         }
13365         list_for_each_entry(dmabuf, &cq->page_list, list) {
13366                 memset(dmabuf->virt, 0, hw_page_size);
13367                 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13368                                         putPaddrLow(dmabuf->phys);
13369                 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13370                                         putPaddrHigh(dmabuf->phys);
13371         }
13372         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13373
13374         /* The IOCTL status is embedded in the mailbox subheader. */
13375         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13376         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13377         if (shdr_status || shdr_add_status || rc) {
13378                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13379                                 "2501 CQ_CREATE mailbox failed with "
13380                                 "status x%x add_status x%x, mbx status x%x\n",
13381                                 shdr_status, shdr_add_status, rc);
13382                 status = -ENXIO;
13383                 goto out;
13384         }
13385         cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
13386         if (cq->queue_id == 0xFFFF) {
13387                 status = -ENXIO;
13388                 goto out;
13389         }
13390         /* link the cq onto the parent eq child list */
13391         list_add_tail(&cq->list, &eq->child_list);
13392         /* Set up completion queue's type and subtype */
13393         cq->type = type;
13394         cq->subtype = subtype;
13395         cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
13396         cq->assoc_qid = eq->queue_id;
13397         cq->host_index = 0;
13398         cq->hba_index = 0;
13399
13400 out:
13401         mempool_free(mbox, phba->mbox_mem_pool);
13402         return status;
13403 }
13404
13405 /**
13406  * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
13407  * @phba: HBA structure that indicates port to create a queue on.
13408  * @mq: The queue structure to use to create the mailbox queue.
13409  * @mbox: An allocated pointer to type LPFC_MBOXQ_t
13410  * @cq: The completion queue to associate with this mq.
13411  *
13412  * This function provides failback (fb) functionality when the
13413  * mq_create_ext fails on older FW generations.  Its purpose is identical
13414  * to mq_create_ext otherwise.
13415  *
13416  * This routine cannot fail as all attributes were previously accessed and
13417  * initialized in mq_create_ext.
13418  **/
13419 static void
13420 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
13421                        LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
13422 {
13423         struct lpfc_mbx_mq_create *mq_create;
13424         struct lpfc_dmabuf *dmabuf;
13425         int length;
13426
13427         length = (sizeof(struct lpfc_mbx_mq_create) -
13428                   sizeof(struct lpfc_sli4_cfg_mhdr));
13429         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13430                          LPFC_MBOX_OPCODE_MQ_CREATE,
13431                          length, LPFC_SLI4_MBX_EMBED);
13432         mq_create = &mbox->u.mqe.un.mq_create;
13433         bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
13434                mq->page_count);
13435         bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
13436                cq->queue_id);
13437         bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
13438         switch (mq->entry_count) {
13439         case 16:
13440                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
13441                        LPFC_MQ_RING_SIZE_16);
13442                 break;
13443         case 32:
13444                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
13445                        LPFC_MQ_RING_SIZE_32);
13446                 break;
13447         case 64:
13448                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
13449                        LPFC_MQ_RING_SIZE_64);
13450                 break;
13451         case 128:
13452                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
13453                        LPFC_MQ_RING_SIZE_128);
13454                 break;
13455         }
13456         list_for_each_entry(dmabuf, &mq->page_list, list) {
13457                 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13458                         putPaddrLow(dmabuf->phys);
13459                 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13460                         putPaddrHigh(dmabuf->phys);
13461         }
13462 }
13463
13464 /**
13465  * lpfc_mq_create - Create a mailbox Queue on the HBA
13466  * @phba: HBA structure that indicates port to create a queue on.
13467  * @mq: The queue structure to use to create the mailbox queue.
13468  * @cq: The completion queue to associate with this mq.
13469  * @subtype: The queue's subtype.
13470  *
13471  * This function creates a mailbox queue, as detailed in @mq, on a port,
13472  * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
13473  *
13474  * The @phba struct is used to send mailbox command to HBA. The @mq struct
13475  * is used to get the entry count and entry size that are necessary to
13476  * determine the number of pages to allocate and use for this queue. This
13477  * function will send the MQ_CREATE mailbox command to the HBA to set up the
13478  * mailbox queue. This function is synchronous and polls for the mailbox
13479  * command to finish before returning.
13480  *
13481  * On success this function will return a zero. If unable to allocate enough
13482  * memory this function will return -ENOMEM. If the queue create mailbox command
13483  * fails this function will return -ENXIO.
13484  **/
13485 int32_t
13486 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
13487                struct lpfc_queue *cq, uint32_t subtype)
13488 {
13489         struct lpfc_mbx_mq_create *mq_create;
13490         struct lpfc_mbx_mq_create_ext *mq_create_ext;
13491         struct lpfc_dmabuf *dmabuf;
13492         LPFC_MBOXQ_t *mbox;
13493         int rc, length, status = 0;
13494         uint32_t shdr_status, shdr_add_status;
13495         union lpfc_sli4_cfg_shdr *shdr;
13496         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13497
13498         /* sanity check on queue memory */
13499         if (!mq || !cq)
13500                 return -ENODEV;
13501         if (!phba->sli4_hba.pc_sli4_params.supported)
13502                 hw_page_size = SLI4_PAGE_SIZE;
13503
13504         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13505         if (!mbox)
13506                 return -ENOMEM;
13507         length = (sizeof(struct lpfc_mbx_mq_create_ext) -
13508                   sizeof(struct lpfc_sli4_cfg_mhdr));
13509         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13510                          LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
13511                          length, LPFC_SLI4_MBX_EMBED);
13512
13513         mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
13514         shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
13515         bf_set(lpfc_mbx_mq_create_ext_num_pages,
13516                &mq_create_ext->u.request, mq->page_count);
13517         bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
13518                &mq_create_ext->u.request, 1);
13519         bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
13520                &mq_create_ext->u.request, 1);
13521         bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
13522                &mq_create_ext->u.request, 1);
13523         bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
13524                &mq_create_ext->u.request, 1);
13525         bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
13526                &mq_create_ext->u.request, 1);
13527         bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
13528         bf_set(lpfc_mbox_hdr_version, &shdr->request,
13529                phba->sli4_hba.pc_sli4_params.mqv);
13530         if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
13531                 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
13532                        cq->queue_id);
13533         else
13534                 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
13535                        cq->queue_id);
13536         switch (mq->entry_count) {
13537         default:
13538                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13539                                 "0362 Unsupported MQ count. (%d)\n",
13540                                 mq->entry_count);
13541                 if (mq->entry_count < 16) {
13542                         status = -EINVAL;
13543                         goto out;
13544                 }
13545                 /* otherwise default to smallest count (fall through) */
13546         case 16:
13547                 bf_set(lpfc_mq_context_ring_size,
13548                        &mq_create_ext->u.request.context,
13549                        LPFC_MQ_RING_SIZE_16);
13550                 break;
13551         case 32:
13552                 bf_set(lpfc_mq_context_ring_size,
13553                        &mq_create_ext->u.request.context,
13554                        LPFC_MQ_RING_SIZE_32);
13555                 break;
13556         case 64:
13557                 bf_set(lpfc_mq_context_ring_size,
13558                        &mq_create_ext->u.request.context,
13559                        LPFC_MQ_RING_SIZE_64);
13560                 break;
13561         case 128:
13562                 bf_set(lpfc_mq_context_ring_size,
13563                        &mq_create_ext->u.request.context,
13564                        LPFC_MQ_RING_SIZE_128);
13565                 break;
13566         }
13567         list_for_each_entry(dmabuf, &mq->page_list, list) {
13568                 memset(dmabuf->virt, 0, hw_page_size);
13569                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
13570                                         putPaddrLow(dmabuf->phys);
13571                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
13572                                         putPaddrHigh(dmabuf->phys);
13573         }
13574         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13575         mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
13576                               &mq_create_ext->u.response);
13577         if (rc != MBX_SUCCESS) {
13578                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13579                                 "2795 MQ_CREATE_EXT failed with "
13580                                 "status x%x. Falling back to MQ_CREATE.\n",
13581                                 rc);
13582                 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
13583                 mq_create = &mbox->u.mqe.un.mq_create;
13584                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13585                 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
13586                 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
13587                                       &mq_create->u.response);
13588         }
13589
13590         /* The IOCTL status is embedded in the mailbox subheader. */
13591         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13592         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13593         if (shdr_status || shdr_add_status || rc) {
13594                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13595                                 "2502 MQ_CREATE mailbox failed with "
13596                                 "status x%x add_status x%x, mbx status x%x\n",
13597                                 shdr_status, shdr_add_status, rc);
13598                 status = -ENXIO;
13599                 goto out;
13600         }
13601         if (mq->queue_id == 0xFFFF) {
13602                 status = -ENXIO;
13603                 goto out;
13604         }
13605         mq->type = LPFC_MQ;
13606         mq->assoc_qid = cq->queue_id;
13607         mq->subtype = subtype;
13608         mq->host_index = 0;
13609         mq->hba_index = 0;
13610
13611         /* link the mq onto the parent cq child list */
13612         list_add_tail(&mq->list, &cq->child_list);
13613 out:
13614         mempool_free(mbox, phba->mbox_mem_pool);
13615         return status;
13616 }
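
/*
 * Editor's example: a minimal sketch, not compiled into the driver, of
 * how a caller pairs lpfc_mq_create() with the three-argument
 * lpfc_sli4_queue_alloc() helper used elsewhere in this driver, binding
 * the new MQ to an already-created CQ. The entry size/count literals,
 * the example_ function name, and the error handling are illustrative
 * assumptions only.
 */
#if 0
static int example_setup_mq(struct lpfc_hba *phba, struct lpfc_queue *mbx_cq)
{
        struct lpfc_queue *mq;
        int rc;

        /* 256-byte MQEs, 16 entries: the smallest supported MQ ring */
        mq = lpfc_sli4_queue_alloc(phba, 256, 16);
        if (!mq)
                return -ENOMEM;

        rc = lpfc_mq_create(phba, mq, mbx_cq, LPFC_MBOX);
        if (rc)
                lpfc_sli4_queue_free(mq);       /* create failed; drop pages */
        return rc;
}
#endif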
13617
13618 /**
13619  * lpfc_wq_create - Create a Work Queue on the HBA
13620  * @phba: HBA structure that indicates port to create a queue on.
13621  * @wq: The queue structure to use to create the work queue.
13622  * @cq: The completion queue to bind this work queue to.
13623  * @subtype: The subtype of the work queue indicating its functionality.
13624  *
13625  * This function creates a work queue, as detailed in @wq, on a port
13626  * described by @phba, by sending a WQ_CREATE mailbox command to the HBA.
13627  *
13628  * The @phba struct is used to send the mailbox command to the HBA. The @wq
13629  * struct is used to get the entry count and entry size that are necessary
13630  * to determine the number of pages to allocate for this queue. The @cq
13631  * struct indicates which completion queue to bind this work queue to. This
13632  * function sends the WQ_CREATE mailbox command to the HBA to set up the
13633  * work queue; it is synchronous and polls for the mailbox command to
13634  * complete before returning.
13635  *
13636  * On success this function returns zero. If unable to allocate enough
13637  * memory it returns -ENOMEM. If the queue-create mailbox command fails it
13638  * returns -ENXIO.
13639  **/
13640 int
13641 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
13642                struct lpfc_queue *cq, uint32_t subtype)
13643 {
13644         struct lpfc_mbx_wq_create *wq_create;
13645         struct lpfc_dmabuf *dmabuf;
13646         LPFC_MBOXQ_t *mbox;
13647         int rc, length, status = 0;
13648         uint32_t shdr_status, shdr_add_status;
13649         union lpfc_sli4_cfg_shdr *shdr;
13650         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13651         struct dma_address *page;
13652         void __iomem *bar_memmap_p;
13653         uint32_t db_offset;
13654         uint16_t pci_barset;
13655
13656         /* sanity check on queue memory */
13657         if (!wq || !cq)
13658                 return -ENODEV;
13659         if (!phba->sli4_hba.pc_sli4_params.supported)
13660                 hw_page_size = SLI4_PAGE_SIZE;
13661
13662         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13663         if (!mbox)
13664                 return -ENOMEM;
13665         length = (sizeof(struct lpfc_mbx_wq_create) -
13666                   sizeof(struct lpfc_sli4_cfg_mhdr));
13667         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13668                          LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
13669                          length, LPFC_SLI4_MBX_EMBED);
13670         wq_create = &mbox->u.mqe.un.wq_create;
13671         shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
13672         bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
13673                     wq->page_count);
13674         bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
13675                     cq->queue_id);
13676
13677         /* wqv is the earliest version supported, NOT the latest */
13678         bf_set(lpfc_mbox_hdr_version, &shdr->request,
13679                phba->sli4_hba.pc_sli4_params.wqv);
13680
13681         switch (phba->sli4_hba.pc_sli4_params.wqv) {
13682         case LPFC_Q_CREATE_VERSION_0:
13683                 switch (wq->entry_size) {
13684                 default:
13685                 case 64:
13686                         /* Nothing to do, version 0 ONLY supports 64 byte */
13687                         page = wq_create->u.request.page;
13688                         break;
13689                 case 128:
13690                         if (!(phba->sli4_hba.pc_sli4_params.wqsize &
13691                             LPFC_WQ_SZ128_SUPPORT)) {
13692                                 status = -ERANGE;
13693                                 goto out;
13694                         }
13695                         /* If we get here the HBA MUST also support V1 and
13696                          * we MUST use it
13697                          */
13698                         bf_set(lpfc_mbox_hdr_version, &shdr->request,
13699                                LPFC_Q_CREATE_VERSION_1);
13700
13701                         bf_set(lpfc_mbx_wq_create_wqe_count,
13702                                &wq_create->u.request_1, wq->entry_count);
13703                         bf_set(lpfc_mbx_wq_create_wqe_size,
13704                                &wq_create->u.request_1,
13705                                LPFC_WQ_WQE_SIZE_128);
13706                         bf_set(lpfc_mbx_wq_create_page_size,
13707                                &wq_create->u.request_1,
13708                                LPFC_WQ_PAGE_SIZE_4096);
13709                         page = wq_create->u.request_1.page;
13710                         break;
13711                 }
13712                 break;
13713         case LPFC_Q_CREATE_VERSION_1:
13714                 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
13715                        wq->entry_count);
13716                 bf_set(lpfc_mbox_hdr_version, &shdr->request,
13717                        LPFC_Q_CREATE_VERSION_1);
13718
13719                 switch (wq->entry_size) {
13720                 default:
13721                 case 64:
13722                         bf_set(lpfc_mbx_wq_create_wqe_size,
13723                                &wq_create->u.request_1,
13724                                LPFC_WQ_WQE_SIZE_64);
13725                         break;
13726                 case 128:
13727                         if (!(phba->sli4_hba.pc_sli4_params.wqsize &
13728                                 LPFC_WQ_SZ128_SUPPORT)) {
13729                                 status = -ERANGE;
13730                                 goto out;
13731                         }
13732                         bf_set(lpfc_mbx_wq_create_wqe_size,
13733                                &wq_create->u.request_1,
13734                                LPFC_WQ_WQE_SIZE_128);
13735                         break;
13736                 }
13737                 bf_set(lpfc_mbx_wq_create_page_size,
13738                        &wq_create->u.request_1,
13739                        LPFC_WQ_PAGE_SIZE_4096);
13740                 page = wq_create->u.request_1.page;
13741                 break;
13742         default:
13743                 status = -ERANGE;
13744                 goto out;
13745         }
13746
13747         list_for_each_entry(dmabuf, &wq->page_list, list) {
13748                 memset(dmabuf->virt, 0, hw_page_size);
13749                 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
13750                 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
13751         }
13752
13753         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
13754                 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
13755
13756         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13757         /* The IOCTL status is embedded in the mailbox subheader. */
13758         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13759         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13760         if (shdr_status || shdr_add_status || rc) {
13761                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13762                                 "2503 WQ_CREATE mailbox failed with "
13763                                 "status x%x add_status x%x, mbx status x%x\n",
13764                                 shdr_status, shdr_add_status, rc);
13765                 status = -ENXIO;
13766                 goto out;
13767         }
13768         wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
13769         if (wq->queue_id == 0xFFFF) {
13770                 status = -ENXIO;
13771                 goto out;
13772         }
13773         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
13774                 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
13775                                        &wq_create->u.response);
13776                 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
13777                     (wq->db_format != LPFC_DB_RING_FORMAT)) {
13778                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13779                                         "3265 WQ[%d] doorbell format not "
13780                                         "supported: x%x\n", wq->queue_id,
13781                                         wq->db_format);
13782                         status = -EINVAL;
13783                         goto out;
13784                 }
13785                 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
13786                                     &wq_create->u.response);
13787                 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
13788                 if (!bar_memmap_p) {
13789                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13790                                         "3263 WQ[%d] failed to memmap pci "
13791                                         "barset:x%x\n", wq->queue_id,
13792                                         pci_barset);
13793                         status = -ENOMEM;
13794                         goto out;
13795                 }
13796                 db_offset = wq_create->u.response.doorbell_offset;
13797                 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
13798                     (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
13799                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13800                                         "3252 WQ[%d] doorbell offset not "
13801                                         "supported: x%x\n", wq->queue_id,
13802                                         db_offset);
13803                         status = -EINVAL;
13804                         goto out;
13805                 }
13806                 wq->db_regaddr = bar_memmap_p + db_offset;
13807                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13808                                 "3264 WQ[%d]: barset:x%x, offset:x%x, "
13809                                 "format:x%x\n", wq->queue_id, pci_barset,
13810                                 db_offset, wq->db_format);
13811         } else {
13812                 wq->db_format = LPFC_DB_LIST_FORMAT;
13813                 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
13814         }
13815         wq->type = LPFC_WQ;
13816         wq->assoc_qid = cq->queue_id;
13817         wq->subtype = subtype;
13818         wq->host_index = 0;
13819         wq->hba_index = 0;
13820         wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
13821
13822         /* link the wq onto the parent cq child list */
13823         list_add_tail(&wq->list, &cq->child_list);
13824 out:
13825         mempool_free(mbox, phba->mbox_mem_pool);
13826         return status;
13827 }
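
/*
 * Editor's example: a minimal sketch, not compiled, showing that a WQ
 * with 128-byte WQEs is only possible when the port reports
 * LPFC_WQ_SZ128_SUPPORT, so a caller can probe pc_sli4_params up front
 * instead of relying on the -ERANGE return above. The entry count
 * literal and the example_ name are illustrative assumptions.
 */
#if 0
static int example_setup_wq(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
        struct lpfc_queue *wq;
        uint32_t wqe_size = 64;
        int rc;

        /* prefer 128-byte WQEs when the port supports them */
        if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
                wqe_size = 128;

        wq = lpfc_sli4_queue_alloc(phba, wqe_size, 256);
        if (!wq)
                return -ENOMEM;

        rc = lpfc_wq_create(phba, wq, cq, LPFC_FCP);
        if (rc)
                lpfc_sli4_queue_free(wq);
        return rc;
}
#endif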
13828
13829 /**
13830  * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
13831  * @phba: HBA structure that indicates port to create a queue on.
13832  * @rq:   The queue structure to use for the receive queue.
13833  * @qno:  The associated HBQ number
13834  *
13836  * For SLI4 we need to adjust the RQ repost value based on
13837  * the number of buffers that are initially posted to the RQ.
13838  */
13839 void
13840 lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
13841 {
13842         uint32_t cnt;
13843
13844         /* sanity check on queue memory */
13845         if (!rq)
13846                 return;
13847         cnt = lpfc_hbq_defs[qno]->entry_count;
13848
13849         /* Recalc repost for RQs based on buffers initially posted */
13850         cnt = (cnt >> 3);
13851         if (cnt < LPFC_QUEUE_MIN_REPOST)
13852                 cnt = LPFC_QUEUE_MIN_REPOST;
13853
13854         rq->entry_repost = cnt;
13855 }
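
/*
 * Editor's note, a worked example with illustrative counts: for an HBQ
 * defined with 256 entries the repost interval becomes 256 >> 3 = 32,
 * i.e. the RQ doorbell is rung once for every 32 buffers replenished;
 * any result below LPFC_QUEUE_MIN_REPOST is clamped up to that floor.
 */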
13856
13857 /**
13858  * lpfc_rq_create - Create a Receive Queue on the HBA
13859  * @phba: HBA structure that indicates port to create a queue on.
13860  * @hrq: The queue structure to use to create the header receive queue.
13861  * @drq: The queue structure to use to create the data receive queue.
13862  * @cq: The completion queue to bind this receive queue pair to.
13863  * @subtype: The subtype of the queue pair indicating its functionality.
13864  *
13865  * This function creates a receive buffer queue pair, as detailed in @hrq
13866  * and @drq, on a port described by @phba, by sending an RQ_CREATE mailbox
13867  * command to the HBA.
13868  *
13869  * The @phba struct is used to send the mailbox command to the HBA. The
13870  * @hrq and @drq structs are used to get the entry count that is necessary
13871  * to determine the number of pages to use for each queue. The @cq struct
13872  * indicates which completion queue the buffers posted to these queues are
13873  * bound to. This function is synchronous: it issues the RQ_CREATE mailbox
13874  * command in polling mode and waits for it to complete before returning.
13875  *
13876  * On success this function returns zero. If unable to allocate enough
13877  * memory it returns -ENOMEM. If the queue-create mailbox command fails it
13878  * returns -ENXIO.
13879  **/
13880 int
13881 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
13882                struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
13883 {
13884         struct lpfc_mbx_rq_create *rq_create;
13885         struct lpfc_dmabuf *dmabuf;
13886         LPFC_MBOXQ_t *mbox;
13887         int rc, length, status = 0;
13888         uint32_t shdr_status, shdr_add_status;
13889         union lpfc_sli4_cfg_shdr *shdr;
13890         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13891         void __iomem *bar_memmap_p;
13892         uint32_t db_offset;
13893         uint16_t pci_barset;
13894
13895         /* sanity check on queue memory */
13896         if (!hrq || !drq || !cq)
13897                 return -ENODEV;
13898         if (!phba->sli4_hba.pc_sli4_params.supported)
13899                 hw_page_size = SLI4_PAGE_SIZE;
13900
13901         if (hrq->entry_count != drq->entry_count)
13902                 return -EINVAL;
13903         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13904         if (!mbox)
13905                 return -ENOMEM;
13906         length = (sizeof(struct lpfc_mbx_rq_create) -
13907                   sizeof(struct lpfc_sli4_cfg_mhdr));
13908         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13909                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
13910                          length, LPFC_SLI4_MBX_EMBED);
13911         rq_create = &mbox->u.mqe.un.rq_create;
13912         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
13913         bf_set(lpfc_mbox_hdr_version, &shdr->request,
13914                phba->sli4_hba.pc_sli4_params.rqv);
13915         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
13916                 bf_set(lpfc_rq_context_rqe_count_1,
13917                        &rq_create->u.request.context,
13918                        hrq->entry_count);
13919                 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
13920                 bf_set(lpfc_rq_context_rqe_size,
13921                        &rq_create->u.request.context,
13922                        LPFC_RQE_SIZE_8);
13923                 bf_set(lpfc_rq_context_page_size,
13924                        &rq_create->u.request.context,
13925                        LPFC_RQ_PAGE_SIZE_4096);
13926         } else {
13927                 switch (hrq->entry_count) {
13928                 default:
13929                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13930                                         "2535 Unsupported RQ count. (%d)\n",
13931                                         hrq->entry_count);
13932                         if (hrq->entry_count < 512) {
13933                                 status = -EINVAL;
13934                                 goto out;
13935                         }
13936                         /* otherwise default to smallest count (fall through) */
13937                 case 512:
13938                         bf_set(lpfc_rq_context_rqe_count,
13939                                &rq_create->u.request.context,
13940                                LPFC_RQ_RING_SIZE_512);
13941                         break;
13942                 case 1024:
13943                         bf_set(lpfc_rq_context_rqe_count,
13944                                &rq_create->u.request.context,
13945                                LPFC_RQ_RING_SIZE_1024);
13946                         break;
13947                 case 2048:
13948                         bf_set(lpfc_rq_context_rqe_count,
13949                                &rq_create->u.request.context,
13950                                LPFC_RQ_RING_SIZE_2048);
13951                         break;
13952                 case 4096:
13953                         bf_set(lpfc_rq_context_rqe_count,
13954                                &rq_create->u.request.context,
13955                                LPFC_RQ_RING_SIZE_4096);
13956                         break;
13957                 }
13958                 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
13959                        LPFC_HDR_BUF_SIZE);
13960         }
13961         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
13962                cq->queue_id);
13963         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
13964                hrq->page_count);
13965         list_for_each_entry(dmabuf, &hrq->page_list, list) {
13966                 memset(dmabuf->virt, 0, hw_page_size);
13967                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13968                                         putPaddrLow(dmabuf->phys);
13969                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13970                                         putPaddrHigh(dmabuf->phys);
13971         }
13972         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
13973                 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
13974
13975         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13976         /* The IOCTL status is embedded in the mailbox subheader. */
13977         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13978         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13979         if (shdr_status || shdr_add_status || rc) {
13980                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13981                                 "2504 RQ_CREATE mailbox failed with "
13982                                 "status x%x add_status x%x, mbx status x%x\n",
13983                                 shdr_status, shdr_add_status, rc);
13984                 status = -ENXIO;
13985                 goto out;
13986         }
13987         hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
13988         if (hrq->queue_id == 0xFFFF) {
13989                 status = -ENXIO;
13990                 goto out;
13991         }
13992
13993         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
13994                 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
13995                                         &rq_create->u.response);
13996                 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
13997                     (hrq->db_format != LPFC_DB_RING_FORMAT)) {
13998                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13999                                         "3262 RQ [%d] doorbell format not "
14000                                         "supported: x%x\n", hrq->queue_id,
14001                                         hrq->db_format);
14002                         status = -EINVAL;
14003                         goto out;
14004                 }
14005
14006                 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
14007                                     &rq_create->u.response);
14008                 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
14009                 if (!bar_memmap_p) {
14010                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14011                                         "3269 RQ[%d] failed to memmap pci "
14012                                         "barset:x%x\n", hrq->queue_id,
14013                                         pci_barset);
14014                         status = -ENOMEM;
14015                         goto out;
14016                 }
14017
14018                 db_offset = rq_create->u.response.doorbell_offset;
14019                 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
14020                     (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
14021                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14022                                         "3270 RQ[%d] doorbell offset not "
14023                                         "supported: x%x\n", hrq->queue_id,
14024                                         db_offset);
14025                         status = -EINVAL;
14026                         goto out;
14027                 }
14028                 hrq->db_regaddr = bar_memmap_p + db_offset;
14029                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14030                                 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
14031                                 "format:x%x\n", hrq->queue_id, pci_barset,
14032                                 db_offset, hrq->db_format);
14033         } else {
14034                 hrq->db_format = LPFC_DB_RING_FORMAT;
14035                 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
14036         }
14037         hrq->type = LPFC_HRQ;
14038         hrq->assoc_qid = cq->queue_id;
14039         hrq->subtype = subtype;
14040         hrq->host_index = 0;
14041         hrq->hba_index = 0;
14042
14043         /* now create the data queue */
14044         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14045                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
14046                          length, LPFC_SLI4_MBX_EMBED);
14047         bf_set(lpfc_mbox_hdr_version, &shdr->request,
14048                phba->sli4_hba.pc_sli4_params.rqv);
14049         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
14050                 bf_set(lpfc_rq_context_rqe_count_1,
14051                        &rq_create->u.request.context, hrq->entry_count);
14052                 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
14053                 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
14054                        LPFC_RQE_SIZE_8);
14055                 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
14056                        (PAGE_SIZE/SLI4_PAGE_SIZE));
14057         } else {
14058                 switch (drq->entry_count) {
14059                 default:
14060                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14061                                         "2536 Unsupported RQ count. (%d)\n",
14062                                         drq->entry_count);
14063                         if (drq->entry_count < 512) {
14064                                 status = -EINVAL;
14065                                 goto out;
14066                         }
14067                         /* otherwise default to smallest count (fall through) */
14068                 case 512:
14069                         bf_set(lpfc_rq_context_rqe_count,
14070                                &rq_create->u.request.context,
14071                                LPFC_RQ_RING_SIZE_512);
14072                         break;
14073                 case 1024:
14074                         bf_set(lpfc_rq_context_rqe_count,
14075                                &rq_create->u.request.context,
14076                                LPFC_RQ_RING_SIZE_1024);
14077                         break;
14078                 case 2048:
14079                         bf_set(lpfc_rq_context_rqe_count,
14080                                &rq_create->u.request.context,
14081                                LPFC_RQ_RING_SIZE_2048);
14082                         break;
14083                 case 4096:
14084                         bf_set(lpfc_rq_context_rqe_count,
14085                                &rq_create->u.request.context,
14086                                LPFC_RQ_RING_SIZE_4096);
14087                         break;
14088                 }
14089                 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
14090                        LPFC_DATA_BUF_SIZE);
14091         }
14092         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
14093                cq->queue_id);
14094         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
14095                drq->page_count);
14096         list_for_each_entry(dmabuf, &drq->page_list, list) {
14097                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14098                                         putPaddrLow(dmabuf->phys);
14099                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14100                                         putPaddrHigh(dmabuf->phys);
14101         }
14102         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
14103                 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
14104         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14105         /* The IOCTL status is embedded in the mailbox subheader. */
14106         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
14107         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14108         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14109         if (shdr_status || shdr_add_status || rc) {
14110                 status = -ENXIO;
14111                 goto out;
14112         }
14113         drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
14114         if (drq->queue_id == 0xFFFF) {
14115                 status = -ENXIO;
14116                 goto out;
14117         }
14118         drq->type = LPFC_DRQ;
14119         drq->assoc_qid = cq->queue_id;
14120         drq->subtype = subtype;
14121         drq->host_index = 0;
14122         drq->hba_index = 0;
14123
14124         /* link the header and data RQs onto the parent cq child list */
14125         list_add_tail(&hrq->list, &cq->child_list);
14126         list_add_tail(&drq->list, &cq->child_list);
14127
14128 out:
14129         mempool_free(mbox, phba->mbox_mem_pool);
14130         return status;
14131 }
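
/*
 * Editor's example: a minimal sketch, not compiled, of creating the
 * header/data pair; the two RQs must be allocated with identical entry
 * counts or lpfc_rq_create() fails with -EINVAL. The RQE size/count
 * literals and the example_ name are illustrative assumptions, and the
 * three-argument lpfc_sli4_queue_alloc() helper is assumed as above.
 */
#if 0
static int example_setup_rq_pair(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
        struct lpfc_queue *hrq, *drq;
        int rc = -ENOMEM;

        hrq = lpfc_sli4_queue_alloc(phba, 16, 512);     /* header RQ */
        drq = lpfc_sli4_queue_alloc(phba, 16, 512);     /* same count as hrq */
        if (!hrq || !drq)
                goto fail;

        rc = lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
        if (!rc)
                return 0;
fail:
        if (hrq)
                lpfc_sli4_queue_free(hrq);
        if (drq)
                lpfc_sli4_queue_free(drq);
        return rc;
}
#endif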
14132
14133 /**
14134  * lpfc_eq_destroy - Destroy an Event Queue on the HBA
14135  * @phba: HBA structure that indicates the port the queue was created on.
14136  * @eq: The queue structure associated with the queue to destroy.
14137  *
14138  * This function destroys the queue detailed in @eq by sending an
14139  * EQ_DESTROY mailbox command to the HBA. The @eq struct is used to get
14140  * the queue ID of the queue to destroy.
14141  *
14142  * On success this function returns zero. If the queue-destroy mailbox
14143  * command fails it returns -ENXIO.
14144  **/
14145 int
14146 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
14147 {
14148         LPFC_MBOXQ_t *mbox;
14149         int rc, length, status = 0;
14150         uint32_t shdr_status, shdr_add_status;
14151         union lpfc_sli4_cfg_shdr *shdr;
14152
14153         /* sanity check on queue memory */
14154         if (!eq)
14155                 return -ENODEV;
14156         mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
14157         if (!mbox)
14158                 return -ENOMEM;
14159         length = (sizeof(struct lpfc_mbx_eq_destroy) -
14160                   sizeof(struct lpfc_sli4_cfg_mhdr));
14161         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14162                          LPFC_MBOX_OPCODE_EQ_DESTROY,
14163                          length, LPFC_SLI4_MBX_EMBED);
14164         bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
14165                eq->queue_id);
14166         mbox->vport = eq->phba->pport;
14167         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14168
14169         rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
14170         /* The IOCTL status is embedded in the mailbox subheader. */
14171         shdr = (union lpfc_sli4_cfg_shdr *)
14172                 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
14173         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14174         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14175         if (shdr_status || shdr_add_status || rc) {
14176                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14177                                 "2505 EQ_DESTROY mailbox failed with "
14178                                 "status x%x add_status x%x, mbx status x%x\n",
14179                                 shdr_status, shdr_add_status, rc);
14180                 status = -ENXIO;
14181         }
14182
14183         /* Remove eq from any list */
14184         list_del_init(&eq->list);
14185         mempool_free(mbox, eq->phba->mbox_mem_pool);
14186         return status;
14187 }
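
/*
 * Editor's note, an illustrative fragment (not compiled): queues are
 * torn down leaf first, so a caller destroys the WQs/RQs hanging off a
 * CQ, then the CQ, and only then the parent EQ; each *_destroy() call
 * unlinks the queue from its parent's child_list via list_del_init().
 * The queue pointer fields are those used elsewhere in this driver.
 */
#if 0
        lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
        lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
        lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[0]);
#endif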
14188
14189 /**
14190  * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
14191  * @phba: HBA structure that indicates the port the queue was created on.
14192  * @cq: The queue structure associated with the queue to destroy.
14193  *
14194  * This function destroys the queue detailed in @cq by sending a
14195  * CQ_DESTROY mailbox command to the HBA. The @cq struct is used to get
14196  * the queue ID of the queue to destroy.
14197  *
14198  * On success this function returns zero. If the queue-destroy mailbox
14199  * command fails it returns -ENXIO.
14200  **/
14201 int
14202 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
14203 {
14204         LPFC_MBOXQ_t *mbox;
14205         int rc, length, status = 0;
14206         uint32_t shdr_status, shdr_add_status;
14207         union lpfc_sli4_cfg_shdr *shdr;
14208
14209         /* sanity check on queue memory */
14210         if (!cq)
14211                 return -ENODEV;
14212         mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
14213         if (!mbox)
14214                 return -ENOMEM;
14215         length = (sizeof(struct lpfc_mbx_cq_destroy) -
14216                   sizeof(struct lpfc_sli4_cfg_mhdr));
14217         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14218                          LPFC_MBOX_OPCODE_CQ_DESTROY,
14219                          length, LPFC_SLI4_MBX_EMBED);
14220         bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
14221                cq->queue_id);
14222         mbox->vport = cq->phba->pport;
14223         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14224         rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
14225         /* The IOCTL status is embedded in the mailbox subheader. */
14226         shdr = (union lpfc_sli4_cfg_shdr *)
14227                 &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
14228         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14229         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14230         if (shdr_status || shdr_add_status || rc) {
14231                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14232                                 "2506 CQ_DESTROY mailbox failed with "
14233                                 "status x%x add_status x%x, mbx status x%x\n",
14234                                 shdr_status, shdr_add_status, rc);
14235                 status = -ENXIO;
14236         }
14237         /* Remove cq from any list */
14238         list_del_init(&cq->list);
14239         mempool_free(mbox, cq->phba->mbox_mem_pool);
14240         return status;
14241 }
14242
14243 /**
14244  * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
14245  * @phba: HBA structure that indicates the port the queue was created on.
14246  * @mq: The queue structure associated with the queue to destroy.
14247  *
14248  * This function destroys the queue detailed in @mq by sending an
14249  * MQ_DESTROY mailbox command to the HBA. The @mq struct is used to get
14250  * the queue ID of the queue to destroy.
14251  *
14252  * On success this function returns zero. If the queue-destroy mailbox
14253  * command fails it returns -ENXIO.
14254  **/
14255 int
14256 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
14257 {
14258         LPFC_MBOXQ_t *mbox;
14259         int rc, length, status = 0;
14260         uint32_t shdr_status, shdr_add_status;
14261         union lpfc_sli4_cfg_shdr *shdr;
14262
14263         /* sanity check on queue memory */
14264         if (!mq)
14265                 return -ENODEV;
14266         mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
14267         if (!mbox)
14268                 return -ENOMEM;
14269         length = (sizeof(struct lpfc_mbx_mq_destroy) -
14270                   sizeof(struct lpfc_sli4_cfg_mhdr));
14271         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14272                          LPFC_MBOX_OPCODE_MQ_DESTROY,
14273                          length, LPFC_SLI4_MBX_EMBED);
14274         bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
14275                mq->queue_id);
14276         mbox->vport = mq->phba->pport;
14277         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14278         rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
14279         /* The IOCTL status is embedded in the mailbox subheader. */
14280         shdr = (union lpfc_sli4_cfg_shdr *)
14281                 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
14282         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14283         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14284         if (shdr_status || shdr_add_status || rc) {
14285                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14286                                 "2507 MQ_DESTROY mailbox failed with "
14287                                 "status x%x add_status x%x, mbx status x%x\n",
14288                                 shdr_status, shdr_add_status, rc);
14289                 status = -ENXIO;
14290         }
14291         /* Remove mq from any list */
14292         list_del_init(&mq->list);
14293         mempool_free(mbox, mq->phba->mbox_mem_pool);
14294         return status;
14295 }
14296
14297 /**
14298  * lpfc_wq_destroy - Destroy a Work Queue on the HBA
14299  * @phba: HBA structure that indicates the port the queue was created on.
14300  * @wq: The queue structure associated with the queue to destroy.
14301  *
14302  * This function destroys the queue detailed in @wq by sending a
14303  * WQ_DESTROY mailbox command to the HBA. The @wq struct is used to get
14304  * the queue ID of the queue to destroy.
14305  *
14306  * On success this function returns zero. If the queue-destroy mailbox
14307  * command fails it returns -ENXIO.
14308  **/
14309 int
14310 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
14311 {
14312         LPFC_MBOXQ_t *mbox;
14313         int rc, length, status = 0;
14314         uint32_t shdr_status, shdr_add_status;
14315         union lpfc_sli4_cfg_shdr *shdr;
14316
14317         /* sanity check on queue memory */
14318         if (!wq)
14319                 return -ENODEV;
14320         mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
14321         if (!mbox)
14322                 return -ENOMEM;
14323         length = (sizeof(struct lpfc_mbx_wq_destroy) -
14324                   sizeof(struct lpfc_sli4_cfg_mhdr));
14325         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14326                          LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
14327                          length, LPFC_SLI4_MBX_EMBED);
14328         bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
14329                wq->queue_id);
14330         mbox->vport = wq->phba->pport;
14331         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14332         rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
14333         shdr = (union lpfc_sli4_cfg_shdr *)
14334                 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
14335         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14336         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14337         if (shdr_status || shdr_add_status || rc) {
14338                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14339                                 "2508 WQ_DESTROY mailbox failed with "
14340                                 "status x%x add_status x%x, mbx status x%x\n",
14341                                 shdr_status, shdr_add_status, rc);
14342                 status = -ENXIO;
14343         }
14344         /* Remove wq from any list */
14345         list_del_init(&wq->list);
14346         mempool_free(mbox, wq->phba->mbox_mem_pool);
14347         return status;
14348 }
14349
14350 /**
14351  * lpfc_rq_destroy - Destroy a Receive Queue pair on the HBA
14352  * @phba: HBA structure that indicates the port the queues were created on.
14353  * @hrq: The queue structure for the header receive queue to destroy.
14354  * @drq: The queue structure for the data receive queue to destroy.
14355  *
14356  * This function destroys the receive queue pair detailed in @hrq and @drq
14357  * by sending RQ_DESTROY mailbox commands to the HBA; the structs supply
14358  * the queue IDs of the queues to destroy.
14359  *
14360  * Returns zero on success, or -ENXIO if a queue-destroy command fails.
14361  **/
14362 int
14363 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
14364                 struct lpfc_queue *drq)
14365 {
14366         LPFC_MBOXQ_t *mbox;
14367         int rc, length, status = 0;
14368         uint32_t shdr_status, shdr_add_status;
14369         union lpfc_sli4_cfg_shdr *shdr;
14370
14371         /* sanity check on queue memory */
14372         if (!hrq || !drq)
14373                 return -ENODEV;
14374         mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
14375         if (!mbox)
14376                 return -ENOMEM;
14377         length = (sizeof(struct lpfc_mbx_rq_destroy) -
14378                   sizeof(struct lpfc_sli4_cfg_mhdr));
14379         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14380                          LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
14381                          length, LPFC_SLI4_MBX_EMBED);
14382         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
14383                hrq->queue_id);
14384         mbox->vport = hrq->phba->pport;
14385         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14386         rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
14387         /* The IOCTL status is embedded in the mailbox subheader. */
14388         shdr = (union lpfc_sli4_cfg_shdr *)
14389                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
14390         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14391         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14392         if (shdr_status || shdr_add_status || rc) {
14393                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14394                                 "2509 RQ_DESTROY mailbox failed with "
14395                                 "status x%x add_status x%x, mbx status x%x\n",
14396                                 shdr_status, shdr_add_status, rc);
14397                 if (rc != MBX_TIMEOUT)
14398                         mempool_free(mbox, hrq->phba->mbox_mem_pool);
14399                 return -ENXIO;
14400         }
14401         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
14402                drq->queue_id);
14403         rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
14404         shdr = (union lpfc_sli4_cfg_shdr *)
14405                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
14406         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14407         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14408         if (shdr_status || shdr_add_status || rc) {
14409                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14410                                 "2510 RQ_DESTROY mailbox failed with "
14411                                 "status x%x add_status x%x, mbx status x%x\n",
14412                                 shdr_status, shdr_add_status, rc);
14413                 status = -ENXIO;
14414         }
14415         list_del_init(&hrq->list);
14416         list_del_init(&drq->list);
14417         mempool_free(mbox, hrq->phba->mbox_mem_pool);
14418         return status;
14419 }
14420
14421 /**
14422  * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to the HBA
14423  * @phba: pointer to lpfc hba data structure.
14424  * @pdma_phys_addr0: Physical address of the 1st SGL page.
14425  * @pdma_phys_addr1: Physical address of the 2nd SGL page.
14426  * @xritag: the xritag that ties this io to the SGL pages.
14427  *
14428  * This routine posts the sgl pages for the IO that has the xritag that is
14429  * in the iocbq structure. The xritag is assigned during iocbq creation and
14430  * persists for as long as the driver is loaded.
14431  * If the caller has fewer than 256 scatter gather segments to map,
14432  * pdma_phys_addr1 should be 0.
14433  * If the caller needs to map more than 256 scatter gather segments,
14434  * pdma_phys_addr1 should be a valid physical address.
14435  * Physical addresses for SGLs must be 64-byte aligned.
14436  * If two SGLs are mapped, the first must have exactly 256 entries and the
14437  * second can have between 1 and 256 entries.
14438  *
14439  * Return codes:
14440  *      0 - Success
14441  *      -ENXIO, -ENOMEM - Failure
14442  **/
14443 int
14444 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
14445                 dma_addr_t pdma_phys_addr0,
14446                 dma_addr_t pdma_phys_addr1,
14447                 uint16_t xritag)
14448 {
14449         struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
14450         LPFC_MBOXQ_t *mbox;
14451         int rc;
14452         uint32_t shdr_status, shdr_add_status;
14453         uint32_t mbox_tmo;
14454         union lpfc_sli4_cfg_shdr *shdr;
14455
14456         if (xritag == NO_XRI) {
14457                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14458                                 "0364 Invalid param: xritag is NO_XRI\n");
14459                 return -EINVAL;
14460         }
14461
14462         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14463         if (!mbox)
14464                 return -ENOMEM;
14465
14466         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14467                         LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
14468                         sizeof(struct lpfc_mbx_post_sgl_pages) -
14469                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
14470
14471         post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
14472                                 &mbox->u.mqe.un.post_sgl_pages;
14473         bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
14474         bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
14475
14476         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
14477                                 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
14478         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
14479                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
14480
14481         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
14482                                 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
14483         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
14484                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
14485         if (!phba->sli4_hba.intr_enable)
14486                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14487         else {
14488                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
14489                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
14490         }
14491         /* The IOCTL status is embedded in the mailbox subheader. */
14492         shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
14493         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14494         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14495         if (rc != MBX_TIMEOUT)
14496                 mempool_free(mbox, phba->mbox_mem_pool);
14497         if (shdr_status || shdr_add_status || rc) {
14498                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14499                                 "2511 POST_SGL mailbox failed with status x%x add_status x%x, mbx status x%x\n",
14500                                 shdr_status, shdr_add_status, rc);
14501                 rc = -ENXIO;
14502         }
14503         return rc;
14504 }
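
/*
 * Editor's example: a minimal sketch, not compiled, of posting a single
 * SGL page for a freshly allocated xritag; with fewer than 256 scatter
 * gather entries the second page address is simply 0. The example_ name
 * is an illustrative assumption.
 */
#if 0
static int example_post_one_sgl(struct lpfc_hba *phba, dma_addr_t sgl_phys)
{
        uint16_t xritag = lpfc_sli4_next_xritag(phba);

        if (xritag == NO_XRI)
                return -ENOMEM;
        /* one page of SGEs only, so pdma_phys_addr1 == 0 */
        return lpfc_sli4_post_sgl(phba, sgl_phys, 0, xritag);
}
#endif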
14505
14506 /**
14507  * lpfc_sli4_alloc_xri - Get an available xri in the device's range
14508  * @phba: pointer to lpfc hba data structure.
14509  *
14510  * This routine is invoked to allocate the next available xri from the
14511  * driver's xri bitmask, consistent with the SLI-4 interface spec. The
14512  * search starts at logical index 0 each time, so freed xris are reused
14513  * in preference to extending the range of xris in use.
14514  *
14515  * Returns
14516  *      An available xri on success,
14517  *      NO_XRI if no xris are available.
14518  **/
14519 static uint16_t
14520 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
14521 {
14522         unsigned long xri;
14523
14524         /*
14525          * Fetch the next logical xri.  Because this index is logical,
14526          * the driver starts at 0 each time.
14527          */
14528         spin_lock_irq(&phba->hbalock);
14529         xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
14530                                  phba->sli4_hba.max_cfg_param.max_xri, 0);
14531         if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
14532                 spin_unlock_irq(&phba->hbalock);
14533                 return NO_XRI;
14534         } else {
14535                 set_bit(xri, phba->sli4_hba.xri_bmask);
14536                 phba->sli4_hba.max_cfg_param.xri_used++;
14537         }
14538         spin_unlock_irq(&phba->hbalock);
14539         return xri;
14540 }
14541
14542 /**
14543  * __lpfc_sli4_free_xri - Release an xri for reuse.
14544  * @phba: pointer to lpfc hba data structure.
14545  * @xri: xri to release.
14546  *
14547  * Releases @xri to the pool of available xris; caller must hold hbalock.
14548  **/
14549 static void
14550 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
14551 {
14552         if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
14553                 phba->sli4_hba.max_cfg_param.xri_used--;
14554         }
14555 }
14556
14557 /**
14558  * lpfc_sli4_free_xri - Release an xri for reuse.
14559  * @phba: pointer to lpfc hba data structure.
14560  * @xri: xri to release.
14561  *
14562  * Releases an xri to the pool of available xris, taking hbalock.
14563  **/
14564 void
14565 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
14566 {
14567         spin_lock_irq(&phba->hbalock);
14568         __lpfc_sli4_free_xri(phba, xri);
14569         spin_unlock_irq(&phba->hbalock);
14570 }
14571
14572 /**
14573  * lpfc_sli4_next_xritag - Get an xritag for the io
14574  * @phba: Pointer to HBA context object.
14575  *
14576  * This function gets an xritag for the iocb. It returns the allocated
14577  * xritag if successful, or NO_XRI (0xffff) if there is no unused xritag
14578  * available; a warning is logged when allocation fails.
14579  * The allocated xritag should later be released via lpfc_sli4_free_xri().
14580  * The caller is not required to hold any lock.
14581  **/
14582 uint16_t
14583 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
14584 {
14585         uint16_t xri_index;
14586
14587         xri_index = lpfc_sli4_alloc_xri(phba);
14588         if (xri_index == NO_XRI)
14589                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14590                                 "2004 Failed to allocate XRI. Last XRITAG is %d"
14591                                 " Max XRI is %d, Used XRI is %d\n",
14592                                 xri_index,
14593                                 phba->sli4_hba.max_cfg_param.max_xri,
14594                                 phba->sli4_hba.max_cfg_param.xri_used);
14595         return xri_index;
14596 }
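
/*
 * Editor's note, an illustrative fragment (not compiled): the xri
 * bitmask allocator is symmetric, so every successful allocation must
 * eventually be balanced by lpfc_sli4_free_xri() for xri_used to stay
 * accurate.
 */
#if 0
        uint16_t xri = lpfc_sli4_next_xritag(phba);

        if (xri != NO_XRI) {
                /* ... tie xri to an io and post its SGL ... */
                lpfc_sli4_free_xri(phba, xri);  /* when the io completes */
        }
#endif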
14597
14598 /**
14599  * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
14600  * @phba: pointer to lpfc hba data structure.
14601  * @post_sgl_list: pointer to els sgl entry list.
14602  * @post_cnt: number of els sgl entries on the list.
14603  *
14604  * This routine is invoked to post a block of the driver's sgl pages to the
14605  * HBA using a non-embedded mailbox command. No lock is held. This routine
14606  * is only called when the driver is loading and after all IO has been
14607  * stopped.
14608  **/
14609 static int
14610 lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
14611                             struct list_head *post_sgl_list,
14612                             int post_cnt)
14613 {
14614         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
14615         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
14616         struct sgl_page_pairs *sgl_pg_pairs;
14617         void *viraddr;
14618         LPFC_MBOXQ_t *mbox;
14619         uint32_t reqlen, alloclen, pg_pairs;
14620         uint32_t mbox_tmo;
14621         uint16_t xritag_start = 0;
14622         int rc = 0;
14623         uint32_t shdr_status, shdr_add_status;
14624         union lpfc_sli4_cfg_shdr *shdr;
14625
14626         reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
14627                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
14628         if (reqlen > SLI4_PAGE_SIZE) {
14629                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
14630                                 "2559 Block sgl registration required DMA "
14631                                 "size (%d) greater than a page\n", reqlen);
14632                 return -ENOMEM;
14633         }
14634         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14635         if (!mbox)
14636                 return -ENOMEM;
14637
14638         /* Allocate DMA memory and set up the non-embedded mailbox command */
14639         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14640                          LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
14641                          LPFC_SLI4_MBX_NEMBED);
14642
14643         if (alloclen < reqlen) {
14644                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14645                                 "0285 Allocated DMA memory size (%d) is "
14646                                 "less than the requested DMA memory "
14647                                 "size (%d)\n", alloclen, reqlen);
14648                 lpfc_sli4_mbox_cmd_free(phba, mbox);
14649                 return -ENOMEM;
14650         }
14651         /* Set up the SGL pages in the non-embedded DMA pages */
14652         viraddr = mbox->sge_array->addr[0];
14653         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
14654         sgl_pg_pairs = &sgl->sgl_pg_pairs;
14655
14656         pg_pairs = 0;
14657         list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
14658                 /* Set up the sge entry */
14659                 sgl_pg_pairs->sgl_pg0_addr_lo =
14660                                 cpu_to_le32(putPaddrLow(sglq_entry->phys));
14661                 sgl_pg_pairs->sgl_pg0_addr_hi =
14662                                 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
14663                 sgl_pg_pairs->sgl_pg1_addr_lo =
14664                                 cpu_to_le32(putPaddrLow(0));
14665                 sgl_pg_pairs->sgl_pg1_addr_hi =
14666                                 cpu_to_le32(putPaddrHigh(0));
14667
14668                 /* Keep the first xritag on the list */
14669                 if (pg_pairs == 0)
14670                         xritag_start = sglq_entry->sli4_xritag;
14671                 sgl_pg_pairs++;
14672                 pg_pairs++;
14673         }
14674
14675         /* Complete initialization and perform endian conversion. */
14676         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
14677         bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
14678         sgl->word0 = cpu_to_le32(sgl->word0);
14679         if (!phba->sli4_hba.intr_enable)
14680                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14681         else {
14682                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
14683                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
14684         }
14685         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
14686         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14687         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14688         if (rc != MBX_TIMEOUT)
14689                 lpfc_sli4_mbox_cmd_free(phba, mbox);
14690         if (shdr_status || shdr_add_status || rc) {
14691                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14692                                 "2513 POST_SGL_BLOCK mailbox command failed "
14693                                 "status x%x add_status x%x mbx status x%x\n",
14694                                 shdr_status, shdr_add_status, rc);
14695                 rc = -ENXIO;
14696         }
14697         return rc;
14698 }
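/*
 * Editorial sketch (not part of the driver): the completion handling above
 * is the common SLI4 config-mailbox idiom used throughout this file.  Both
 * the transport return code and the firmware status words in the returned
 * cfg_shdr must be checked, and the mailbox must not be freed on
 * MBX_TIMEOUT because its completion may still be outstanding:
 *
 *	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
 *	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 *	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 *	if (rc != MBX_TIMEOUT)
 *		lpfc_sli4_mbox_cmd_free(phba, mbox);
 *	if (shdr_status || shdr_add_status || rc)
 *		rc = -ENXIO;
 */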
14699
14700 /**
14701  * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
14702  * @phba: pointer to lpfc hba data structure.
14703  * @sblist: pointer to scsi buffer list.
14704  * @count: number of scsi buffers on the list.
14705  *
14706  * This routine is invoked to post a block of @count scsi sgl pages from a
14707  * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command.
14708  * No Lock is held.
14709  *
14710  **/
14711 int
14712 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
14713                               struct list_head *sblist,
14714                               int count)
14715 {
14716         struct lpfc_scsi_buf *psb;
14717         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
14718         struct sgl_page_pairs *sgl_pg_pairs;
14719         void *viraddr;
14720         LPFC_MBOXQ_t *mbox;
14721         uint32_t reqlen, alloclen, pg_pairs;
14722         uint32_t mbox_tmo;
14723         uint16_t xritag_start = 0;
14724         int rc = 0;
14725         uint32_t shdr_status, shdr_add_status;
14726         dma_addr_t pdma_phys_bpl1;
14727         union lpfc_sli4_cfg_shdr *shdr;
14728
14729         /* Calculate the requested length of the dma memory */
14730         reqlen = count * sizeof(struct sgl_page_pairs) +
14731                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
14732         if (reqlen > SLI4_PAGE_SIZE) {
14733                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
14734                                 "0217 Block sgl registration required DMA "
14735                                 "size (%d) greater than a page\n", reqlen);
14736                 return -ENOMEM;
14737         }
14738         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14739         if (!mbox) {
14740                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14741                                 "0283 Failed to allocate mbox cmd memory\n");
14742                 return -ENOMEM;
14743         }
14744
14745         /* Allocate DMA memory and set up the non-embedded mailbox command */
14746         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14747                                 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
14748                                 LPFC_SLI4_MBX_NEMBED);
14749
14750         if (alloclen < reqlen) {
14751                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14752                                 "2561 Allocated DMA memory size (%d) is "
14753                                 "less than the requested DMA memory "
14754                                 "size (%d)\n", alloclen, reqlen);
14755                 lpfc_sli4_mbox_cmd_free(phba, mbox);
14756                 return -ENOMEM;
14757         }
14758
14759         /* Get the first SGE entry from the non-embedded DMA memory */
14760         viraddr = mbox->sge_array->addr[0];
14761
14762         /* Set up the SGL pages in the non-embedded DMA pages */
14763         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
14764         sgl_pg_pairs = &sgl->sgl_pg_pairs;
14765
14766         pg_pairs = 0;
14767         list_for_each_entry(psb, sblist, list) {
14768                 /* Set up the sge entry */
14769                 sgl_pg_pairs->sgl_pg0_addr_lo =
14770                         cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
14771                 sgl_pg_pairs->sgl_pg0_addr_hi =
14772                         cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
14773                 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
14774                         pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
14775                 else
14776                         pdma_phys_bpl1 = 0;
14777                 sgl_pg_pairs->sgl_pg1_addr_lo =
14778                         cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
14779                 sgl_pg_pairs->sgl_pg1_addr_hi =
14780                         cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
14781                 /* Keep the first xritag on the list */
14782                 if (pg_pairs == 0)
14783                         xritag_start = psb->cur_iocbq.sli4_xritag;
14784                 sgl_pg_pairs++;
14785                 pg_pairs++;
14786         }
14787         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
14788         bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
14789         /* Perform endian conversion if necessary */
14790         sgl->word0 = cpu_to_le32(sgl->word0);
14791
14792         if (!phba->sli4_hba.intr_enable)
14793                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14794         else {
14795                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
14796                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
14797         }
14798         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
14799         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14800         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14801         if (rc != MBX_TIMEOUT)
14802                 lpfc_sli4_mbox_cmd_free(phba, mbox);
14803         if (shdr_status || shdr_add_status || rc) {
14804                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14805                                 "2564 POST_SGL_BLOCK mailbox command failed "
14806                                 "status x%x add_status x%x mbx status x%x\n",
14807                                 shdr_status, shdr_add_status, rc);
14808                 rc = -ENXIO;
14809         }
14810         return rc;
14811 }
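/*
 * Editorial sketch (not part of the driver): the reqlen check above bounds
 * how many buffers a single POST_SGL_PAGES mailbox can describe.  With the
 * payload layout used here, the per-command maximum works out to:
 *
 *	u32 max_pairs = (SLI4_PAGE_SIZE -
 *			 sizeof(union lpfc_sli4_cfg_shdr) -
 *			 sizeof(uint32_t)) /
 *			sizeof(struct sgl_page_pairs);
 *
 * A caller with more buffers than this must split the list across several
 * mailbox commands, which is why @count is passed in rather than derived
 * from the list itself.
 */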
14812
14813 /**
14814  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
14815  * @phba: pointer to lpfc_hba struct that the frame was received on
14816  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
14817  *
14818  * This function checks the fields in the @fc_hdr to see if the FC frame is a
14819  * valid type of frame that the LPFC driver will handle. This function will
14820  * return a zero if the frame is a valid frame or a non zero value when the
14821  * frame does not pass the check.
14822  **/
14823 static int
14824 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
14825 {
14826         /*  make rctl_names static to save stack space */
14827         static char *rctl_names[] = FC_RCTL_NAMES_INIT;
14828         char *type_names[] = FC_TYPE_NAMES_INIT;
14829         struct fc_vft_header *fc_vft_hdr;
14830         uint32_t *header = (uint32_t *) fc_hdr;
14831
14832         switch (fc_hdr->fh_r_ctl) {
14833         case FC_RCTL_DD_UNCAT:          /* uncategorized information */
14834         case FC_RCTL_DD_SOL_DATA:       /* solicited data */
14835         case FC_RCTL_DD_UNSOL_CTL:      /* unsolicited control */
14836         case FC_RCTL_DD_SOL_CTL:        /* solicited control or reply */
14837         case FC_RCTL_DD_UNSOL_DATA:     /* unsolicited data */
14838         case FC_RCTL_DD_DATA_DESC:      /* data descriptor */
14839         case FC_RCTL_DD_UNSOL_CMD:      /* unsolicited command */
14840         case FC_RCTL_DD_CMD_STATUS:     /* command status */
14841         case FC_RCTL_ELS_REQ:   /* extended link services request */
14842         case FC_RCTL_ELS_REP:   /* extended link services reply */
14843         case FC_RCTL_ELS4_REQ:  /* FC-4 ELS request */
14844         case FC_RCTL_ELS4_REP:  /* FC-4 ELS reply */
14845         case FC_RCTL_BA_NOP:    /* basic link service NOP */
14846         case FC_RCTL_BA_ABTS:   /* basic link service abort */
14847         case FC_RCTL_BA_RMC:    /* remove connection */
14848         case FC_RCTL_BA_ACC:    /* basic accept */
14849         case FC_RCTL_BA_RJT:    /* basic reject */
14850         case FC_RCTL_BA_PRMT:
14851         case FC_RCTL_ACK_1:     /* acknowledge_1 */
14852         case FC_RCTL_ACK_0:     /* acknowledge_0 */
14853         case FC_RCTL_P_RJT:     /* port reject */
14854         case FC_RCTL_F_RJT:     /* fabric reject */
14855         case FC_RCTL_P_BSY:     /* port busy */
14856         case FC_RCTL_F_BSY:     /* fabric busy to data frame */
14857         case FC_RCTL_F_BSYL:    /* fabric busy to link control frame */
14858         case FC_RCTL_LCR:       /* link credit reset */
14859         case FC_RCTL_END:       /* end */
14860                 break;
14861         case FC_RCTL_VFTH:      /* Virtual Fabric tagging Header */
14862                 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
14863                 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
14864                 return lpfc_fc_frame_check(phba, fc_hdr);
14865         default:
14866                 goto drop;
14867         }
14868         switch (fc_hdr->fh_type) {
14869         case FC_TYPE_BLS:
14870         case FC_TYPE_ELS:
14871         case FC_TYPE_FCP:
14872         case FC_TYPE_CT:
14873                 break;
14874         case FC_TYPE_IP:
14875         case FC_TYPE_ILS:
14876         default:
14877                 goto drop;
14878         }
14879
14880         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
14881                         "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
14882                         "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
14883                         rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
14884                         type_names[fc_hdr->fh_type], fc_hdr->fh_type,
14885                         be32_to_cpu(header[0]), be32_to_cpu(header[1]),
14886                         be32_to_cpu(header[2]), be32_to_cpu(header[3]),
14887                         be32_to_cpu(header[4]), be32_to_cpu(header[5]),
14888                         be32_to_cpu(header[6]));
14889         return 0;
14890 drop:
14891         lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
14892                         "2539 Dropped frame rctl:%s type:%s\n",
14893                         rctl_names[fc_hdr->fh_r_ctl],
14894                         type_names[fc_hdr->fh_type]);
14895         return 1;
14896 }
14897
14898 /**
14899  * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
14900  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
14901  *
14902  * This function processes the FC header to retrieve the VFI from the VF
14903  * header, if one exists. This function will return the VFI if one exists
14904  * or 0 if no VF header exists.
14905  **/
14906 static uint32_t
14907 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
14908 {
14909         struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
14910
14911         if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
14912                 return 0;
14913         return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
14914 }
14915
14916 /**
14917  * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
14918  * @phba: Pointer to the HBA structure to search for the vport on
14919  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
14920  * @fcfi: The FC Fabric ID that the frame came from
14921  *
14922  * This function searches the @phba for a vport that matches the content of the
14923  * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
14924  * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
14925  * returns the matching vport pointer or NULL if unable to match frame to a
14926  * vport.
14927  **/
14928 static struct lpfc_vport *
14929 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
14930                        uint16_t fcfi)
14931 {
14932         struct lpfc_vport **vports;
14933         struct lpfc_vport *vport = NULL;
14934         int i;
14935         uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
14936                         fc_hdr->fh_d_id[1] << 8 |
14937                         fc_hdr->fh_d_id[2]);
14938
14939         if (did == Fabric_DID)
14940                 return phba->pport;
14941         if ((phba->pport->fc_flag & FC_PT2PT) &&
14942                 !(phba->link_state == LPFC_HBA_READY))
14943                 return phba->pport;
14944
14945         vports = lpfc_create_vport_work_array(phba);
14946         if (vports != NULL)
14947                 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
14948                         if (phba->fcf.fcfi == fcfi &&
14949                             vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
14950                             vports[i]->fc_myDID == did) {
14951                                 vport = vports[i];
14952                                 break;
14953                         }
14954                 }
14955         lpfc_destroy_vport_work_array(phba, vports);
14956         return vport;
14957 }
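/*
 * Editorial sketch (not part of the driver): FC addresses are 24-bit
 * values carried as three big-endian bytes in the frame header; the D_ID
 * assembly above follows that layout:
 *
 *	u32 did = (fc_hdr->fh_d_id[0] << 16) |
 *		  (fc_hdr->fh_d_id[1] << 8) |
 *		   fc_hdr->fh_d_id[2];
 *
 * For example, header bytes ff ff fe assemble to 0xFFFFFE, the well-known
 * fabric address that Fabric_DID names, which is why such frames are
 * short-circuited to the physical port above.
 */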
14958
14959 /**
14960  * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
14961  * @vport: The vport to work on.
14962  *
14963  * This function updates the receive sequence time stamp for this vport. The
14964  * receive sequence time stamp indicates the time that the last frame of
14965  * the sequence that has been idle for the longest amount of time was received.
14966  * The driver uses this time stamp to determine if any received sequences have
14967  * timed out.
14968  **/
14969 static void
14970 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
14971 {
14972         struct lpfc_dmabuf *h_buf;
14973         struct hbq_dmabuf *dmabuf = NULL;
14974
14975         /* get the oldest sequence on the rcv list */
14976         h_buf = list_get_first(&vport->rcv_buffer_list,
14977                                struct lpfc_dmabuf, list);
14978         if (!h_buf)
14979                 return;
14980         dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14981         vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
14982 }
14983
14984 /**
14985  * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
14986  * @vport: The vport that the received sequences were sent to.
14987  *
14988  * This function cleans up all outstanding received sequences. This is called
14989  * by the driver when a link event or user action invalidates all the received
14990  * sequences.
14991  **/
14992 void
14993 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
14994 {
14995         struct lpfc_dmabuf *h_buf, *hnext;
14996         struct lpfc_dmabuf *d_buf, *dnext;
14997         struct hbq_dmabuf *dmabuf = NULL;
14998
14999         /* start with the oldest sequence on the rcv list */
15000         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
15001                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15002                 list_del_init(&dmabuf->hbuf.list);
15003                 list_for_each_entry_safe(d_buf, dnext,
15004                                          &dmabuf->dbuf.list, list) {
15005                         list_del_init(&d_buf->list);
15006                         lpfc_in_buf_free(vport->phba, d_buf);
15007                 }
15008                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
15009         }
15010 }
15011
15012 /**
15013  * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
15014  * @vport: The vport that the received sequences were sent to.
15015  *
15016  * This function determines whether any received sequences have timed out by
15017  * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
15018  * indicates that there is at least one timed out sequence this routine will
15019  * go through the received sequences one at a time from most inactive to most
15020  * active to determine which ones need to be cleaned up. Once it has determined
15021  * that a sequence needs to be cleaned up it will simply free up the resources
15022  * without sending an abort.
15023  **/
15024 void
15025 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
15026 {
15027         struct lpfc_dmabuf *h_buf, *hnext;
15028         struct lpfc_dmabuf *d_buf, *dnext;
15029         struct hbq_dmabuf *dmabuf = NULL;
15030         unsigned long timeout;
15031         int abort_count = 0;
15032
15033         timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
15034                    vport->rcv_buffer_time_stamp);
15035         if (list_empty(&vport->rcv_buffer_list) ||
15036             time_before(jiffies, timeout))
15037                 return;
15038         /* start with the oldest sequence on the rcv list */
15039         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
15040                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15041                 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
15042                            dmabuf->time_stamp);
15043                 if (time_before(jiffies, timeout))
15044                         break;
15045                 abort_count++;
15046                 list_del_init(&dmabuf->hbuf.list);
15047                 list_for_each_entry_safe(d_buf, dnext,
15048                                          &dmabuf->dbuf.list, list) {
15049                         list_del_init(&d_buf->list);
15050                         lpfc_in_buf_free(vport->phba, d_buf);
15051                 }
15052                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
15053         }
15054         if (abort_count)
15055                 lpfc_update_rcv_time_stamp(vport);
15056 }
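/*
 * Editorial sketch (not part of the driver): the expiry test above uses
 * the standard jiffies idiom.  A deadline is the sequence's time stamp
 * plus the E_D_TOV interval converted to jiffies, and it is compared with
 * time_before() so that jiffies wrap-around is handled correctly:
 *
 *	unsigned long deadline = dmabuf->time_stamp +
 *				 msecs_to_jiffies(vport->phba->fc_edtov);
 *
 *	if (time_before(jiffies, deadline))
 *		break;	(this sequence, and all younger ones, still live)
 *
 * Comparing raw jiffies values with '<' would misbehave near the wrap.
 */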
15057
15058 /**
15059  * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
15060  * @vport: The vport on which the frame was received
15061  * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
15062  * This function searches through the existing incomplete sequences that have
15063  * been sent to this @vport. If the frame matches one of the incomplete
15064  * sequences then the dbuf in the @dmabuf is added to the list of frames that
15065  * make up that sequence. If no sequence is found that matches this frame then
15066  * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
15067  * This function returns a pointer to the first dmabuf in the sequence list that
15068  * the frame was linked to.
15069  **/
15070 static struct hbq_dmabuf *
15071 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
15072 {
15073         struct fc_frame_header *new_hdr;
15074         struct fc_frame_header *temp_hdr;
15075         struct lpfc_dmabuf *d_buf;
15076         struct lpfc_dmabuf *h_buf;
15077         struct hbq_dmabuf *seq_dmabuf = NULL;
15078         struct hbq_dmabuf *temp_dmabuf = NULL;
15079         uint8_t found = 0;
15080
15081         INIT_LIST_HEAD(&dmabuf->dbuf.list);
15082         dmabuf->time_stamp = jiffies;
15083         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
15084
15085         /* Use the hdr_buf to find the sequence that this frame belongs to */
15086         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
15087                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
15088                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
15089                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
15090                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
15091                         continue;
15092                 /* found a pending sequence that matches this frame */
15093                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15094                 break;
15095         }
15096         if (!seq_dmabuf) {
15097                 /*
15098                  * This indicates first frame received for this sequence.
15099                  * Queue the buffer on the vport's rcv_buffer_list.
15100                  */
15101                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
15102                 lpfc_update_rcv_time_stamp(vport);
15103                 return dmabuf;
15104         }
15105         temp_hdr = seq_dmabuf->hbuf.virt;
15106         if (be16_to_cpu(new_hdr->fh_seq_cnt) <
15107                 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
15108                 list_del_init(&seq_dmabuf->hbuf.list);
15109                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
15110                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
15111                 lpfc_update_rcv_time_stamp(vport);
15112                 return dmabuf;
15113         }
15114         /* move this sequence to the tail to indicate a young sequence */
15115         list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
15116         seq_dmabuf->time_stamp = jiffies;
15117         lpfc_update_rcv_time_stamp(vport);
15118         if (list_empty(&seq_dmabuf->dbuf.list)) {
15119                 temp_hdr = dmabuf->hbuf.virt;
15120                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
15121                 return seq_dmabuf;
15122         }
15123         /* find the correct place in the sequence to insert this frame */
15124         d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
15125         while (!found) {
15126                 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
15127                 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
15128                 /*
15129                  * If the frame's sequence count is greater than the frame on
15130                  * the list then insert the frame right after this frame
15131                  */
15132                 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
15133                         be16_to_cpu(temp_hdr->fh_seq_cnt)) {
15134                         list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
15135                         found = 1;
15136                         break;
15137                 }
15138
15139                 if (&d_buf->list == &seq_dmabuf->dbuf.list)
15140                         break;
15141                 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
15142         }
15143
15144         if (found)
15145                 return seq_dmabuf;
15146         return NULL;
15147 }
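/*
 * Editorial sketch (not part of the driver): frames are matched to a
 * pending sequence by the (SEQ_ID, OX_ID, S_ID) tuple, as in the search
 * loop above.  A hypothetical predicate expressing the same test:
 *
 *	static bool example_same_sequence(const struct fc_frame_header *a,
 *					  const struct fc_frame_header *b)
 *	{
 *		return a->fh_seq_id == b->fh_seq_id &&
 *		       a->fh_ox_id == b->fh_ox_id &&
 *		       !memcmp(&a->fh_s_id, &b->fh_s_id, 3);
 *	}
 *
 * fh_ox_id can be compared in wire (big-endian) form since only equality
 * matters; fh_s_id is a 3-byte address field, hence the memcmp.
 */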
15148
15149 /**
15150  * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
15151  * @vport: pointer to a virtual port
15152  * @dmabuf: pointer to a dmabuf that describes the FC sequence
15153  *
15154  * This function tries to abort the partially assembled sequence described
15155  * by the information from the basic abort @dmabuf. It checks whether such a
15156  * partially assembled sequence is held by the driver. If so, it frees up all
15157  * the frames from the partially assembled sequence.
15158  *
15159  * Return
15160  * true  -- if a matching partially assembled sequence is present and all
15161  *          of its frames were freed;
15162  * false -- if there is no matching partially assembled sequence present, so
15163  *          nothing was aborted in the lower layer driver
15164  **/
15165 static bool
15166 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
15167                             struct hbq_dmabuf *dmabuf)
15168 {
15169         struct fc_frame_header *new_hdr;
15170         struct fc_frame_header *temp_hdr;
15171         struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
15172         struct hbq_dmabuf *seq_dmabuf = NULL;
15173
15174         /* Use the hdr_buf to find the sequence that matches this frame */
15175         INIT_LIST_HEAD(&dmabuf->dbuf.list);
15176         INIT_LIST_HEAD(&dmabuf->hbuf.list);
15177         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
15178         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
15179                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
15180                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
15181                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
15182                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
15183                         continue;
15184                 /* found a pending sequence that matches this frame */
15185                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15186                 break;
15187         }
15188
15189         /* Free up all the frames from the partially assembled sequence */
15190         if (seq_dmabuf) {
15191                 list_for_each_entry_safe(d_buf, n_buf,
15192                                          &seq_dmabuf->dbuf.list, list) {
15193                         list_del_init(&d_buf->list);
15194                         lpfc_in_buf_free(vport->phba, d_buf);
15195                 }
15196                 return true;
15197         }
15198         return false;
15199 }
15200
15201 /**
15202  * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
15203  * @vport: pointer to a virtual port
15204  * @dmabuf: pointer to a dmabuf that describes the FC sequence
15205  *
15206  * This function tries to abort the sequence assembled at the upper level
15207  * protocol, described by the information from the basic abort @dmabuf. It
15208  * checks to see whether such pending context exists at upper level protocol.
15209  * If so, it shall clean up the pending context.
15210  *
15211  * Return
15212  * true  -- if a matching pending context of the sequence was cleaned up
15213  *          at the ulp;
15214  * false -- if no matching pending context of the sequence is present
15215  *          at the ulp.
15216  **/
15217 static bool
15218 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
15219 {
15220         struct lpfc_hba *phba = vport->phba;
15221         int handled;
15222
15223         /* Accepting abort at ulp with SLI4 only */
15224         if (phba->sli_rev < LPFC_SLI_REV4)
15225                 return false;
15226
15227         /* Register all caring upper level protocols to attend abort */
15228         handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
15229         if (handled)
15230                 return true;
15231
15232         return false;
15233 }
15234
15235 /**
15236  * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
15237  * @phba: Pointer to HBA context object.
15238  * @cmd_iocbq: pointer to the command iocbq structure.
15239  * @rsp_iocbq: pointer to the response iocbq structure.
15240  *
15241  * This function handles the sequence abort response iocb command complete
15242  * event. It properly releases the memory allocated to the sequence abort
15243  * accept iocb.
15244  **/
15245 static void
15246 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
15247                              struct lpfc_iocbq *cmd_iocbq,
15248                              struct lpfc_iocbq *rsp_iocbq)
15249 {
15250         struct lpfc_nodelist *ndlp;
15251
15252         if (cmd_iocbq) {
15253                 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
15254                 lpfc_nlp_put(ndlp);
15255                 lpfc_sli_release_iocbq(phba, cmd_iocbq);
15256         }
15257
15258         /* Failure means BLS ABORT RSP did not get delivered to remote node*/
15259         if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
15260                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15261                         "3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
15262                         rsp_iocbq->iocb.ulpStatus,
15263                         rsp_iocbq->iocb.un.ulpWord[4]);
15264 }
15265
15266 /**
15267  * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
15268  * @phba: Pointer to HBA context object.
15269  * @xri: xri id in transaction.
15270  *
15271  * This function validates that the xri maps to the known range of XRIs
15272  * allocated and used by the driver.
15273  **/
15274 uint16_t
15275 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
15276                       uint16_t xri)
15277 {
15278         uint16_t i;
15279
15280         for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
15281                 if (xri == phba->sli4_hba.xri_ids[i])
15282                         return i;
15283         }
15284         return NO_XRI;
15285 }
15286
15287 /**
15288  * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
15289  * @vport: Pointer to the vport the sequence abort was received on.
15290  * @fc_hdr: pointer to a FC frame header.
15291  * @aborted: true if the matching sequence was found and aborted.
15292  * This function sends a basic response to a previous unsol sequence abort
15293  * event after aborting the sequence handling.
15294  **/
15295 static void
15296 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
15297                         struct fc_frame_header *fc_hdr, bool aborted)
15298 {
15299         struct lpfc_hba *phba = vport->phba;
15300         struct lpfc_iocbq *ctiocb = NULL;
15301         struct lpfc_nodelist *ndlp;
15302         uint16_t oxid, rxid, xri, lxri;
15303         uint32_t sid, fctl;
15304         IOCB_t *icmd;
15305         int rc;
15306
15307         if (!lpfc_is_link_up(phba))
15308                 return;
15309
15310         sid = sli4_sid_from_fc_hdr(fc_hdr);
15311         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
15312         rxid = be16_to_cpu(fc_hdr->fh_rx_id);
15313
15314         ndlp = lpfc_findnode_did(vport, sid);
15315         if (!ndlp) {
15316                 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
15317                 if (!ndlp) {
15318                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
15319                                          "1268 Failed to allocate ndlp for "
15320                                          "oxid:x%x SID:x%x\n", oxid, sid);
15321                         return;
15322                 }
15323                 lpfc_nlp_init(vport, ndlp, sid);
15324                 /* Put ndlp onto pport node list */
15325                 lpfc_enqueue_node(vport, ndlp);
15326         } else if (!NLP_CHK_NODE_ACT(ndlp)) {
15327                 /* re-setup ndlp without removing from node list */
15328                 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
15329                 if (!ndlp) {
15330                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
15331                                          "3275 Failed to activate ndlp found "
15332                                          "for oxid:x%x SID:x%x\n", oxid, sid);
15333                         return;
15334                 }
15335         }
15336
15337         /* Allocate buffer for rsp iocb */
15338         ctiocb = lpfc_sli_get_iocbq(phba);
15339         if (!ctiocb)
15340                 return;
15341
15342         /* Extract the F_CTL field from FC_HDR */
15343         fctl = sli4_fctl_from_fc_hdr(fc_hdr);
15344
15345         icmd = &ctiocb->iocb;
15346         icmd->un.xseq64.bdl.bdeSize = 0;
15347         icmd->un.xseq64.bdl.ulpIoTag32 = 0;
15348         icmd->un.xseq64.w5.hcsw.Dfctl = 0;
15349         icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
15350         icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
15351
15352         /* Fill in the rest of iocb fields */
15353         icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
15354         icmd->ulpBdeCount = 0;
15355         icmd->ulpLe = 1;
15356         icmd->ulpClass = CLASS3;
15357         icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
15358         ctiocb->context1 = lpfc_nlp_get(ndlp);
15359
15360         ctiocb->iocb_cmpl = NULL;
15361         ctiocb->vport = phba->pport;
15362         ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
15363         ctiocb->sli4_lxritag = NO_XRI;
15364         ctiocb->sli4_xritag = NO_XRI;
15365
15366         if (fctl & FC_FC_EX_CTX)
15367                 /* Exchange responder sent the abort so we
15368                  * own the oxid.
15369                  */
15370                 xri = oxid;
15371         else
15372                 xri = rxid;
15373         lxri = lpfc_sli4_xri_inrange(phba, xri);
15374         if (lxri != NO_XRI)
15375                 lpfc_set_rrq_active(phba, ndlp, lxri,
15376                         (xri == oxid) ? rxid : oxid, 0);
15377         /* For BA_ABTS from exchange responder, if the logical xri with
15378          * the oxid maps to the FCP XRI range, the port no longer has
15379          * that exchange context, send a BLS_RJT. Override the IOCB for
15380          * a BA_RJT.
15381          */
15382         if ((fctl & FC_FC_EX_CTX) &&
15383             (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
15384                 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
15385                 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
15386                 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
15387                 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
15388         }
15389
15390         /* If BA_ABTS failed to abort a partially assembled receive sequence,
15391          * the driver no longer has that exchange, send a BLS_RJT. Override
15392          * the IOCB for a BA_RJT.
15393          */
15394         if (aborted == false) {
15395                 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
15396                 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
15397                 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
15398                 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
15399         }
15400
15401         if (fctl & FC_FC_EX_CTX) {
15402                 /* ABTS sent by responder to CT exchange, construction
15403                  * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
15404                  * field and RX_ID from ABTS for RX_ID field.
15405                  */
15406                 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
15407         } else {
15408                 /* ABTS sent by initiator to CT exchange, construction
15409                  * of BA_ACC will need to allocate a new XRI as for the
15410                  * XRI_TAG field.
15411                  */
15412                 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
15413         }
15414         bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
15415         bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
15416
15417         /* Xmit CT abts response on exchange <xid> */
15418         lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
15419                          "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
15420                          icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
15421
15422         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
15423         if (rc == IOCB_ERROR) {
15424                 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
15425                                  "2925 Failed to issue CT ABTS RSP x%x on "
15426                                  "xri x%x, Data x%x\n",
15427                                  icmd->un.xseq64.w5.hcsw.Rctl, oxid,
15428                                  phba->link_state);
15429                 lpfc_nlp_put(ndlp);
15430                 ctiocb->context1 = NULL;
15431                 lpfc_sli_release_iocbq(phba, ctiocb);
15432         }
15433 }
15434
15435 /**
15436  * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
15437  * @vport: Pointer to the vport on which this sequence was received
15438  * @dmabuf: pointer to a dmabuf that describes the FC sequence
15439  *
15440  * This function handles an SLI-4 unsolicited abort event. If the unsolicited
15441  * receive sequence is only partially assembled by the driver, it shall abort
15442  * the partially assembled frames for the sequence. Otherwise, if the
15443  * unsolicited receive sequence has been completely assembled and passed to
15444  * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
15445  * indicate that the unsolicited sequence has been aborted. After that, it
15446  * issues a basic accept or reject in response to the abort.
15447  **/
15448 static void
15449 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
15450                              struct hbq_dmabuf *dmabuf)
15451 {
15452         struct lpfc_hba *phba = vport->phba;
15453         struct fc_frame_header fc_hdr;
15454         uint32_t fctl;
15455         bool aborted;
15456
15457         /* Make a copy of fc_hdr before the dmabuf is released */
15458         memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
15459         fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
15460
15461         if (fctl & FC_FC_EX_CTX) {
15462                 /* ABTS by responder to exchange, no cleanup needed */
15463                 aborted = true;
15464         } else {
15465                 /* ABTS by initiator to exchange, need to do cleanup */
15466                 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
15467                 if (aborted == false)
15468                         aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
15469         }
15470         lpfc_in_buf_free(phba, &dmabuf->dbuf);
15471
15472         /* Respond with BA_ACC or BA_RJT accordingly */
15473         lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
15474 }
15475
15476 /**
15477  * lpfc_seq_complete - Indicates if a sequence is complete
15478  * @dmabuf: pointer to a dmabuf that describes the FC sequence
15479  *
15480  * This function checks the sequence, starting with the frame described by
15481  * @dmabuf, to see if all the frames associated with this sequence are present.
15482  * The frames associated with this sequence are linked to the @dmabuf using the
15483  * dbuf list. This function looks for three major things: 1) that the first
15484  * frame has a sequence count of zero; 2) that there is a frame with the last
15485  * frame of the sequence bit set; 3) that there are no holes in the sequence
15486  * count. The function returns 1 when the sequence is complete, otherwise 0.
15487  **/
15488 static int
15489 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
15490 {
15491         struct fc_frame_header *hdr;
15492         struct lpfc_dmabuf *d_buf;
15493         struct hbq_dmabuf *seq_dmabuf;
15494         uint32_t fctl;
15495         int seq_count = 0;
15496
15497         hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
15498         /* make sure first frame of sequence has a sequence count of zero */
15499         if (hdr->fh_seq_cnt != seq_count)
15500                 return 0;
15501         fctl = (hdr->fh_f_ctl[0] << 16 |
15502                 hdr->fh_f_ctl[1] << 8 |
15503                 hdr->fh_f_ctl[2]);
15504         /* If last frame of sequence we can return success. */
15505         if (fctl & FC_FC_END_SEQ)
15506                 return 1;
15507         list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
15508                 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
15509                 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
15510                 /* If there is a hole in the sequence count then fail. */
15511                 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
15512                         return 0;
15513                 fctl = (hdr->fh_f_ctl[0] << 16 |
15514                         hdr->fh_f_ctl[1] << 8 |
15515                         hdr->fh_f_ctl[2]);
15516                 /* If last frame of sequence we can return success. */
15517                 if (fctl & FC_FC_END_SEQ)
15518                         return 1;
15519         }
15520         return 0;
15521 }
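/*
 * Editorial sketch (not part of the driver): F_CTL is a 24-bit field
 * carried as three bytes and assembled above the same way as a 24-bit
 * address.  FC_FC_END_SEQ (bit 19, per fc_fs.h) marks the last frame of a
 * sequence:
 *
 *	u32 fctl = (hdr->fh_f_ctl[0] << 16) |
 *		   (hdr->fh_f_ctl[1] << 8) |
 *		    hdr->fh_f_ctl[2];
 *
 *	if (fctl & FC_FC_END_SEQ)
 *		return 1;	(sequence is complete)
 *
 * For example, an F_CTL of 0x098000 has End_Sequence set, so the walk can
 * stop as soon as such a frame is found with no preceding holes.
 */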
15522
15523 /**
15524  * lpfc_prep_seq - Prep sequence for ULP processing
15525  * @vport: Pointer to the vport on which this sequence was received
15526  * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
15527  *
15528  * This function takes a sequence, described by a list of frames, and creates
15529  * a list of iocbq structures to describe the sequence. This iocbq list will be
15530  * used to issue to the generic unsolicited sequence handler. This routine
15531  * returns a pointer to the first iocbq in the list. If the function is unable
15532  * to allocate an iocbq then it throws out the received frames that could
15533  * not be described and returns a pointer to the first iocbq. If unable to
15534  * allocate any iocbqs (including the first) this function will return NULL.
15535  **/
15536 static struct lpfc_iocbq *
15537 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
15538 {
15539         struct hbq_dmabuf *hbq_buf;
15540         struct lpfc_dmabuf *d_buf, *n_buf;
15541         struct lpfc_iocbq *first_iocbq, *iocbq;
15542         struct fc_frame_header *fc_hdr;
15543         uint32_t sid;
15544         uint32_t len, tot_len;
15545         struct ulp_bde64 *pbde;
15546
15547         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
15548         /* remove from receive buffer list */
15549         list_del_init(&seq_dmabuf->hbuf.list);
15550         lpfc_update_rcv_time_stamp(vport);
15551         /* get the Remote Port's SID */
15552         sid = sli4_sid_from_fc_hdr(fc_hdr);
15553         tot_len = 0;
15554         /* Get an iocbq struct to fill in. */
15555         first_iocbq = lpfc_sli_get_iocbq(vport->phba);
15556         if (first_iocbq) {
15557                 /* Initialize the first IOCB. */
15558                 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
15559                 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
15560
15561                 /* Check FC Header to see what TYPE of frame we are rcv'ing */
15562                 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
15563                         first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
15564                         first_iocbq->iocb.un.rcvels.parmRo =
15565                                 sli4_did_from_fc_hdr(fc_hdr);
15566                         first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
15567                 } else
15568                         first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
15569                 first_iocbq->iocb.ulpContext = NO_XRI;
15570                 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
15571                         be16_to_cpu(fc_hdr->fh_ox_id);
15572                 /* iocbq is prepped for internal consumption.  Physical vpi. */
15573                 first_iocbq->iocb.unsli3.rcvsli3.vpi =
15574                         vport->phba->vpi_ids[vport->vpi];
15575                 /* put the first buffer into the first IOCBq */
15576                 tot_len = bf_get(lpfc_rcqe_length,
15577                                        &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
15578
15579                 first_iocbq->context2 = &seq_dmabuf->dbuf;
15580                 first_iocbq->context3 = NULL;
15581                 first_iocbq->iocb.ulpBdeCount = 1;
15582                 if (tot_len > LPFC_DATA_BUF_SIZE)
15583                         first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
15584                                                         LPFC_DATA_BUF_SIZE;
15585                 else
15586                         first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
15587
15588                 first_iocbq->iocb.un.rcvels.remoteID = sid;
15589
15590                 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
15591         }
15592         iocbq = first_iocbq;
15593         /*
15594          * Each IOCBq can have two Buffers assigned, so go through the list
15595          * of buffers for this sequence and save two buffers in each IOCBq
15596          */
15597         list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
15598                 if (!iocbq) {
15599                         lpfc_in_buf_free(vport->phba, d_buf);
15600                         continue;
15601                 }
15602                 if (!iocbq->context3) {
15603                         iocbq->context3 = d_buf;
15604                         iocbq->iocb.ulpBdeCount++;
15605                         /* We need to get the size out of the right CQE */
15606                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
15607                         len = bf_get(lpfc_rcqe_length,
15608                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
15609                         pbde = (struct ulp_bde64 *)
15610                                         &iocbq->iocb.unsli3.sli3Words[4];
15611                         if (len > LPFC_DATA_BUF_SIZE)
15612                                 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
15613                         else
15614                                 pbde->tus.f.bdeSize = len;
15615
15616                         iocbq->iocb.unsli3.rcvsli3.acc_len += len;
15617                         tot_len += len;
15618                 } else {
15619                         iocbq = lpfc_sli_get_iocbq(vport->phba);
15620                         if (!iocbq) {
15621                                 if (first_iocbq) {
15622                                         first_iocbq->iocb.ulpStatus =
15623                                                         IOSTAT_FCP_RSP_ERROR;
15624                                         first_iocbq->iocb.un.ulpWord[4] =
15625                                                         IOERR_NO_RESOURCES;
15626                                 }
15627                                 lpfc_in_buf_free(vport->phba, d_buf);
15628                                 continue;
15629                         }
15630                         /* We need to get the size out of the right CQE */
15631                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
15632                         len = bf_get(lpfc_rcqe_length,
15633                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
15634                         iocbq->context2 = d_buf;
15635                         iocbq->context3 = NULL;
15636                         iocbq->iocb.ulpBdeCount = 1;
15637                         if (len > LPFC_DATA_BUF_SIZE)
15638                                 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
15639                                                         LPFC_DATA_BUF_SIZE;
15640                         else
15641                                 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
15642
15643                         tot_len += len;
15644                         iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
15645
15646                         iocbq->iocb.un.rcvels.remoteID = sid;
15647                         list_add_tail(&iocbq->list, &first_iocbq->list);
15648                 }
15649         }
15650         /* Free the sequence's header buffer */
15651         if (!first_iocbq)
15652                 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
15653
15654         return first_iocbq;
15655 }
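/*
 * Editorial note (not part of the driver): each unsolicited IOCB built
 * above carries at most two receive buffers (context2 and context3), and
 * each BDE size is clamped to the posted buffer size, equivalent to:
 *
 *	bde_size = min_t(uint32_t, len, LPFC_DATA_BUF_SIZE);
 *
 * so describing a sequence of N frames takes roughly DIV_ROUND_UP(N, 2)
 * iocbq structures, all chained off first_iocbq->list.
 */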
15656
15657 static void
15658 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
15659                           struct hbq_dmabuf *seq_dmabuf)
15660 {
15661         struct fc_frame_header *fc_hdr;
15662         struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
15663         struct lpfc_hba *phba = vport->phba;
15664
15665         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
15666         iocbq = lpfc_prep_seq(vport, seq_dmabuf);
15667         if (!iocbq) {
15668                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15669                                 "2707 Ring %d handler: Failed to allocate "
15670                                 "iocb Rctl x%x Type x%x received\n",
15671                                 LPFC_ELS_RING,
15672                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
15673                 return;
15674         }
15675         if (!lpfc_complete_unsol_iocb(phba,
15676                                       &phba->sli.ring[LPFC_ELS_RING],
15677                                       iocbq, fc_hdr->fh_r_ctl,
15678                                       fc_hdr->fh_type))
15679                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15680                                 "2540 Ring %d handler: unexpected Rctl "
15681                                 "x%x Type x%x received\n",
15682                                 LPFC_ELS_RING,
15683                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
15684
15685         /* Free iocb created in lpfc_prep_seq */
15686         list_for_each_entry_safe(curr_iocb, next_iocb,
15687                 &iocbq->list, list) {
15688                 list_del_init(&curr_iocb->list);
15689                 lpfc_sli_release_iocbq(phba, curr_iocb);
15690         }
15691         lpfc_sli_release_iocbq(phba, iocbq);
15692 }
15693
15694 /**
15695  * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
15696  * @phba: Pointer to HBA context object.
15697  * @dmabuf: Pointer to the hbq_dmabuf holding a received frame.
15698  * This function is called with no lock held. This function processes all
15699  * the received buffers and gives them to upper layers when a received buffer
15700  * indicates that it is the final frame in the sequence. The interrupt
15701  * service routine processes received buffers in interrupt context, adds
15702  * received dma buffers to the rb_pend_list queue, and signals the worker thread.
15703  * The worker thread calls lpfc_sli4_handle_received_buffer, which will call the
15704  * appropriate receive function when the final frame in a sequence is received.
15705  **/
15706 void
15707 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
15708                                  struct hbq_dmabuf *dmabuf)
15709 {
15710         struct hbq_dmabuf *seq_dmabuf;
15711         struct fc_frame_header *fc_hdr;
15712         struct lpfc_vport *vport;
15713         uint32_t fcfi;
15714         uint32_t did;
15715
15716         /* Process each received buffer */
15717         fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
15718         /* check to see if this a valid type of frame */
15719         if (lpfc_fc_frame_check(phba, fc_hdr)) {
15720                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
15721                 return;
15722         }
15723         if ((bf_get(lpfc_cqe_code,
15724                     &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
15725                 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
15726                               &dmabuf->cq_event.cqe.rcqe_cmpl);
15727         else
15728                 fcfi = bf_get(lpfc_rcqe_fcf_id,
15729                               &dmabuf->cq_event.cqe.rcqe_cmpl);
15730
15731         vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
15732         if (!vport) {
15733                 /* throw out the frame */
15734                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
15735                 return;
15736         }
15737
15738         /* d_id this frame is directed to */
15739         did = sli4_did_from_fc_hdr(fc_hdr);
15740
15741         /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
15742         if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
15743                 (did != Fabric_DID)) {
15744                 /*
15745                  * Throw out the frame if we are not pt2pt.
15746                  * The pt2pt protocol allows for discovery frames
15747                  * to be received without a registered VPI.
15748                  */
15749                 if (!(vport->fc_flag & FC_PT2PT) ||
15750                         (phba->link_state == LPFC_HBA_READY)) {
15751                         lpfc_in_buf_free(phba, &dmabuf->dbuf);
15752                         return;
15753                 }
15754         }
15755
15756         /* Handle the basic abort sequence (BA_ABTS) event */
15757         if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
15758                 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
15759                 return;
15760         }
15761
15762         /* Link this frame */
15763         seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
15764         if (!seq_dmabuf) {
15765                 /* unable to add frame to vport - throw it out */
15766                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
15767                 return;
15768         }
15769         /* If not last frame in sequence continue processing frames. */
15770         if (!lpfc_seq_complete(seq_dmabuf))
15771                 return;
15772
15773         /* Send the complete sequence to the upper layer protocol */
15774         lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
15775 }
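/*
 * Editorial summary (not part of the driver) of the receive path above:
 *
 *	frame -> lpfc_fc_frame_check()        validate R_CTL/TYPE, else drop
 *	      -> lpfc_fc_frame_to_vport()     match FCFI/VFI/DID, else drop
 *	      -> BA_ABTS?                     lpfc_sli4_handle_unsol_abort()
 *	      -> lpfc_fc_frame_add()          link frame into its sequence
 *	      -> lpfc_seq_complete()          all frames present?
 *	      -> lpfc_sli4_send_seq_to_ulp()  hand the sequence to the ULP
 */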
15776
15777 /**
15778  * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
15779  * @phba: pointer to lpfc hba data structure.
15780  *
15781  * This routine is invoked to post rpi header templates to the
15782  * HBA consistent with the SLI-4 interface spec.  This routine
15783  * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
15784  * 64 rpi context headers.
15785  *
15786  * This routine does not require any locks.  Its usage is expected
15787  * to be driver load or reset recovery, when driver execution is
15788  * sequential.
15789  *
15790  * Return codes
15791  *      0 - successful
15792  *      -EIO - The mailbox failed to complete successfully.
15793  *      When this error occurs, the driver is not guaranteed
15794  *      to have any rpi regions posted to the device and
15795  *      must either attempt to repost the regions or take a
15796  *      fatal error.
15797  **/
15798 int
15799 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
15800 {
15801         struct lpfc_rpi_hdr *rpi_page;
15802         uint32_t rc = 0;
15803         uint16_t lrpi = 0;
15804
15805         /* SLI4 ports that support extents do not require RPI headers. */
15806         if (!phba->sli4_hba.rpi_hdrs_in_use)
15807                 goto exit;
15808         if (phba->sli4_hba.extents_in_use)
15809                 return -EIO;
15810
15811         list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
15812                 /*
15813                  * Assign the rpi headers a physical rpi only if the driver
15814                  * has not initialized those resources.  A port reset only
15815                  * needs the headers posted.
15816                  */
15817                 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
15818                     LPFC_RPI_RSRC_RDY)
15819                         rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
15820
15821                 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
15822                 if (rc != MBX_SUCCESS) {
15823                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15824                                         "2008 Error %d posting all rpi "
15825                                         "headers\n", rc);
15826                         rc = -EIO;
15827                         break;
15828                 }
15829         }
15830
15831  exit:
15832         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
15833                LPFC_RPI_RSRC_RDY);
15834         return rc;
15835 }
15836
15837 /**
15838  * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
15839  * @phba: pointer to lpfc hba data structure.
15840  * @rpi_page:  pointer to the rpi memory region.
15841  *
15842  * This routine is invoked to post a single rpi header to the
15843  * HBA consistent with the SLI-4 interface spec.  This memory region
15844  * maps up to 64 rpi context regions.
15845  *
15846  * Return codes
15847  *      0 - successful
15848  *      -ENOMEM - No available memory
15849  *      -EIO - The mailbox failed to complete successfully.
15850  **/
15851 int
15852 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
15853 {
15854         LPFC_MBOXQ_t *mboxq;
15855         struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
15856         uint32_t rc = 0;
15857         uint32_t shdr_status, shdr_add_status;
15858         union lpfc_sli4_cfg_shdr *shdr;
15859
15860         /* SLI4 ports that support extents do not require RPI headers. */
15861         if (!phba->sli4_hba.rpi_hdrs_in_use)
15862                 return rc;
15863         if (phba->sli4_hba.extents_in_use)
15864                 return -EIO;
15865
15866         /* The port is notified of the header region via a mailbox command. */
15867         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15868         if (!mboxq) {
15869                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15870                                 "2001 Unable to allocate memory for issuing "
15871                                 "SLI_CONFIG_SPECIAL mailbox command\n");
15872                 return -ENOMEM;
15873         }
15874
15875         /* Post all rpi memory regions to the port. */
15876         hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
15877         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
15878                          LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
15879                          sizeof(struct lpfc_mbx_post_hdr_tmpl) -
15880                          sizeof(struct lpfc_sli4_cfg_mhdr),
15881                          LPFC_SLI4_MBX_EMBED);
15882
15883
15884         /* Post the physical rpi to the port for this rpi header. */
15885         bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
15886                rpi_page->start_rpi);
15887         bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
15888                hdr_tmpl, rpi_page->page_count);
15889
15890         hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
15891         hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
15892         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
15893         shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
15894         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15895         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15896         if (rc != MBX_TIMEOUT)
15897                 mempool_free(mboxq, phba->mbox_mem_pool);
15898         if (shdr_status || shdr_add_status || rc) {
15899                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15900                                 "2514 POST_RPI_HDR mailbox failed with "
15901                                 "status x%x add_status x%x, mbx status x%x\n",
15902                                 shdr_status, shdr_add_status, rc);
15903                 rc = -ENXIO;
15904         }
15905         return rc;
15906 }
15907
15908 /**
15909  * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
15910  * @phba: pointer to lpfc hba data structure.
15911  *
15912  * This routine is invoked to allocate the next available rpi from
15913  * the driver's rpi bitmask.  If rpi resources run low, it also
15914  * attempts to allocate and post another rpi header page to the
15915  * port before returning.
15916  *
15917  * Returns
15918  *      A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
15919  *      LPFC_RPI_ALLOC_ERROR if no rpis are available.
15920  **/
15921 int
15922 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
15923 {
15924         unsigned long rpi;
15925         uint16_t max_rpi, rpi_limit;
15926         uint16_t rpi_remaining, lrpi = 0;
15927         struct lpfc_rpi_hdr *rpi_hdr;
15928         unsigned long iflag;
15929
15930         /*
15931          * Fetch the next logical rpi.  Because this index is logical,
15932          * the driver starts at 0 each time.
15933          */
15934         spin_lock_irqsave(&phba->hbalock, iflag);
15935         max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
15936         rpi_limit = phba->sli4_hba.next_rpi;
15937
15938         rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
15939         if (rpi >= rpi_limit)
15940                 rpi = LPFC_RPI_ALLOC_ERROR;
15941         else {
15942                 set_bit(rpi, phba->sli4_hba.rpi_bmask);
15943                 phba->sli4_hba.max_cfg_param.rpi_used++;
15944                 phba->sli4_hba.rpi_count++;
15945         }
15946         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
15947                         "0001 rpi:%x max:%x lim:%x\n",
15948                         (int) rpi, max_rpi, rpi_limit);
15949
15950         /*
15951          * Don't try to allocate more rpi header regions if the device limit
15952          * has been exhausted.
15953          */
15954         if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
15955             (phba->sli4_hba.rpi_count >= max_rpi)) {
15956                 spin_unlock_irqrestore(&phba->hbalock, iflag);
15957                 return rpi;
15958         }
15959
15960         /*
15961          * RPI header postings are not required for SLI4 ports capable of
15962          * extents.
15963          */
15964         if (!phba->sli4_hba.rpi_hdrs_in_use) {
15965                 spin_unlock_irqrestore(&phba->hbalock, iflag);
15966                 return rpi;
15967         }
15968
15969         /*
15970          * If the driver is running low on rpi resources, allocate another
15971          * page now.  Note that the next_rpi value is used because
15972          * it represents the number actually in use, whereas max_rpi notes
15973          * the maximum supported by the device.
15974          */
15975         rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
15976         spin_unlock_irqrestore(&phba->hbalock, iflag);
15977         if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
15978                 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
15979                 if (!rpi_hdr) {
15980                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15981                                         "2002 Error Could not grow rpi "
15982                                         "count\n");
15983                 } else {
15984                         lrpi = rpi_hdr->start_rpi;
15985                         rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
15986                         lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
15987                 }
15988         }
15989
15990         return rpi;
15991 }
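
/*
 * Editor's sketch (hypothetical, not part of the driver): the typical
 * pairing of lpfc_sli4_alloc_rpi() with lpfc_sli4_free_rpi().  The
 * allocator returns LPFC_RPI_ALLOC_ERROR when the device range is
 * exhausted, so callers must check before using the rpi.
 */
static int __maybe_unused lpfc_example_rpi_cycle(struct lpfc_hba *phba)
{
        int rpi;

        rpi = lpfc_sli4_alloc_rpi(phba);
        if (rpi == LPFC_RPI_ALLOC_ERROR)
                return -ENOSPC; /* no rpi left in the device's range */

        /* ... use the rpi for a remote port login here ... */

        /* Release the rpi; the bitmask makes a double free harmless. */
        lpfc_sli4_free_rpi(phba, rpi);
        return 0;
}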
15992
15993 /**
15994  * __lpfc_sli4_free_rpi - Release an rpi for reuse (caller holds hbalock).
15995  * @phba: pointer to lpfc hba data structure.
15996  * @rpi: the logical rpi to release.
15997  *
15998  * Releases an rpi to the pool of available rpis maintained by the driver.
15999  **/
16000 static void
16001 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
16002 {
16003         /*
16004          * if the rpi value indicates a prior unreg has already
16005          * been done, skip the unreg.
16006          */
16007         if (rpi == LPFC_RPI_ALLOC_ERROR)
16008                 return;
16009
16010         if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
16011                 phba->sli4_hba.rpi_count--;
16012                 phba->sli4_hba.max_cfg_param.rpi_used--;
16013         }
16014 }
16015
16016 /**
16017  * lpfc_sli4_free_rpi - Release an rpi for reuse.
16018  * @phba: pointer to lpfc hba data structure.
16019  * @rpi: the logical rpi to release.
16020  *
16021  * Releases an rpi to the pool of available rpis maintained by the driver.
16022  **/
16023 void
16024 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
16025 {
16026         spin_lock_irq(&phba->hbalock);
16027         __lpfc_sli4_free_rpi(phba, rpi);
16028         spin_unlock_irq(&phba->hbalock);
16029 }
16030
16031 /**
16032  * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
16033  * @phba: pointer to lpfc hba data structure.
16034  *
16035  * This routine is invoked to free the rpi bitmask and rpi id
16036  * regions the driver uses to track rpi allocations.
16037  **/
16038 void
16039 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
16040 {
16041         kfree(phba->sli4_hba.rpi_bmask);
16042         kfree(phba->sli4_hba.rpi_ids);
16043         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
16044 }
16045
16046 /**
16047  * lpfc_sli4_resume_rpi - Resume a node's rpi with the port
16048  * @ndlp: pointer to the nodelist whose rpi is to be resumed.
16049  * @cmpl: optional mailbox completion handler; default handler when NULL.
16050  * @arg: caller context passed back through the mailbox context1 field.
16051  * This routine issues a RESUME_RPI mailbox command asynchronously.
16052  **/
16053 int
16054 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
16055         void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
16056 {
16057         LPFC_MBOXQ_t *mboxq;
16058         struct lpfc_hba *phba = ndlp->phba;
16059         int rc;
16060
16061         /* The port is notified of the header region via a mailbox command. */
16062         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16063         if (!mboxq)
16064                 return -ENOMEM;
16065
16066         /* Post all rpi memory regions to the port. */
16067         lpfc_resume_rpi(mboxq, ndlp);
16068         if (cmpl) {
16069                 mboxq->mbox_cmpl = cmpl;
16070                 mboxq->context1 = arg;
16071                 mboxq->context2 = ndlp;
16072         } else
16073                 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16074         mboxq->vport = ndlp->vport;
16075         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16076         if (rc == MBX_NOT_FINISHED) {
16077                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16078                                 "2010 Resume RPI Mailbox failed "
16079                                 "status %d, mbxStatus x%x\n", rc,
16080                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
16081                 mempool_free(mboxq, phba->mbox_mem_pool);
16082                 return -EIO;
16083         }
16084         return 0;
16085 }
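
/*
 * Editor's sketch (hypothetical, not part of the driver): issuing
 * lpfc_sli4_resume_rpi() with a caller-supplied completion.  Once the
 * handler runs it owns the mailbox and must return it to the pool.
 * The "xxxx" log prefix is a placeholder, not a tracked message number.
 */
static void __maybe_unused lpfc_example_resume_cmpl(struct lpfc_hba *phba,
                                                    LPFC_MBOXQ_t *mboxq)
{
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "xxxx example resume rpi done, mqe status x%x\n",
                        bf_get(lpfc_mqe_status, &mboxq->u.mqe));
        mempool_free(mboxq, phba->mbox_mem_pool);
}

static int __maybe_unused lpfc_example_resume(struct lpfc_nodelist *ndlp)
{
        /* No private completion context is needed, so arg is NULL. */
        return lpfc_sli4_resume_rpi(ndlp, lpfc_example_resume_cmpl, NULL);
}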
16086
16087 /**
16088  * lpfc_sli4_init_vpi - Initialize a vpi with the port
16089  * @vport: Pointer to the vport for which the vpi is being initialized
16090  *
16091  * This routine is invoked to activate a vpi with the port.
16092  *
16093  * Returns:
16094  *    0 success
16095  *    -Exxx error code otherwise
16096  **/
16097 int
16098 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
16099 {
16100         LPFC_MBOXQ_t *mboxq;
16101         int rc = 0;
16102         int retval = MBX_SUCCESS;
16103         uint32_t mbox_tmo;
16104         struct lpfc_hba *phba = vport->phba;
16105         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16106         if (!mboxq)
16107                 return -ENOMEM;
16108         lpfc_init_vpi(phba, mboxq, vport->vpi);
16109         mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
16110         rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
16111         if (rc != MBX_SUCCESS) {
16112                 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
16113                                 "2022 INIT VPI Mailbox failed "
16114                                 "status %d, mbxStatus x%x\n", rc,
16115                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
16116                 retval = -EIO;
16117         }
16118         if (rc != MBX_TIMEOUT)
16119                 mempool_free(mboxq, vport->phba->mbox_mem_pool);
16120
16121         return retval;
16122 }
16123
16124 /**
16125  * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
16126  * @phba: pointer to lpfc hba data structure.
16127  * @mboxq: Pointer to mailbox object.
16128  *
16129  * This routine is invoked to manually add a single FCF record. The caller
16130  * must pass a completely initialized FCF_Record.  This routine takes
16131  * care of the nonembedded mailbox operations.
16132  **/
16133 static void
16134 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
16135 {
16136         void *virt_addr;
16137         union lpfc_sli4_cfg_shdr *shdr;
16138         uint32_t shdr_status, shdr_add_status;
16139
16140         virt_addr = mboxq->sge_array->addr[0];
16141         /* The IOCTL status is embedded in the mailbox subheader. */
16142         shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
16143         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16144         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16145
16146         if ((shdr_status || shdr_add_status) &&
16147                 (shdr_status != STATUS_FCF_IN_USE))
16148                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16149                         "2558 ADD_FCF_RECORD mailbox failed with "
16150                         "status x%x add_status x%x\n",
16151                         shdr_status, shdr_add_status);
16152
16153         lpfc_sli4_mbox_cmd_free(phba, mboxq);
16154 }
16155
16156 /**
16157  * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
16158  * @phba: pointer to lpfc hba data structure.
16159  * @fcf_record:  pointer to the initialized fcf record to add.
16160  *
16161  * This routine is invoked to manually add a single FCF record. The caller
16162  * must pass a completely initialized FCF_Record.  This routine takes
16163  * care of the nonembedded mailbox operations.
16164  **/
16165 int
16166 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
16167 {
16168         int rc = 0;
16169         LPFC_MBOXQ_t *mboxq;
16170         uint8_t *bytep;
16171         void *virt_addr;
16172         struct lpfc_mbx_sge sge;
16173         uint32_t alloc_len, req_len;
16174         uint32_t fcfindex;
16175
16176         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16177         if (!mboxq) {
16178                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16179                         "2009 Failed to allocate mbox for ADD_FCF cmd\n");
16180                 return -ENOMEM;
16181         }
16182
16183         req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
16184                   sizeof(uint32_t);
16185
16186         /* Allocate DMA memory and set up the non-embedded mailbox command */
16187         alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
16188                                      LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
16189                                      req_len, LPFC_SLI4_MBX_NEMBED);
16190         if (alloc_len < req_len) {
16191                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16192                         "2523 Allocated DMA memory size (x%x) is "
16193                         "less than the requested DMA memory "
16194                         "size (x%x)\n", alloc_len, req_len);
16195                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16196                 return -ENOMEM;
16197         }
16198
16199         /*
16200          * Get the first SGE entry from the non-embedded DMA memory.  This
16201          * routine only uses a single SGE.
16202          */
16203         lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
16204         virt_addr = mboxq->sge_array->addr[0];
16205         /*
16206          * Configure the FCF record for FCFI 0.  This is the driver's
16207          * hardcoded default and gets used in nonFIP mode.
16208          */
16209         fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
16210         bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
16211         lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
16212
16213         /*
16214          * Copy the fcf_index and the FCF Record Data. The data starts after
16215          * the FCoE header plus word10. The data copy needs to be endian
16216          * correct.
16217          */
16218         bytep += sizeof(uint32_t);
16219         lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
16220         mboxq->vport = phba->pport;
16221         mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
16222         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16223         if (rc == MBX_NOT_FINISHED) {
16224                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16225                         "2515 ADD_FCF_RECORD mailbox failed with "
16226                         "status 0x%x\n", rc);
16227                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16228                 rc = -EIO;
16229         } else
16230                 rc = 0;
16231
16232         return rc;
16233 }
16234
16235 /**
16236  * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
16237  * @phba: pointer to lpfc hba data structure.
16238  * @fcf_record:  pointer to the fcf record to write the default data.
16239  * @fcf_index: FCF table entry index.
16240  *
16241  * This routine is invoked to build the driver's default FCF record.  The
16242  * values used are hardcoded.  This routine handles memory initialization.
16243  *
16244  **/
16245 void
16246 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
16247                                 struct fcf_record *fcf_record,
16248                                 uint16_t fcf_index)
16249 {
16250         memset(fcf_record, 0, sizeof(struct fcf_record));
16251         fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
16252         fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
16253         fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
16254         bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
16255         bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
16256         bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
16257         bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
16258         bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
16259         bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
16260         bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
16261         bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
16262         bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
16263         bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
16264         bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
16265         bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
16266         bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
16267                 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
16268         /* Set the VLAN bit map */
16269         if (phba->valid_vlan) {
16270                 fcf_record->vlan_bitmap[phba->vlan_id / 8]
16271                         = 1 << (phba->vlan_id % 8);
16272         }
16273 }
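
/*
 * Editor's sketch (hypothetical, not part of the driver): building the
 * driver's default FCF record and handing it to lpfc_sli4_add_fcf_record(),
 * the way a setup path would seed FCF index 0 for non-FIP operation.
 */
static int __maybe_unused lpfc_example_add_dflt_fcf(struct lpfc_hba *phba)
{
        struct fcf_record *fcf_record;
        int rc;

        /* The record carries a VLAN bitmap, so keep it off the stack. */
        fcf_record = kzalloc(sizeof(struct fcf_record), GFP_KERNEL);
        if (!fcf_record)
                return -ENOMEM;

        /* Fill in the hardcoded defaults for FCF table entry 0. */
        lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, 0);

        /* Issue the nonembedded ADD_FCF mailbox command. */
        rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
        kfree(fcf_record);
        return rc;
}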
16274
16275 /**
16276  * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
16277  * @phba: pointer to lpfc hba data structure.
16278  * @fcf_index: FCF table entry offset.
16279  *
16280  * This routine is invoked to scan the entire FCF table by reading FCF
16281  * record and processing it one at a time starting from the @fcf_index
16282  * for initial FCF discovery or fast FCF failover rediscovery.
16283  *
16284  * Return 0 if the mailbox command is submitted successfully, nonzero
16285  * otherwise.
16286  **/
16287 int
16288 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
16289 {
16290         int rc = 0, error;
16291         LPFC_MBOXQ_t *mboxq;
16292
16293         phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
16294         phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
16295         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16296         if (!mboxq) {
16297                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16298                                 "2000 Failed to allocate mbox for "
16299                                 "READ_FCF cmd\n");
16300                 error = -ENOMEM;
16301                 goto fail_fcf_scan;
16302         }
16303         /* Construct the read FCF record mailbox command */
16304         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
16305         if (rc) {
16306                 error = -EINVAL;
16307                 goto fail_fcf_scan;
16308         }
16309         /* Issue the mailbox command asynchronously */
16310         mboxq->vport = phba->pport;
16311         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
16312
16313         spin_lock_irq(&phba->hbalock);
16314         phba->hba_flag |= FCF_TS_INPROG;
16315         spin_unlock_irq(&phba->hbalock);
16316
16317         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16318         if (rc == MBX_NOT_FINISHED)
16319                 error = -EIO;
16320         else {
16321                 /* Reset eligible FCF count for new scan */
16322                 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
16323                         phba->fcf.eligible_fcf_cnt = 0;
16324                 error = 0;
16325         }
16326 fail_fcf_scan:
16327         if (error) {
16328                 if (mboxq)
16329                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
16330                 /* FCF scan failed, clear FCF_TS_INPROG flag */
16331                 spin_lock_irq(&phba->hbalock);
16332                 phba->hba_flag &= ~FCF_TS_INPROG;
16333                 spin_unlock_irq(&phba->hbalock);
16334         }
16335         return error;
16336 }
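
/*
 * Editor's sketch (hypothetical, not part of the driver): starting a
 * full FCF table scan.  Passing LPFC_FCOE_FCF_GET_FIRST makes the scan
 * begin at the first record and resets the eligible FCF count.
 */
static int __maybe_unused lpfc_example_start_fcf_scan(struct lpfc_hba *phba)
{
        return lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
}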
16337
16338 /**
16339  * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
16340  * @phba: pointer to lpfc hba data structure.
16341  * @fcf_index: FCF table entry offset.
16342  *
16343  * This routine is invoked to read an FCF record indicated by @fcf_index
16344  * and to use it for FLOGI roundrobin FCF failover.
16345  *
16346  * Return 0 if the mailbox command is submitted successfully, nonzero
16347  * otherwise.
16348  **/
16349 int
16350 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
16351 {
16352         int rc = 0, error;
16353         LPFC_MBOXQ_t *mboxq;
16354
16355         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16356         if (!mboxq) {
16357                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
16358                                 "2763 Failed to allocate mbox for "
16359                                 "READ_FCF cmd\n");
16360                 error = -ENOMEM;
16361                 goto fail_fcf_read;
16362         }
16363         /* Construct the read FCF record mailbox command */
16364         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
16365         if (rc) {
16366                 error = -EINVAL;
16367                 goto fail_fcf_read;
16368         }
16369         /* Issue the mailbox command asynchronously */
16370         mboxq->vport = phba->pport;
16371         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
16372         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16373         if (rc == MBX_NOT_FINISHED)
16374                 error = -EIO;
16375         else
16376                 error = 0;
16377
16378 fail_fcf_read:
16379         if (error && mboxq)
16380                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16381         return error;
16382 }
16383
16384 /**
16385  * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
16386  * @phba: pointer to lpfc hba data structure.
16387  * @fcf_index: FCF table entry offset.
16388  *
16389  * This routine is invoked to read an FCF record indicated by @fcf_index to
16390  * determine whether it's eligible for FLOGI roundrobin failover list.
16391  *
16392  * Return 0 if the mailbox command is submitted successfully, nonzero
16393  * otherwise.
16394  **/
16395 int
16396 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
16397 {
16398         int rc = 0, error;
16399         LPFC_MBOXQ_t *mboxq;
16400
16401         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16402         if (!mboxq) {
16403                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
16404                                 "2758 Failed to allocate mbox for "
16405                                 "READ_FCF cmd\n");
16406                 error = -ENOMEM;
16407                 goto fail_fcf_read;
16408         }
16409         /* Construct the read FCF record mailbox command */
16410         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
16411         if (rc) {
16412                 error = -EINVAL;
16413                 goto fail_fcf_read;
16414         }
16415         /* Issue the mailbox command asynchronously */
16416         mboxq->vport = phba->pport;
16417         mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
16418         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16419         if (rc == MBX_NOT_FINISHED)
16420                 error = -EIO;
16421         else
16422                 error = 0;
16423
16424 fail_fcf_read:
16425         if (error && mboxq)
16426                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16427         return error;
16428 }
16429
16430 /**
16431  * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask at the next priority
16432  * @phba: pointer to the lpfc_hba struct for this port.
16433  *
16434  * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
16435  * routine when the rr_bmask is empty.  FCF indices are put into the
16436  * rr_bmask based on their priority level, starting from the highest
16437  * priority and working toward the lowest; the most likely FCF candidate
16438  * is in the highest priority group.  When called, this routine searches
16439  * the fcf_pri list for the next lowest priority group and repopulates
16440  * the rr_bmask with only those fcf_indexes.
16441  * Returns: 1 = success, 0 = failure.
16442  **/
16443 static int
16444 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
16445 {
16446         uint16_t next_fcf_pri;
16447         uint16_t last_index;
16448         struct lpfc_fcf_pri *fcf_pri;
16449         int rc;
16450         int ret = 0;
16451
16452         last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
16453                         LPFC_SLI4_FCF_TBL_INDX_MAX);
16454         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
16455                         "3060 Last IDX %d\n", last_index);
16456
16457         /* Verify the priority list has 2 or more entries */
16458         spin_lock_irq(&phba->hbalock);
16459         if (list_empty(&phba->fcf.fcf_pri_list) ||
16460             list_is_singular(&phba->fcf.fcf_pri_list)) {
16461                 spin_unlock_irq(&phba->hbalock);
16462                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
16463                         "3061 Last IDX %d\n", last_index);
16464                 return 0; /* Empty rr list */
16465         }
16466         spin_unlock_irq(&phba->hbalock);
16467
16468         next_fcf_pri = 0;
16469         /*
16470          * Clear the rr_bmask and set all of the bits that are at this
16471          * priority.
16472          */
16473         memset(phba->fcf.fcf_rr_bmask, 0,
16474                         sizeof(*phba->fcf.fcf_rr_bmask));
16475         spin_lock_irq(&phba->hbalock);
16476         list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
16477                 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
16478                         continue;
16479                 /*
16480                  * the 1st priority that has not FLOGI failed
16481                  * will be the highest.
16482                  */
16483                 if (!next_fcf_pri)
16484                         next_fcf_pri = fcf_pri->fcf_rec.priority;
16485                 spin_unlock_irq(&phba->hbalock);
16486                 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
16487                         rc = lpfc_sli4_fcf_rr_index_set(phba,
16488                                                 fcf_pri->fcf_rec.fcf_index);
16489                         if (rc)
16490                                 return 0;
16491                 }
16492                 spin_lock_irq(&phba->hbalock);
16493         }
16494         /*
16495          * if next_fcf_pri was not set above and the list is not empty then
16496          * we have failed flogis on all of them. So reset flogi failed
16497          * and start at the beginning.
16498          */
16499         if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
16500                 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
16501                         fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
16502                         /*
16503                          * the 1st priority that has not FLOGI failed
16504                          * will be the highest.
16505                          */
16506                         if (!next_fcf_pri)
16507                                 next_fcf_pri = fcf_pri->fcf_rec.priority;
16508                         spin_unlock_irq(&phba->hbalock);
16509                         if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
16510                                 rc = lpfc_sli4_fcf_rr_index_set(phba,
16511                                                 fcf_pri->fcf_rec.fcf_index);
16512                                 if (rc)
16513                                         return 0;
16514                         }
16515                         spin_lock_irq(&phba->hbalock);
16516                 }
16517         } else
16518                 ret = 1;
16519         spin_unlock_irq(&phba->hbalock);
16520
16521         return ret;
16522 }
16523 /**
16524  * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
16525  * @phba: pointer to lpfc hba data structure.
16526  *
16527  * This routine is to get the next eligible FCF record index in a round
16528  * robin fashion. If the next eligible FCF record index equals to the
16529  * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
16530  * shall be returned, otherwise, the next eligible FCF record's index
16531  * shall be returned.
16532  **/
16533 uint16_t
16534 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
16535 {
16536         uint16_t next_fcf_index;
16537
16538 initial_priority:
16539         /* Search start from next bit of currently registered FCF index */
16540         next_fcf_index = phba->fcf.current_rec.fcf_indx;
16541
16542 next_priority:
16543         /* Determine the next fcf index to check */
16544         next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
16545         next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
16546                                        LPFC_SLI4_FCF_TBL_INDX_MAX,
16547                                        next_fcf_index);
16548
16549         /* Wrap around condition on phba->fcf.fcf_rr_bmask */
16550         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
16551                 /*
16552                  * If we have wrapped then we need to clear the bits that
16553                  * have been tested so that we can detect when we should
16554                  * change the priority level.
16555                  */
16556                 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
16557                                                LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
16558         }
16559
16560
16561         /* Check roundrobin failover list empty condition */
16562         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
16563                 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
16564                 /*
16565                  * If next fcf index is not found check if there are lower
16566                  * Priority level fcf's in the fcf_priority list.
16567                  * Set up the rr_bmask with all of the available fcf bits
16568                  * at that level and continue the selection process.
16569                  */
16570                 if (lpfc_check_next_fcf_pri_level(phba))
16571                         goto initial_priority;
16572                 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
16573                                 "2844 No roundrobin failover FCF available\n");
16574
16575                 return LPFC_FCOE_FCF_NEXT_NONE;
16576         }
16577
16578         if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
16579                 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
16580                 LPFC_FCF_FLOGI_FAILED) {
16581                 if (list_is_singular(&phba->fcf.fcf_pri_list))
16582                         return LPFC_FCOE_FCF_NEXT_NONE;
16583
16584                 goto next_priority;
16585         }
16586
16587         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
16588                         "2845 Get next roundrobin failover FCF (x%x)\n",
16589                         next_fcf_index);
16590
16591         return next_fcf_index;
16592 }
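
/*
 * Editor's sketch (hypothetical, not part of the driver): one roundrobin
 * failover step.  Fetch the next eligible index and, if one exists, kick
 * off the asynchronous READ_FCF for it.
 */
static int __maybe_unused lpfc_example_rr_failover(struct lpfc_hba *phba)
{
        uint16_t fcf_index;

        fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
        if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
                return -ENOENT; /* no roundrobin failover FCF available */

        return lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
}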
16593
16594 /**
16595  * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
16596  * @phba: pointer to lpfc hba data structure.
16597  * @fcf_index: the FCF record index to set in the bmask.
16598  * This routine sets the FCF record index into the eligible bmask for
16599  * roundrobin failover search. It checks to make sure that the index
16600  * does not go beyond the range of the driver allocated bmask dimension
16601  * before setting the bit.
16602  *
16603  * Returns 0 if the index bit is successfully set; otherwise, it returns
16604  * -EINVAL.
16605  **/
16606 int
16607 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
16608 {
16609         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
16610                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
16611                                 "2610 FCF (x%x) reached driver's book "
16612                                 "keeping dimension:x%x\n",
16613                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
16614                 return -EINVAL;
16615         }
16616         /* Set the eligible FCF record index bmask */
16617         set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
16618
16619         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
16620                         "2790 Set FCF (x%x) to roundrobin FCF failover "
16621                         "bmask\n", fcf_index);
16622
16623         return 0;
16624 }
16625
16626 /**
16627  * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
16628  * @phba: pointer to lpfc hba data structure.
16629  * @fcf_index: the FCF record index to clear from the bmask.
16630  * This routine clears the FCF record index from the eligible bmask for
16631  * roundrobin failover search. It checks to make sure that the index
16632  * does not go beyond the range of the driver allocated bmask dimension
16633  * before clearing the bit.
16634  **/
16635 void
16636 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
16637 {
16638         struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
16639         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
16640                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
16641                                 "2762 FCF (x%x) reached driver's book "
16642                                 "keeping dimension:x%x\n",
16643                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
16644                 return;
16645         }
16646         /* Clear the eligible FCF record index bmask */
16647         spin_lock_irq(&phba->hbalock);
16648         list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
16649                                  list) {
16650                 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
16651                         list_del_init(&fcf_pri->list);
16652                         break;
16653                 }
16654         }
16655         spin_unlock_irq(&phba->hbalock);
16656         clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
16657
16658         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
16659                         "2791 Clear FCF (x%x) from roundrobin failover "
16660                         "bmask\n", fcf_index);
16661 }
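
/*
 * Editor's sketch (hypothetical, not part of the driver): marking an FCF
 * eligible for roundrobin failover and later removing it.  Both helpers
 * bound-check fcf_index against LPFC_SLI4_FCF_TBL_INDX_MAX first.
 */
static void __maybe_unused lpfc_example_rr_mark(struct lpfc_hba *phba,
                                                uint16_t fcf_index)
{
        if (lpfc_sli4_fcf_rr_index_set(phba, fcf_index))
                return; /* index beyond the driver's bmask dimension */

        /* ... when the FCF is deleted or becomes ineligible ... */
        lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
}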
16662
16663 /**
16664  * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
16665  * @phba: pointer to lpfc hba data structure.
16666  *
16667  * This routine is the completion routine for the rediscover FCF table mailbox
16668  * command. If the mailbox command returned failure, it will try to stop the
16669  * FCF rediscover wait timer.
16670  **/
16671 static void
16672 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
16673 {
16674         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
16675         uint32_t shdr_status, shdr_add_status;
16676
16677         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
16678
16679         shdr_status = bf_get(lpfc_mbox_hdr_status,
16680                              &redisc_fcf->header.cfg_shdr.response);
16681         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
16682                              &redisc_fcf->header.cfg_shdr.response);
16683         if (shdr_status || shdr_add_status) {
16684                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
16685                                 "2746 Requesting for FCF rediscovery failed "
16686                                 "status x%x add_status x%x\n",
16687                                 shdr_status, shdr_add_status);
16688                 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
16689                         spin_lock_irq(&phba->hbalock);
16690                         phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
16691                         spin_unlock_irq(&phba->hbalock);
16692                         /*
16693                          * CVL event triggered FCF rediscover request failed,
16694                          * last resort to re-try current registered FCF entry.
16695                          */
16696                         lpfc_retry_pport_discovery(phba);
16697                 } else {
16698                         spin_lock_irq(&phba->hbalock);
16699                         phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
16700                         spin_unlock_irq(&phba->hbalock);
16701                         /*
16702                          * DEAD FCF event triggered FCF rediscover request
16703                          * failed, last resort to fail over as a link down
16704                          * to FCF registration.
16705                          */
16706                         lpfc_sli4_fcf_dead_failthrough(phba);
16707                 }
16708         } else {
16709                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
16710                                 "2775 Start FCF rediscover quiescent timer\n");
16711                 /*
16712                  * Start FCF rediscovery wait timer for pending FCF
16713                  * before rescan FCF record table.
16714                  */
16715                 lpfc_fcf_redisc_wait_start_timer(phba);
16716         }
16717
16718         mempool_free(mbox, phba->mbox_mem_pool);
16719 }
16720
16721 /**
16722  * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
16723  * @phba: pointer to lpfc hba data structure.
16724  *
16725  * This routine is invoked to request for rediscovery of the entire FCF table
16726  * by the port.
16727  **/
16728 int
16729 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
16730 {
16731         LPFC_MBOXQ_t *mbox;
16732         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
16733         int rc, length;
16734
16735         /* Cancel retry delay timers to all vports before FCF rediscover */
16736         lpfc_cancel_all_vport_retry_delay_timer(phba);
16737
16738         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16739         if (!mbox) {
16740                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16741                                 "2745 Failed to allocate mbox for "
16742                                 "requesting FCF rediscover.\n");
16743                 return -ENOMEM;
16744         }
16745
16746         length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
16747                   sizeof(struct lpfc_sli4_cfg_mhdr));
16748         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16749                          LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
16750                          length, LPFC_SLI4_MBX_EMBED);
16751
16752         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
16753         /* Set count to 0 for invalidating the entire FCF database */
16754         bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
16755
16756         /* Issue the mailbox command asynchronously */
16757         mbox->vport = phba->pport;
16758         mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
16759         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
16760
16761         if (rc == MBX_NOT_FINISHED) {
16762                 mempool_free(mbox, phba->mbox_mem_pool);
16763                 return -EIO;
16764         }
16765         return 0;
16766 }
16767
16768 /**
16769  * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
16770  * @phba: pointer to lpfc hba data structure.
16771  *
16772  * This function is the failover routine as a last resort to the FCF DEAD
16773  * event when driver failed to perform fast FCF failover.
16774  **/
16775 void
16776 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
16777 {
16778         uint32_t link_state;
16779
16780         /*
16781          * Last resort as FCF DEAD event failover will treat this as
16782          * a link down, but save the link state because we don't want
16783          * it to be changed to Link Down unless it is already down.
16784          */
16785         link_state = phba->link_state;
16786         lpfc_linkdown(phba);
16787         phba->link_state = link_state;
16788
16789         /* Unregister FCF if no devices connected to it */
16790         lpfc_unregister_unused_fcf(phba);
16791 }
16792
16793 /**
16794  * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
16795  * @phba: pointer to lpfc hba data structure.
16796  * @rgn23_data: pointer to config region 23 data.
16797  *
16798  * This function gets SLI3 port config region 23 data through a memory dump
16799  * mailbox command. When it successfully retrieves data, the size of the data
16800  * will be returned, otherwise, 0 will be returned.
16801  **/
16802 static uint32_t
16803 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
16804 {
16805         LPFC_MBOXQ_t *pmb = NULL;
16806         MAILBOX_t *mb;
16807         uint32_t offset = 0;
16808         int rc;
16809
16810         if (!rgn23_data)
16811                 return 0;
16812
16813         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16814         if (!pmb) {
16815                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16816                                 "2600 failed to allocate mailbox memory\n");
16817                 return 0;
16818         }
16819         mb = &pmb->u.mb;
16820
16821         do {
16822                 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
16823                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
16824
16825                 if (rc != MBX_SUCCESS) {
16826                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16827                                         "2601 failed to read config "
16828                                         "region 23, rc 0x%x Status 0x%x\n",
16829                                         rc, mb->mbxStatus);
16830                         mb->un.varDmp.word_cnt = 0;
16831                 }
16832                 /*
16833                  * dump mem may return a zero when finished or we got a
16834                  * mailbox error, either way we are done.
16835                  */
16836                 if (mb->un.varDmp.word_cnt == 0)
16837                         break;
16838                 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
16839                         mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
16840
16841                 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
16842                                        rgn23_data + offset,
16843                                        mb->un.varDmp.word_cnt);
16844                 offset += mb->un.varDmp.word_cnt;
16845         } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
16846
16847         mempool_free(pmb, phba->mbox_mem_pool);
16848         return offset;
16849 }
16850
16851 /**
16852  * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
16853  * @phba: pointer to lpfc hba data structure.
16854  * @rgn23_data: pointer to config region 23 data.
16855  *
16856  * This function gets SLI4 port config region 23 data through a memory dump
16857  * mailbox command. When it successfully retrieves data, the size of the data
16858  * will be returned, otherwise, 0 will be returned.
16859  **/
16860 static uint32_t
16861 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
16862 {
16863         LPFC_MBOXQ_t *mboxq = NULL;
16864         struct lpfc_dmabuf *mp = NULL;
16865         struct lpfc_mqe *mqe;
16866         uint32_t data_length = 0;
16867         int rc;
16868
16869         if (!rgn23_data)
16870                 return 0;
16871
16872         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16873         if (!mboxq) {
16874                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16875                                 "3105 failed to allocate mailbox memory\n");
16876                 return 0;
16877         }
16878
16879         if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
16880                 goto out;
16881         mqe = &mboxq->u.mqe;
16882         mp = (struct lpfc_dmabuf *) mboxq->context1;
16883         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
16884         if (rc)
16885                 goto out;
16886         data_length = mqe->un.mb_words[5];
16887         if (data_length == 0)
16888                 goto out;
16889         if (data_length > DMP_RGN23_SIZE) {
16890                 data_length = 0;
16891                 goto out;
16892         }
16893         lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
16894 out:
16895         mempool_free(mboxq, phba->mbox_mem_pool);
16896         if (mp) {
16897                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
16898                 kfree(mp);
16899         }
16900         return data_length;
16901 }
16902
16903 /**
16904  * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
16905  * @phba: pointer to lpfc hba data structure.
16906  *
16907  * This function reads region 23 and parses the TLV for port status to
16908  * decide if the user disabled the port. If the TLV indicates the
16909  * port is disabled, the hba_flag is set accordingly.
16910  **/
16911 void
16912 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
16913 {
16914         uint8_t *rgn23_data = NULL;
16915         uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
16916         uint32_t offset = 0;
16917
16918         /* Get adapter Region 23 data */
16919         rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
16920         if (!rgn23_data)
16921                 goto out;
16922
16923         if (phba->sli_rev < LPFC_SLI_REV4)
16924                 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
16925         else {
16926                 if_type = bf_get(lpfc_sli_intf_if_type,
16927                                  &phba->sli4_hba.sli_intf);
16928                 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
16929                         goto out;
16930                 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
16931         }
16932
16933         if (!data_size)
16934                 goto out;
16935
16936         /* Check the region signature first */
16937         if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
16938                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16939                         "2619 Config region 23 has bad signature\n");
16940                 goto out;
16941         }
16942         offset += 4;
16943
16944         /* Check the data structure version */
16945         if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
16946                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16947                         "2620 Config region 23 has bad version\n");
16948                 goto out;
16949         }
16950         offset += 4;
16951
16952         /* Parse TLV entries in the region */
16953         while (offset < data_size) {
16954                 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
16955                         break;
16956                 /*
16957                  * If the TLV is not driver specific TLV or driver id is
16958                  * not linux driver id, skip the record.
16959                  */
16960                 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
16961                     (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
16962                     (rgn23_data[offset + 3] != 0)) {
16963                         offset += rgn23_data[offset + 1] * 4 + 4;
16964                         continue;
16965                 }
16966
16967                 /* Driver found a driver specific TLV in the config region */
16968                 sub_tlv_len = rgn23_data[offset + 1] * 4;
16969                 offset += 4;
16970                 tlv_offset = 0;
16971
16972                 /*
16973                  * Search for configured port state sub-TLV.
16974                  */
16975                 while ((offset < data_size) &&
16976                         (tlv_offset < sub_tlv_len)) {
16977                         if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
16978                                 offset += 4;
16979                                 tlv_offset += 4;
16980                                 break;
16981                         }
16982                         if (rgn23_data[offset] != PORT_STE_TYPE) {
16983                                 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
16984                                 offset += rgn23_data[offset + 1] * 4 + 4;
16985                                 continue;
16986                         }
16987
16988                         /* This HBA contains PORT_STE configured */
16989                         if (!rgn23_data[offset + 2])
16990                                 phba->hba_flag |= LINK_DISABLED;
16991
16992                         goto out;
16993                 }
16994         }
16995
16996 out:
16997         kfree(rgn23_data);
16998         return;
16999 }
17000
17001 /**
17002  * lpfc_wr_object - write an object to the firmware
17003  * @phba: HBA structure that indicates port to create a queue on.
17004  * @dmabuf_list: list of dmabufs to write to the port.
17005  * @size: the total byte value of the objects to write to the port.
17006  * @offset: the current offset to be used to start the transfer.
17007  *
17008  * This routine will create a wr_object mailbox command to send to the port.
17009  * The mailbox command will be constructed using the dma buffers described in
17010  * @dmabuf_list to create a list of BDEs. This routine will fill in as many
17011  * BDEs as the embedded mailbox can support. The @offset variable will be
17012  * used to indicate the starting offset of the transfer and will also return
17013  * the offset after the write object mailbox has completed. @size is used to
17014  * determine the end of the object and whether the eof bit should be set.
17015  *
17016  * Return 0 if successful and offset will contain the new offset to use
17017  * for the next write.
17018  * Return negative value for error cases.
17019  **/
17020 int
17021 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
17022                uint32_t size, uint32_t *offset)
17023 {
17024         struct lpfc_mbx_wr_object *wr_object;
17025         LPFC_MBOXQ_t *mbox;
17026         int rc = 0, i = 0;
17027         uint32_t shdr_status, shdr_add_status;
17028         uint32_t mbox_tmo;
17029         union lpfc_sli4_cfg_shdr *shdr;
17030         struct lpfc_dmabuf *dmabuf;
17031         uint32_t written = 0;
17032
17033         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17034         if (!mbox)
17035                 return -ENOMEM;
17036
17037         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17038                         LPFC_MBOX_OPCODE_WRITE_OBJECT,
17039                         sizeof(struct lpfc_mbx_wr_object) -
17040                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
17041
17042         wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
17043         wr_object->u.request.write_offset = *offset;
17044         sprintf((char *)wr_object->u.request.object_name, "/");
17045         wr_object->u.request.object_name[0] =
17046                 cpu_to_le32(wr_object->u.request.object_name[0]);
17047         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
17048         list_for_each_entry(dmabuf, dmabuf_list, list) {
17049                 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
17050                         break;
17051                 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
17052                 wr_object->u.request.bde[i].addrHigh =
17053                         putPaddrHigh(dmabuf->phys);
17054                 if (written + SLI4_PAGE_SIZE >= size) {
17055                         wr_object->u.request.bde[i].tus.f.bdeSize =
17056                                 (size - written);
17057                         written += (size - written);
17058                         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
17059                 } else {
17060                         wr_object->u.request.bde[i].tus.f.bdeSize =
17061                                 SLI4_PAGE_SIZE;
17062                         written += SLI4_PAGE_SIZE;
17063                 }
17064                 i++;
17065         }
17066         wr_object->u.request.bde_count = i;
17067         bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
17068         if (!phba->sli4_hba.intr_enable)
17069                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17070         else {
17071                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17072                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17073         }
17074         /* The IOCTL status is embedded in the mailbox subheader. */
17075         shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
17076         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17077         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17078         if (rc != MBX_TIMEOUT)
17079                 mempool_free(mbox, phba->mbox_mem_pool);
17080         if (shdr_status || shdr_add_status || rc) {
17081                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17082                                 "3025 Write Object mailbox failed with "
17083                                 "status x%x add_status x%x, mbx status x%x\n",
17084                                 shdr_status, shdr_add_status, rc);
17085                 rc = -ENXIO;
17086         } else
17087                 *offset += wr_object->u.response.actual_write_length;
17088         return rc;
17089 }
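
/*
 * Editor's sketch (hypothetical, not part of the driver): a single-shot
 * write through lpfc_wr_object().  Assumes @dmabuf_list fits in one
 * embedded mailbox (at most LPFC_MBX_WR_CONFIG_MAX_BDE buffers); larger
 * images must be re-listed per call since the routine always starts at
 * the head of the list.
 */
static int __maybe_unused lpfc_example_write_obj(struct lpfc_hba *phba,
                                                 struct list_head *dmabuf_list,
                                                 uint32_t size)
{
        uint32_t offset = 0;
        int rc;

        rc = lpfc_wr_object(phba, dmabuf_list, size, &offset);
        if (rc)
                return rc;      /* mailbox failure: -ENOMEM or -ENXIO */

        /* On success, offset has advanced past the bytes just written. */
        return 0;
}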
17090
17091 /**
17092  * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
17093  * @vport: pointer to vport data structure.
17094  *
17095  * This function iterates through the mailboxq and cleans up all REG_LOGIN
17096  * and REG_VPI mailbox commands associated with the vport. This function
17097  * is called when the driver wants to restart discovery of the vport due to
17098  * a Clear Virtual Link event.
17099  **/
17100 void
17101 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
17102 {
17103         struct lpfc_hba *phba = vport->phba;
17104         LPFC_MBOXQ_t *mb, *nextmb;
17105         struct lpfc_dmabuf *mp;
17106         struct lpfc_nodelist *ndlp;
17107         struct lpfc_nodelist *act_mbx_ndlp = NULL;
17108         struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
17109         LIST_HEAD(mbox_cmd_list);
17110         uint8_t restart_loop;
17111
17112         /* Clean up internally queued mailbox commands with the vport */
17113         spin_lock_irq(&phba->hbalock);
17114         list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
17115                 if (mb->vport != vport)
17116                         continue;
17117
17118                 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
17119                         (mb->u.mb.mbxCommand != MBX_REG_VPI))
17120                         continue;
17121
17122                 list_del(&mb->list);
17123                 list_add_tail(&mb->list, &mbox_cmd_list);
17124         }
17125         /* Clean up active mailbox command with the vport */
17126         mb = phba->sli.mbox_active;
17127         if (mb && (mb->vport == vport)) {
17128                 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
17129                         (mb->u.mb.mbxCommand == MBX_REG_VPI))
17130                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17131                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
17132                         act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
17133                         /* Put reference count for delayed processing */
17134                         act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
17135                         /* Unregister the RPI when mailbox complete */
17136                         mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
17137                 }
17138         }
17139         /* Cleanup any mailbox completions which are not yet processed */
17140         do {
17141                 restart_loop = 0;
17142                 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
17143                         /*
17144                          * If this mailbox is already processed or it is
17145                          * for another vport, ignore it.
17146                          */
17147                         if ((mb->vport != vport) ||
17148                                 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
17149                                 continue;
17150
17151                         if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
17152                                 (mb->u.mb.mbxCommand != MBX_REG_VPI))
17153                                 continue;
17154
17155                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17156                         if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
17157                                 ndlp = (struct lpfc_nodelist *)mb->context2;
17158                                 /* Unregister the RPI when mailbox complete */
17159                                 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
17160                                 restart_loop = 1;
17161                                 spin_unlock_irq(&phba->hbalock);
17162                                 spin_lock(shost->host_lock);
17163                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
17164                                 spin_unlock(shost->host_lock);
17165                                 spin_lock_irq(&phba->hbalock);
17166                                 break;
17167                         }
17168                 }
17169         } while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			ndlp = (struct lpfc_nodelist *) mb->context2;
			mb->context2 = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}
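
/*
 * Illustrative usage (a sketch, not lifted from this driver): a Clear
 * Virtual Link handler would typically discard the vport's pending
 * registration mailboxes before restarting discovery.  The function
 * name below is hypothetical:
 *
 *	static void example_restart_disc(struct lpfc_vport *vport)
 *	{
 *		lpfc_cleanup_pending_mbox(vport);
 *		// re-kick discovery here, e.g. via the FLOGI/FDISC path
 *	}
 */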

/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter.  For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs.  This congestion can occur with large
 * vport counts during node discovery.
 **/
uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe wqe;
	uint32_t txq_cnt = 0;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry(piocbq, &pring->txq, list) {
		txq_cnt++;
	}

	if (txq_cnt > pring->txq_max)
		pring->txq_max = txq_cnt;

	spin_unlock_irqrestore(&pring->ring_lock, iflags);

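	/*
	 * Issue the deferred IOCBs one at a time.  ring_lock is taken
	 * and released on each iteration so it is not held across the
	 * whole drain.
	 */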
	while (!list_empty(&pring->txq)) {
		spin_lock_irqsave(&pring->ring_lock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		if (!piocbq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2823 txq empty and txq_cnt is %d\n",
				txq_cnt);
			break;
		}
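		/*
		 * Without an SGL there is no XRI to post the request
		 * with: requeue the IOCB and stop; the txq is drained
		 * again once SGLs are freed.
		 */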
		sglq = __lpfc_sli_get_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			break;
		}
		txq_cnt--;

		/* The xri and iocb resources are secured;
		 * attempt to issue the request.
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failure means we cannot issue; queue for cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
			fail_msg = NULL;
		}
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				IOERR_SLI_ABORTED);

	return txq_cnt;
}
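
/*
 * Illustrative usage (a sketch with a hypothetical caller, not lifted
 * from this driver): once SGL/XRI resources are returned to the pool,
 * the deferred ELS IOCBs on the txq can be retried:
 *
 *	static void example_sgls_freed(struct lpfc_hba *phba)
 *	{
 *		uint32_t remaining = lpfc_drain_txq(phba);
 *		// 'remaining' approximates the IOCBs still deferred;
 *		// nonzero does not by itself indicate an error.
 *	}
 */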