/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
				   struct lpfc_cqe *);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static int lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				    struct lpfc_eqe *eqe, uint32_t qidx);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring,
				   struct lpfc_iocbq *cmdiocb);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;
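
	/*
	 * Note: one WQE slot is always left unused. The host index creeping
	 * up to one entry behind the hba index is what distinguishes a full
	 * ring from an empty one in the modulo arithmetic below.
	 */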
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -ENOMEM;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	/* ensure WQE bcopy flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
		bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
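	/* Only one mailbox command is outstanding on the MQ at a time,
	 * so advancing the hba index one slot releases exactly one entry.
	 */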
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return eqe;
}
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
}
/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each event queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = idx;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return cqe;
}
/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The header Receive Queue Entry to put on the header queue.
 * @drqe: The data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next available
 * entry on each of @hq and @dq. This function will then ring the Receive Queue
 * Doorbell to signal the HBA to start processing the Receive Queue Entries.
 * This function returns the index that the rqe was copied to if successful. If
 * no entries are available on @hq then this function will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	put_index = hq->host_index;
	temp_hrqe = hq->qe[put_index].rqe;
	temp_drqe = dq->qe[dq->host_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (put_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((put_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;
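
	/*
	 * Doorbell writes are batched: the port is notified only once per
	 * entry_repost RQEs, and the doorbell then reports the whole block
	 * of receive buffers just replenished.
	 */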
	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return put_index;
}
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}
/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}
/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called by the worker thread and acquires the
 * hbalock itself. It checks if stop_time (ratov from setting rrq
 * active) has been reached; if it has and the send_rrq flag is set
 * then it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
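	/* Expired RRQs are first collected on the private send_rrq list
	 * under hbalock; they are acted on only after the lock is dropped,
	 * since sending the RRQ ELS cannot be done while holding it.
	 */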
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}
/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	lockdep_assert_held(&phba->hbalock);
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
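	/* The allocation below may sleep, so hbalock was dropped first.
	 * The xri bit was already set above, so a racing caller for the
	 * same xri bails out at test_and_set_bit().
	 */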
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the ring lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated sglq object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	lockdep_assert_held(&phba->hbalock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}
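
	/*
	 * The ndlp resolved above is only consulted for RRQ enforcement:
	 * an sglq whose XRI still has an RRQ outstanding against this
	 * remote port must not be reused until the RRQ window closes.
	 */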
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}
/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated sglq object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
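			/* The port still owns this XRI, so park the sglq on
			 * the aborted list until the CQ_ABORTED_XRI
			 * completion returns ownership to the driver.
			 */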
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels each IOCB
 * on the list by invoking the completion callback function associated with the
 * IOCB, with the provided @ulpstatus and @ulpWord4 set in the IOCB command
 * field.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
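
	/* Out-of-range opcodes fall out as LPFC_UNKNOWN_IOCB (value 0) */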
	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
				__func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is an available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
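		/* The allocation below may sleep, so it is done with hbalock
		 * dropped; the new_len re-check after re-taking the lock
		 * catches a racing thread that grew the table first.
		 */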
		new_arr = kzalloc(new_len * sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	lockdep_assert_held(&phba->hbalock);
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform firmware
 * that there is pending work to be done for this ring and requests an
 * interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}
/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when the driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */

	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != LPFC_FCP_RING ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is a free slot
 * available for the HBQ it will return a pointer to the next available
 * HBQ entry else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	lockdep_assert_held(&phba->hbalock);

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
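		/* The cached copy of the port's get index says the HBQ is
		 * full; refresh it from the adapter before giving up.
		 */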
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}
/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function will return
 * zero if it successfully posts the buffer, else it will return an
 * error.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero if
 * it successfully posts the buffer, else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	lockdep_assert_held(&phba->hbalock);
	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}
1867 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1868 * @phba: Pointer to HBA context object.
1869 * @hbqno: HBQ number.
1870 * @hbq_buf: Pointer to HBQ buffer.
1872 * This function is called with the hbalock held to post an RQE to the SLI4
1873 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1874 * the hbq_buffer_list and return zero, otherwise it will return an error.
1877 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1878 struct hbq_dmabuf *hbq_buf)
1881 struct lpfc_rqe hrqe;
1882 struct lpfc_rqe drqe;
1883 struct lpfc_queue *hrq;
1884 struct lpfc_queue *drq;
1886 if (hbqno != LPFC_ELS_HBQ)
1888 hrq = phba->sli4_hba.hdr_rq;
1889 drq = phba->sli4_hba.dat_rq;
1891 lockdep_assert_held(&phba->hbalock);
1892 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1893 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1894 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1895 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1896 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
1899 hbq_buf->tag = (rc | (hbqno << 16));
1900 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
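/*
 * Illustrative sketch: an HBQ buffer tag packs the HBQ number into the
 * upper 16 bits and a per-queue index (the RQE return value on SLI4, as
 * above) into the lower 16 bits; lpfc_sli_hbqbuf_find() and
 * lpfc_sli_free_hbq() recover the queue with "tag >> 16".  Hypothetical
 * helpers that only document the layout.
 */
static inline uint32_t lpfc_example_make_hbq_tag(uint32_t hbqno, uint32_t idx)
{
        return idx | (hbqno << 16);     /* low 16: index, high 16: queue */
}

static inline uint32_t lpfc_example_hbq_tag_to_hbqno(uint32_t tag)
{
        return tag >> 16;               /* inverse of the encoding above */
}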
1904 /* HBQ for ELS and CT traffic. */
1905 static struct lpfc_hbq_init lpfc_els_hbq = {
1910 .ring_mask = (1 << LPFC_ELS_RING),
1917 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
1922 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
1923 * @phba: Pointer to HBA context object.
1924 * @hbqno: HBQ number.
1925 * @count: Number of HBQ buffers to be posted.
1927 * This function is called with no lock held to post more hbq buffers to the
1928 * given HBQ. The function returns the number of HBQ buffers successfully
1929 * posted.
1932 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
1934 uint32_t i, posted = 0;
1935 unsigned long flags;
1936 struct hbq_dmabuf *hbq_buffer;
1937 LIST_HEAD(hbq_buf_list);
1938 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
1941 if ((phba->hbqs[hbqno].buffer_count + count) >
1942 lpfc_hbq_defs[hbqno]->entry_count)
1943 count = lpfc_hbq_defs[hbqno]->entry_count -
1944 phba->hbqs[hbqno].buffer_count;
1947 /* Allocate HBQ entries */
1948 for (i = 0; i < count; i++) {
1949 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1952 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1954 /* Check whether HBQ is still in use */
1955 spin_lock_irqsave(&phba->hbalock, flags);
1956 if (!phba->hbq_in_use)
1958 while (!list_empty(&hbq_buf_list)) {
1959 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1961 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1963 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
1964 phba->hbqs[hbqno].buffer_count++;
1967 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1969 spin_unlock_irqrestore(&phba->hbalock, flags);
1972 spin_unlock_irqrestore(&phba->hbalock, flags);
1973 while (!list_empty(&hbq_buf_list)) {
1974 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1976 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
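/*
 * Illustrative sketch: lpfc_sli_hbqbuf_fill_hbqs() above allocates every
 * buffer onto a private list with no lock held and only then takes the
 * hbalock to post, re-checking hbq_in_use so a teardown that raced with
 * the allocations simply frees the leftovers.  A hypothetical, condensed
 * form of that commit step (tag assignment omitted for brevity):
 */
static void lpfc_example_commit_hbq_bufs(struct lpfc_hba *phba,
                                         uint32_t hbqno,
                                         struct list_head *bufs)
{
        struct hbq_dmabuf *buf;
        unsigned long flags;

        spin_lock_irqsave(&phba->hbalock, flags);
        while (phba->hbq_in_use && !list_empty(bufs)) {
                list_remove_head(bufs, buf, struct hbq_dmabuf, dbuf.list);
                if (lpfc_sli_hbq_to_firmware(phba, hbqno, buf))
                        phba->hbqs[hbqno].hbq_free_buffer(phba, buf);
                else
                        phba->hbqs[hbqno].buffer_count++;
        }
        spin_unlock_irqrestore(&phba->hbalock, flags);
}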
1982 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
1983 * @phba: Pointer to HBA context object.
1986 * This function posts more buffers to the HBQ. This function
1987 * is called with no lock held. The function returns the number of HBQ entries
1988 * successfully allocated.
1991 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1993 if (phba->sli_rev == LPFC_SLI_REV4)
1996 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1997 lpfc_hbq_defs[qno]->add_count);
2001 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2002 * @phba: Pointer to HBA context object.
2003 * @qno: HBQ queue number.
2005 * This function is called from SLI initialization code path with
2006 * no lock held to post initial HBQ buffers to firmware. The
2007 * function returns the number of HBQ entries successfully allocated.
2010 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2012 if (phba->sli_rev == LPFC_SLI_REV4)
2013 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2014 lpfc_hbq_defs[qno]->entry_count);
2016 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2017 lpfc_hbq_defs[qno]->init_count);
2021 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2022 * @rb_list: Pointer to the driver hbq buffer list.
2025 * This function removes the first hbq buffer on an hbq list and returns a
2026 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2028 static struct hbq_dmabuf *
2029 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2031 struct lpfc_dmabuf *d_buf;
2033 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2036 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2040 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2041 * @phba: Pointer to HBA context object.
2042 * @hrq: Pointer to the driver header receive queue object.
2044 * This function removes the first RQ buffer on an RQ buffer list and returns a
2045 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2047 static struct rqb_dmabuf *
2048 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2050 struct lpfc_dmabuf *h_buf;
2051 struct lpfc_rqb *rqbp;
2054 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2055 struct lpfc_dmabuf, list);
2058 rqbp->buffer_count--;
2059 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2063 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2064 * @phba: Pointer to HBA context object.
2065 * @tag: Tag of the hbq buffer.
2067 * This function searches for the hbq buffer associated with the given tag in
2068 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer;
2069 * otherwise it returns NULL.
2071 static struct hbq_dmabuf *
2072 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2074 struct lpfc_dmabuf *d_buf;
2075 struct hbq_dmabuf *hbq_buf;
2079 if (hbqno >= LPFC_MAX_HBQS)
2082 spin_lock_irq(&phba->hbalock);
2083 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2084 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2085 if (hbq_buf->tag == tag) {
2086 spin_unlock_irq(&phba->hbalock);
2090 spin_unlock_irq(&phba->hbalock);
2091 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2092 "1803 Bad hbq tag. Data: x%x x%x\n",
2093 tag, phba->hbqs[tag >> 16].buffer_count);
2098 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2099 * @phba: Pointer to HBA context object.
2100 * @hbq_buffer: Pointer to HBQ buffer.
2102 * This function is called with the hbalock held. This function gives back
2103 * the hbq buffer to firmware. If the HBQ does not have space to
2104 * post the buffer, it will free the buffer.
2107 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2112 hbqno = hbq_buffer->tag >> 16;
2113 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2114 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2119 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2120 * @mbxCommand: mailbox command code.
2122 * This function is called by the mailbox event handler function to verify
2123 * that the completed mailbox command is a legitimate mailbox command. If the
2124 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2125 * and the mailbox event handler will take the HBA offline.
2128 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2132 switch (mbxCommand) {
2136 case MBX_WRITE_VPARMS:
2137 case MBX_RUN_BIU_DIAG:
2140 case MBX_CONFIG_LINK:
2141 case MBX_CONFIG_RING:
2142 case MBX_RESET_RING:
2143 case MBX_READ_CONFIG:
2144 case MBX_READ_RCONFIG:
2145 case MBX_READ_SPARM:
2146 case MBX_READ_STATUS:
2150 case MBX_READ_LNK_STAT:
2152 case MBX_UNREG_LOGIN:
2154 case MBX_DUMP_MEMORY:
2155 case MBX_DUMP_CONTEXT:
2158 case MBX_UPDATE_CFG:
2160 case MBX_DEL_LD_ENTRY:
2161 case MBX_RUN_PROGRAM:
2163 case MBX_SET_VARIABLE:
2164 case MBX_UNREG_D_ID:
2165 case MBX_KILL_BOARD:
2166 case MBX_CONFIG_FARP:
2169 case MBX_RUN_BIU_DIAG64:
2170 case MBX_CONFIG_PORT:
2171 case MBX_READ_SPARM64:
2172 case MBX_READ_RPI64:
2173 case MBX_REG_LOGIN64:
2174 case MBX_READ_TOPOLOGY:
2177 case MBX_LOAD_EXP_ROM:
2178 case MBX_ASYNCEVT_ENABLE:
2182 case MBX_PORT_CAPABILITIES:
2183 case MBX_PORT_IOV_CONTROL:
2184 case MBX_SLI4_CONFIG:
2185 case MBX_SLI4_REQ_FTRS:
2187 case MBX_UNREG_FCFI:
2192 case MBX_RESUME_RPI:
2193 case MBX_READ_EVENT_LOG_STATUS:
2194 case MBX_READ_EVENT_LOG:
2195 case MBX_SECURITY_MGMT:
2197 case MBX_ACCESS_VDATA:
2208 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2209 * @phba: Pointer to HBA context object.
2210 * @pmboxq: Pointer to mailbox command.
2212 * This is the completion handler function for mailbox commands issued from
2213 * the lpfc_sli_issue_mbox_wait function. This function is called by the
2214 * mailbox event handler function with no lock held. This function
2215 * will wake up the thread waiting on the wait queue pointed to by
2216 * context1 of the mailbox.
2219 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2221 wait_queue_head_t *pdone_q;
2222 unsigned long drvr_flag;
2225 * If pdone_q is empty, the driver thread gave up waiting and
2226 * continued running.
2228 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2229 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2230 pdone_q = (wait_queue_head_t *) pmboxq->context1;
2232 wake_up_interruptible(pdone_q);
2233 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
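/*
 * Illustrative sketch: the waiter side of the handshake above.  The
 * issuing thread parks a wait queue head in pmboxq->context1 and sleeps;
 * the completion sets LPFC_MBX_WAKE and wakes the queue under the
 * hbalock, so a timed-out waiter cannot race the interrupt path.
 * Hypothetical, condensed waiter (the real lpfc_sli_issue_mbox_wait
 * does considerably more):
 */
static int lpfc_example_wait_mbox_done(LPFC_MBOXQ_t *pmboxq,
                                       unsigned long tmo_secs)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);

        pmboxq->context1 = &done_q;             /* seen by the wake side */
        wait_event_interruptible_timeout(done_q,
                                         pmboxq->mbox_flag & LPFC_MBX_WAKE,
                                         msecs_to_jiffies(tmo_secs * 1000));
        return (pmboxq->mbox_flag & LPFC_MBX_WAKE) ? 0 : -ETIMEDOUT;
}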
2239 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2240 * @phba: Pointer to HBA context object.
2241 * @pmb: Pointer to mailbox object.
2243 * This function is the default mailbox completion handler. It
2244 * frees the memory resources associated with the completed mailbox
2245 * command. If the completed command is a REG_LOGIN mailbox command,
2246 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2249 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2251 struct lpfc_vport *vport = pmb->vport;
2252 struct lpfc_dmabuf *mp;
2253 struct lpfc_nodelist *ndlp;
2254 struct Scsi_Host *shost;
2258 mp = (struct lpfc_dmabuf *) (pmb->context1);
2261 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2266 * If a REG_LOGIN succeeded after the node was destroyed or the node
2267 * is in re-discovery, the driver needs to clean up the RPI.
2269 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2270 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2271 !pmb->u.mb.mbxStatus) {
2272 rpi = pmb->u.mb.un.varWords[0];
2273 vpi = pmb->u.mb.un.varRegLogin.vpi;
2274 if (phba->sli_rev == LPFC_SLI_REV4)
2275 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2276 lpfc_unreg_login(phba, vpi, rpi, pmb);
2278 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2279 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2280 if (rc != MBX_NOT_FINISHED)
2284 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2285 !(phba->pport->load_flag & FC_UNLOADING) &&
2286 !pmb->u.mb.mbxStatus) {
2287 shost = lpfc_shost_from_vport(vport);
2288 spin_lock_irq(shost->host_lock);
2289 vport->vpi_state |= LPFC_VPI_REGISTERED;
2290 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2291 spin_unlock_irq(shost->host_lock);
2294 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2295 ndlp = (struct lpfc_nodelist *)pmb->context2;
2297 pmb->context2 = NULL;
2300 /* Check security permission status on INIT_LINK mailbox command */
2301 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2302 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2303 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2304 "2860 SLI authentication is required "
2305 "for INIT_LINK but has not done yet\n");
2307 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2308 lpfc_sli4_mbox_cmd_free(phba, pmb);
2310 mempool_free(pmb, phba->mbox_mem_pool);
2313 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2314 * @phba: Pointer to HBA context object.
2315 * @pmb: Pointer to mailbox object.
2317 * This function is the unreg rpi mailbox completion handler. It
2318 * frees the memory resources associated with the completed mailbox
2319 * command. An additional reference is taken on the ndlp to prevent
2320 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2321 * the unreg mailbox command completes; this routine puts that
2322 * reference back once the completion has run.
2326 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2328 struct lpfc_vport *vport = pmb->vport;
2329 struct lpfc_nodelist *ndlp;
2331 ndlp = pmb->context1;
2332 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2333 if (phba->sli_rev == LPFC_SLI_REV4 &&
2334 (bf_get(lpfc_sli_intf_if_type,
2335 &phba->sli4_hba.sli_intf) ==
2336 LPFC_SLI_INTF_IF_TYPE_2)) {
2338 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2339 "0010 UNREG_LOGIN vpi:%x "
2340 "rpi:%x DID:%x map:%x %p\n",
2341 vport->vpi, ndlp->nlp_rpi,
2343 ndlp->nlp_usg_map, ndlp);
2344 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2350 mempool_free(pmb, phba->mbox_mem_pool);
2354 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2355 * @phba: Pointer to HBA context object.
2357 * This function is called with no lock held. This function processes all
2358 * the completed mailbox commands and gives them to the upper layers. The
2359 * interrupt service routine processes the mailbox completion interrupt,
2360 * adds completed mailbox commands to the mboxq_cmpl queue and signals the
2361 * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which returns the
2362 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2363 * function returns the mailbox commands to the upper layer by calling the
2364 * completion handler function of each mailbox.
2367 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2374 phba->sli.slistat.mbox_event++;
2376 /* Get all completed mailbox buffers into the cmplq */
2377 spin_lock_irq(&phba->hbalock);
2378 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2379 spin_unlock_irq(&phba->hbalock);
2381 /* Get a Mailbox buffer to setup mailbox commands for callback */
2383 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2389 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2391 lpfc_debugfs_disc_trc(pmb->vport,
2392 LPFC_DISC_TRC_MBOX_VPORT,
2393 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2394 (uint32_t)pmbox->mbxCommand,
2395 pmbox->un.varWords[0],
2396 pmbox->un.varWords[1]);
2399 lpfc_debugfs_disc_trc(phba->pport,
2401 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2402 (uint32_t)pmbox->mbxCommand,
2403 pmbox->un.varWords[0],
2404 pmbox->un.varWords[1]);
2409 * It is a fatal error if an unknown mbox command completion occurs.
2411 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2413 /* Unknown mailbox command compl */
2414 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2415 "(%d):0323 Unknown Mailbox command "
2416 "x%x (x%x/x%x) Cmpl\n",
2417 pmb->vport ? pmb->vport->vpi : 0,
2419 lpfc_sli_config_mbox_subsys_get(phba,
2421 lpfc_sli_config_mbox_opcode_get(phba,
2423 phba->link_state = LPFC_HBA_ERROR;
2424 phba->work_hs = HS_FFER3;
2425 lpfc_handle_eratt(phba);
2429 if (pmbox->mbxStatus) {
2430 phba->sli.slistat.mbox_stat_err++;
2431 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2432 /* Mbox cmd cmpl error - RETRYing */
2433 lpfc_printf_log(phba, KERN_INFO,
2435 "(%d):0305 Mbox cmd cmpl "
2436 "error - RETRYing Data: x%x "
2437 "(x%x/x%x) x%x x%x x%x\n",
2438 pmb->vport ? pmb->vport->vpi : 0,
2440 lpfc_sli_config_mbox_subsys_get(phba,
2442 lpfc_sli_config_mbox_opcode_get(phba,
2445 pmbox->un.varWords[0],
2446 pmb->vport->port_state);
2447 pmbox->mbxStatus = 0;
2448 pmbox->mbxOwner = OWN_HOST;
2449 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2450 if (rc != MBX_NOT_FINISHED)
2455 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2456 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2457 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2458 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2460 pmb->vport ? pmb->vport->vpi : 0,
2462 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2463 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2465 *((uint32_t *) pmbox),
2466 pmbox->un.varWords[0],
2467 pmbox->un.varWords[1],
2468 pmbox->un.varWords[2],
2469 pmbox->un.varWords[3],
2470 pmbox->un.varWords[4],
2471 pmbox->un.varWords[5],
2472 pmbox->un.varWords[6],
2473 pmbox->un.varWords[7],
2474 pmbox->un.varWords[8],
2475 pmbox->un.varWords[9],
2476 pmbox->un.varWords[10]);
2479 pmb->mbox_cmpl(phba, pmb);
2485 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2486 * @phba: Pointer to HBA context object.
2487 * @pring: Pointer to driver SLI ring object.
2490 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
2491 * is set in the tag, the buffer was posted for a particular exchange and
2492 * the function will return the buffer without posting a replacement.
2493 * If the buffer is for unsolicited ELS or CT traffic, this function
2494 * returns the buffer and also posts another buffer to the firmware.
2496 static struct lpfc_dmabuf *
2497 lpfc_sli_get_buff(struct lpfc_hba *phba,
2498 struct lpfc_sli_ring *pring,
2501 struct hbq_dmabuf *hbq_entry;
2503 if (tag & QUE_BUFTAG_BIT)
2504 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2505 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2508 return &hbq_entry->dbuf;
2512 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2513 * @phba: Pointer to HBA context object.
2514 * @pring: Pointer to driver SLI ring object.
2515 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2516 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2517 * @fch_type: the type for the first frame of the sequence.
2519 * This function is called with no lock held. This function uses the r_ctl and
2520 * type of the received sequence to find the correct callback function to call
2521 * to process the sequence.
2524 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2525 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2532 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
2538 /* Unsolicited responses */
2539 if (pring->prt[0].profile) {
2540 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2541 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2545 /* We must search, based on rctl / type
2546 for the right routine */
2547 for (i = 0; i < pring->num_mask; i++) {
2548 if ((pring->prt[i].rctl == fch_r_ctl) &&
2549 (pring->prt[i].type == fch_type)) {
2550 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2551 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2552 (phba, pring, saveq);
2560 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2561 * @phba: Pointer to HBA context object.
2562 * @pring: Pointer to driver SLI ring object.
2563 * @saveq: Pointer to the unsolicited iocb.
2565 * This function is called with no lock held by the ring event handler
2566 * when there is an unsolicited iocb posted to the response ring by the
2567 * firmware. This function gets the buffer associated with the iocbs
2568 * and calls the event handler for the ring. This function handles both
2569 * qring buffers and hbq buffers.
2570 * When the function returns 1, the caller can free the iocb object; otherwise
2571 * upper layer functions will free the iocb objects.
2574 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2575 struct lpfc_iocbq *saveq)
2579 uint32_t Rctl, Type;
2580 struct lpfc_iocbq *iocbq;
2581 struct lpfc_dmabuf *dmzbuf;
2583 irsp = &(saveq->iocb);
2585 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2586 if (pring->lpfc_sli_rcv_async_status)
2587 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2589 lpfc_printf_log(phba,
2592 "0316 Ring %d handler: unexpected "
2593 "ASYNC_STATUS iocb received evt_code "
2596 irsp->un.asyncstat.evt_code);
2600 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2601 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2602 if (irsp->ulpBdeCount > 0) {
2603 dmzbuf = lpfc_sli_get_buff(phba, pring,
2604 irsp->un.ulpWord[3]);
2605 lpfc_in_buf_free(phba, dmzbuf);
2608 if (irsp->ulpBdeCount > 1) {
2609 dmzbuf = lpfc_sli_get_buff(phba, pring,
2610 irsp->unsli3.sli3Words[3]);
2611 lpfc_in_buf_free(phba, dmzbuf);
2614 if (irsp->ulpBdeCount > 2) {
2615 dmzbuf = lpfc_sli_get_buff(phba, pring,
2616 irsp->unsli3.sli3Words[7]);
2617 lpfc_in_buf_free(phba, dmzbuf);
2623 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2624 if (irsp->ulpBdeCount != 0) {
2625 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2626 irsp->un.ulpWord[3]);
2627 if (!saveq->context2)
2628 lpfc_printf_log(phba,
2631 "0341 Ring %d Cannot find buffer for "
2632 "an unsolicited iocb. tag 0x%x\n",
2634 irsp->un.ulpWord[3]);
2636 if (irsp->ulpBdeCount == 2) {
2637 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2638 irsp->unsli3.sli3Words[7]);
2639 if (!saveq->context3)
2640 lpfc_printf_log(phba,
2643 "0342 Ring %d Cannot find buffer for an"
2644 " unsolicited iocb. tag 0x%x\n",
2646 irsp->unsli3.sli3Words[7]);
2648 list_for_each_entry(iocbq, &saveq->list, list) {
2649 irsp = &(iocbq->iocb);
2650 if (irsp->ulpBdeCount != 0) {
2651 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2652 irsp->un.ulpWord[3]);
2653 if (!iocbq->context2)
2654 lpfc_printf_log(phba,
2657 "0343 Ring %d Cannot find "
2658 "buffer for an unsolicited iocb"
2659 ". tag 0x%x\n", pring->ringno,
2660 irsp->un.ulpWord[3]);
2662 if (irsp->ulpBdeCount == 2) {
2663 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2664 irsp->unsli3.sli3Words[7]);
2665 if (!iocbq->context3)
2666 lpfc_printf_log(phba,
2669 "0344 Ring %d Cannot find "
2670 "buffer for an unsolicited "
2673 irsp->unsli3.sli3Words[7]);
2677 if (irsp->ulpBdeCount != 0 &&
2678 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2679 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2682 /* search continue save q for same XRI */
2683 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2684 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2685 saveq->iocb.unsli3.rcvsli3.ox_id) {
2686 list_add_tail(&saveq->list, &iocbq->list);
2692 list_add_tail(&saveq->clist,
2693 &pring->iocb_continue_saveq);
2694 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2695 list_del_init(&iocbq->clist);
2697 irsp = &(saveq->iocb);
2701 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2702 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2703 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2704 Rctl = FC_RCTL_ELS_REQ;
2707 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2708 Rctl = w5p->hcsw.Rctl;
2709 Type = w5p->hcsw.Type;
2711 /* Firmware Workaround */
2712 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2713 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2714 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2715 Rctl = FC_RCTL_ELS_REQ;
2717 w5p->hcsw.Rctl = Rctl;
2718 w5p->hcsw.Type = Type;
2722 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2723 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2724 "0313 Ring %d handler: unexpected Rctl x%x "
2725 "Type x%x received\n",
2726 pring->ringno, Rctl, Type);
2732 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2733 * @phba: Pointer to HBA context object.
2734 * @pring: Pointer to driver SLI ring object.
2735 * @prspiocb: Pointer to response iocb object.
2737 * This function looks up the iocb_lookup table to get the command iocb
2738 * corresponding to the given response iocb using the iotag of the
2739 * response iocb. This function is called with the hbalock held.
2740 * This function returns the command iocb object if it finds the command
2741 * iocb; otherwise it returns NULL.
2743 static struct lpfc_iocbq *
2744 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2745 struct lpfc_sli_ring *pring,
2746 struct lpfc_iocbq *prspiocb)
2748 struct lpfc_iocbq *cmd_iocb = NULL;
2750 lockdep_assert_held(&phba->hbalock);
2752 iotag = prspiocb->iocb.ulpIoTag;
2754 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2755 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2756 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2757 /* remove from txcmpl queue list */
2758 list_del_init(&cmd_iocb->list);
2759 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2764 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2765 "0317 iotag x%x is out of "
2766 "range: max iotag x%x wd0 x%x\n",
2767 iotag, phba->sli.last_iotag,
2768 *(((uint32_t *) &prspiocb->iocb) + 7));
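/*
 * Illustrative sketch: command/response matching is an O(1) array index,
 * not a list walk.  Every command iocb gets an iotag at issue time and a
 * slot in phba->sli.iocbq_lookup[]; the response carries the iotag back,
 * as the lookup above shows.  Hypothetical minimal form:
 */
static struct lpfc_iocbq *
lpfc_example_iotag_lookup(struct lpfc_hba *phba, uint16_t iotag)
{
        if (iotag == 0 || iotag > phba->sli.last_iotag)
                return NULL;                    /* tag out of range */
        return phba->sli.iocbq_lookup[iotag];   /* O(1) indexed fetch */
}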
2773 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2774 * @phba: Pointer to HBA context object.
2775 * @pring: Pointer to driver SLI ring object.
2778 * This function looks up the iocb_lookup table to get the command iocb
2779 * corresponding to the given iotag. This function is called with the
2781 * This function returns the command iocb object if it finds the command
2782 * iocb; otherwise it returns NULL.
2784 static struct lpfc_iocbq *
2785 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2786 struct lpfc_sli_ring *pring, uint16_t iotag)
2788 struct lpfc_iocbq *cmd_iocb = NULL;
2790 lockdep_assert_held(&phba->hbalock);
2791 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2792 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2793 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2794 /* remove from txcmpl queue list */
2795 list_del_init(&cmd_iocb->list);
2796 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2801 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2802 "0372 iotag x%x lookup error: max iotag (x%x) "
2804 iotag, phba->sli.last_iotag,
2805 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
2810 * lpfc_sli_process_sol_iocb - process solicited iocb completion
2811 * @phba: Pointer to HBA context object.
2812 * @pring: Pointer to driver SLI ring object.
2813 * @saveq: Pointer to the response iocb to be processed.
2815 * This function is called by the ring event handler for non-fcp
2816 * rings when there is a new response iocb in the response ring.
2817 * The caller is not required to hold any locks. This function
2818 * gets the command iocb associated with the response iocb and
2819 * calls the completion handler for the command iocb. If there
2820 * is no completion handler, the function will free the resources
2821 * associated with the command iocb. If the response iocb is for
2822 * an already aborted command iocb, the status of the completion
2823 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2824 * This function always returns 1.
2827 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2828 struct lpfc_iocbq *saveq)
2830 struct lpfc_iocbq *cmdiocbp;
2832 unsigned long iflag;
2834 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2835 spin_lock_irqsave(&phba->hbalock, iflag);
2836 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2837 spin_unlock_irqrestore(&phba->hbalock, iflag);
2840 if (cmdiocbp->iocb_cmpl) {
2842 * If an ELS command failed, send an event to mgmt
2845 if (saveq->iocb.ulpStatus &&
2846 (pring->ringno == LPFC_ELS_RING) &&
2847 (cmdiocbp->iocb.ulpCommand ==
2848 CMD_ELS_REQUEST64_CR))
2849 lpfc_send_els_failure_event(phba,
2853 * Post all ELS completions to the worker thread.
2854 * All others are passed to the completion callback.
2856 if (pring->ringno == LPFC_ELS_RING) {
2857 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2858 (cmdiocbp->iocb_flag &
2859 LPFC_DRIVER_ABORTED)) {
2860 spin_lock_irqsave(&phba->hbalock,
2862 cmdiocbp->iocb_flag &=
2863 ~LPFC_DRIVER_ABORTED;
2864 spin_unlock_irqrestore(&phba->hbalock,
2866 saveq->iocb.ulpStatus =
2867 IOSTAT_LOCAL_REJECT;
2868 saveq->iocb.un.ulpWord[4] =
2871 /* Firmware could still be in the process
2872 * of DMAing the payload, so don't free the
2873 * data buffer until after a heartbeat.
2875 spin_lock_irqsave(&phba->hbalock,
2877 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
2878 spin_unlock_irqrestore(&phba->hbalock,
2881 if (phba->sli_rev == LPFC_SLI_REV4) {
2882 if (saveq->iocb_flag &
2883 LPFC_EXCHANGE_BUSY) {
2884 /* Set cmdiocb flag for the
2885 * exchange busy so sgl (xri)
2886 * will not be released until
2887 * the abort xri is received
2891 &phba->hbalock, iflag);
2892 cmdiocbp->iocb_flag |=
2894 spin_unlock_irqrestore(
2895 &phba->hbalock, iflag);
2897 if (cmdiocbp->iocb_flag &
2898 LPFC_DRIVER_ABORTED) {
2900 * Clear LPFC_DRIVER_ABORTED
2901 * bit in case it was driver
2905 &phba->hbalock, iflag);
2906 cmdiocbp->iocb_flag &=
2907 ~LPFC_DRIVER_ABORTED;
2908 spin_unlock_irqrestore(
2909 &phba->hbalock, iflag);
2910 cmdiocbp->iocb.ulpStatus =
2911 IOSTAT_LOCAL_REJECT;
2912 cmdiocbp->iocb.un.ulpWord[4] =
2913 IOERR_ABORT_REQUESTED;
2915 * For SLI4, irsiocb contains
2916 * NO_XRI in sli_xritag; it
2917 * shall not affect the sgl
2918 * (xri) release process.
2920 saveq->iocb.ulpStatus =
2921 IOSTAT_LOCAL_REJECT;
2922 saveq->iocb.un.ulpWord[4] =
2925 &phba->hbalock, iflag);
2927 LPFC_DELAY_MEM_FREE;
2928 spin_unlock_irqrestore(
2929 &phba->hbalock, iflag);
2933 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
2935 lpfc_sli_release_iocbq(phba, cmdiocbp);
2938 * Unknown initiating command based on the response iotag.
2939 * This could be the case on the ELS ring because of
2942 if (pring->ringno != LPFC_ELS_RING) {
2944 * Ring <ringno> handler: unexpected completion IoTag
2947 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2948 "0322 Ring %d handler: "
2949 "unexpected completion IoTag x%x "
2950 "Data: x%x x%x x%x x%x\n",
2952 saveq->iocb.ulpIoTag,
2953 saveq->iocb.ulpStatus,
2954 saveq->iocb.un.ulpWord[4],
2955 saveq->iocb.ulpCommand,
2956 saveq->iocb.ulpContext);
2964 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
2965 * @phba: Pointer to HBA context object.
2966 * @pring: Pointer to driver SLI ring object.
2968 * This function is called from the iocb ring event handlers when the
2969 * put pointer is ahead of the get pointer for a ring. This function signals
2970 * an error attention condition to the worker thread and the worker
2971 * thread will transition the HBA to offline state.
2974 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2976 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2978 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
2979 * rsp ring <portRspMax>
2981 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2982 "0312 Ring %d handler: portRspPut %d "
2983 "is bigger than rsp ring %d\n",
2984 pring->ringno, le32_to_cpu(pgp->rspPutInx),
2985 pring->sli.sli3.numRiocb);
2987 phba->link_state = LPFC_HBA_ERROR;
2990 * All error attention handlers are posted to the worker thread.
2993 phba->work_ha |= HA_ERATT;
2994 phba->work_hs = HS_FFER3;
2996 lpfc_worker_wake_up(phba);
3002 * lpfc_poll_eratt - Error attention polling timer timeout handler
3003 * @ptr: Pointer to address of HBA context object.
3005 * This function is invoked by the Error Attention polling timer when the
3006 * timer times out. It will check the SLI Error Attention register for
3007 * possible attention events. If so, it will post an Error Attention event
3008 * and wake up worker thread to process it. Otherwise, it will set up the
3009 * Error Attention polling timer for the next poll.
3011 void lpfc_poll_eratt(unsigned long ptr)
3013 struct lpfc_hba *phba;
3015 uint64_t sli_intr, cnt;
3017 phba = (struct lpfc_hba *)ptr;
3019 /* Here we will also keep track of interrupts per second on the hba */
3020 sli_intr = phba->sli.slistat.sli_intr;
3022 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3023 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3026 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3028 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3029 do_div(cnt, phba->eratt_poll_interval);
3030 phba->sli.slistat.sli_ips = cnt;
3032 phba->sli.slistat.sli_prev_intr = sli_intr;
3034 /* Check chip HA register for error event */
3035 eratt = lpfc_sli_check_eratt(phba);
3038 /* Tell the worker thread there is work to do */
3039 lpfc_worker_wake_up(phba);
3041 /* Restart the timer for next eratt poll */
3042 mod_timer(&phba->eratt_poll,
3044 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
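/*
 * Illustrative sketch: the interrupts-per-second math above must survive
 * a 64-bit counter wrap and cannot use a native 64-by-32 divide on
 * 32-bit x86, hence do_div().  Hypothetical stand-alone form mirroring
 * the driver's wrap handling:
 */
static uint64_t lpfc_example_intr_rate(uint64_t prev, uint64_t now,
                                       uint32_t interval_secs)
{
        uint64_t cnt;

        if (prev > now)                         /* counter wrapped */
                cnt = ((uint64_t)(-1) - prev) + now;
        else
                cnt = now - prev;
        do_div(cnt, interval_secs);             /* quotient left in cnt */
        return cnt;
}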
3050 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3051 * @phba: Pointer to HBA context object.
3052 * @pring: Pointer to driver SLI ring object.
3053 * @mask: Host attention register mask for this ring.
3055 * This function is called from the interrupt context when there is a ring
3056 * event for the fcp ring. The caller does not hold any lock.
3057 * The function processes each response iocb in the response ring until it
3058 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3059 * LE bit set. The function will call the completion handler of the command iocb
3060 * if the response iocb indicates a completion for a command iocb or it is
3061 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3062 * function if this is an unsolicited iocb.
3063 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3064 * to check it explicitly.
3067 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3068 struct lpfc_sli_ring *pring, uint32_t mask)
3070 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3071 IOCB_t *irsp = NULL;
3072 IOCB_t *entry = NULL;
3073 struct lpfc_iocbq *cmdiocbq = NULL;
3074 struct lpfc_iocbq rspiocbq;
3076 uint32_t portRspPut, portRspMax;
3078 lpfc_iocb_type type;
3079 unsigned long iflag;
3080 uint32_t rsp_cmpl = 0;
3082 spin_lock_irqsave(&phba->hbalock, iflag);
3083 pring->stats.iocb_event++;
3086 * The next available response entry should never exceed the maximum
3087 * entries. If it does, treat it as an adapter hardware error.
3089 portRspMax = pring->sli.sli3.numRiocb;
3090 portRspPut = le32_to_cpu(pgp->rspPutInx);
3091 if (unlikely(portRspPut >= portRspMax)) {
3092 lpfc_sli_rsp_pointers_error(phba, pring);
3093 spin_unlock_irqrestore(&phba->hbalock, iflag);
3096 if (phba->fcp_ring_in_use) {
3097 spin_unlock_irqrestore(&phba->hbalock, iflag);
3100 phba->fcp_ring_in_use = 1;
3103 while (pring->sli.sli3.rspidx != portRspPut) {
3105 * Fetch an entry off the ring and copy it into a local data
3106 * structure. The copy involves a byte-swap since the
3107 * network byte order and pci byte orders are different.
3109 entry = lpfc_resp_iocb(phba, pring);
3110 phba->last_completion_time = jiffies;
3112 if (++pring->sli.sli3.rspidx >= portRspMax)
3113 pring->sli.sli3.rspidx = 0;
3115 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3116 (uint32_t *) &rspiocbq.iocb,
3117 phba->iocb_rsp_size);
3118 INIT_LIST_HEAD(&(rspiocbq.list));
3119 irsp = &rspiocbq.iocb;
3121 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3122 pring->stats.iocb_rsp++;
3125 if (unlikely(irsp->ulpStatus)) {
3127 * If resource errors are reported from the HBA, reduce the
3128 * queue depths of the SCSI device.
3130 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3131 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3132 IOERR_NO_RESOURCES)) {
3133 spin_unlock_irqrestore(&phba->hbalock, iflag);
3134 phba->lpfc_rampdown_queue_depth(phba);
3135 spin_lock_irqsave(&phba->hbalock, iflag);
3138 /* Rsp ring <ringno> error: IOCB */
3139 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3140 "0336 Rsp Ring %d error: IOCB Data: "
3141 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3143 irsp->un.ulpWord[0],
3144 irsp->un.ulpWord[1],
3145 irsp->un.ulpWord[2],
3146 irsp->un.ulpWord[3],
3147 irsp->un.ulpWord[4],
3148 irsp->un.ulpWord[5],
3149 *(uint32_t *)&irsp->un1,
3150 *((uint32_t *)&irsp->un1 + 1));
3154 case LPFC_ABORT_IOCB:
3157 * Idle exchange closed via ABTS from port. No iocb
3158 * resources need to be recovered.
3160 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3161 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3162 "0333 IOCB cmd 0x%x"
3163 " processed. Skipping"
3169 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3171 if (unlikely(!cmdiocbq))
3173 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3174 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3175 if (cmdiocbq->iocb_cmpl) {
3176 spin_unlock_irqrestore(&phba->hbalock, iflag);
3177 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3179 spin_lock_irqsave(&phba->hbalock, iflag);
3182 case LPFC_UNSOL_IOCB:
3183 spin_unlock_irqrestore(&phba->hbalock, iflag);
3184 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3185 spin_lock_irqsave(&phba->hbalock, iflag);
3188 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3189 char adaptermsg[LPFC_MAX_ADPTMSG];
3190 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3191 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3193 dev_warn(&((phba->pcidev)->dev),
3195 phba->brd_no, adaptermsg);
3197 /* Unknown IOCB command */
3198 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3199 "0334 Unknown IOCB command "
3200 "Data: x%x, x%x x%x x%x x%x\n",
3201 type, irsp->ulpCommand,
3210 * The response IOCB has been processed. Update the ring
3211 * pointer in SLIM. If the port response put pointer has not
3212 * been updated, sync the pgp->rspPutInx and fetch the new port
3213 * response put pointer.
3215 writel(pring->sli.sli3.rspidx,
3216 &phba->host_gp[pring->ringno].rspGetInx);
3218 if (pring->sli.sli3.rspidx == portRspPut)
3219 portRspPut = le32_to_cpu(pgp->rspPutInx);
3222 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3223 pring->stats.iocb_rsp_full++;
3224 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3225 writel(status, phba->CAregaddr);
3226 readl(phba->CAregaddr);
3228 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3229 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3230 pring->stats.iocb_cmd_empty++;
3232 /* Force update of the local copy of cmdGetInx */
3233 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3234 lpfc_sli_resume_iocb(phba, pring);
3236 if ((pring->lpfc_sli_cmd_available))
3237 (pring->lpfc_sli_cmd_available) (phba, pring);
3241 phba->fcp_ring_in_use = 0;
3242 spin_unlock_irqrestore(&phba->hbalock, iflag);
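/*
 * Illustrative sketch: the loop above is a classic single-consumer ring.
 * The driver-owned get index (rspidx) chases the HBA-owned put index;
 * each entry is copied (with byte swapping) out of the DMA ring so the
 * slot can be reused at once, and the get index is written back to SLIM
 * so the HBA sees the progress.  Hypothetical skeleton of that walk:
 */
static void lpfc_example_drain_resp_ring(struct lpfc_hba *phba,
                                         struct lpfc_sli_ring *pring,
                                         uint32_t portRspPut,
                                         uint32_t portRspMax)
{
        struct lpfc_iocbq rspiocbq;

        while (pring->sli.sli3.rspidx != portRspPut) {
                lpfc_sli_pcimem_bcopy(lpfc_resp_iocb(phba, pring),
                                      &rspiocbq.iocb, phba->iocb_rsp_size);
                if (++pring->sli.sli3.rspidx >= portRspMax)
                        pring->sli.sli3.rspidx = 0;     /* wrap around */
                /* ... dispatch rspiocbq.iocb by completion type ... */
                writel(pring->sli.sli3.rspidx,
                       &phba->host_gp[pring->ringno].rspGetInx);
        }
}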
3247 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3248 * @phba: Pointer to HBA context object.
3249 * @pring: Pointer to driver SLI ring object.
3250 * @rspiocbp: Pointer to driver response IOCB object.
3252 * This function is called from the worker thread when there is a slow-path
3253 * response IOCB to process. This function chains all the response iocbs until
3254 * seeing the iocb with the LE bit set. The function will call
3255 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3256 * completion of a command iocb. The function will call the
3257 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3258 * The function frees the resources or calls the completion handler if this
3259 * iocb is an abort completion. The function returns NULL when the response
3260 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3261 * this function shall chain the iocb on to the iocb_continueq and return the
3262 * response iocb passed in.
3264 static struct lpfc_iocbq *
3265 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3266 struct lpfc_iocbq *rspiocbp)
3268 struct lpfc_iocbq *saveq;
3269 struct lpfc_iocbq *cmdiocbp;
3270 struct lpfc_iocbq *next_iocb;
3271 IOCB_t *irsp = NULL;
3272 uint32_t free_saveq;
3273 uint8_t iocb_cmd_type;
3274 lpfc_iocb_type type;
3275 unsigned long iflag;
3278 spin_lock_irqsave(&phba->hbalock, iflag);
3279 /* First add the response iocb to the continueq list */
3280 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3281 pring->iocb_continueq_cnt++;
3283 /* Now, determine whether the list is completed for processing */
3284 irsp = &rspiocbp->iocb;
3287 * By default, the driver expects to free all resources
3288 * associated with this iocb completion.
3291 saveq = list_get_first(&pring->iocb_continueq,
3292 struct lpfc_iocbq, list);
3293 irsp = &(saveq->iocb);
3294 list_del_init(&pring->iocb_continueq);
3295 pring->iocb_continueq_cnt = 0;
3297 pring->stats.iocb_rsp++;
3300 * If resource errors are reported from the HBA, reduce the
3301 * queue depths of the SCSI device.
3303 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3304 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3305 IOERR_NO_RESOURCES)) {
3306 spin_unlock_irqrestore(&phba->hbalock, iflag);
3307 phba->lpfc_rampdown_queue_depth(phba);
3308 spin_lock_irqsave(&phba->hbalock, iflag);
3311 if (irsp->ulpStatus) {
3312 /* Rsp ring <ringno> error: IOCB */
3313 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3314 "0328 Rsp Ring %d error: "
3319 "x%x x%x x%x x%x\n",
3321 irsp->un.ulpWord[0],
3322 irsp->un.ulpWord[1],
3323 irsp->un.ulpWord[2],
3324 irsp->un.ulpWord[3],
3325 irsp->un.ulpWord[4],
3326 irsp->un.ulpWord[5],
3327 *(((uint32_t *) irsp) + 6),
3328 *(((uint32_t *) irsp) + 7),
3329 *(((uint32_t *) irsp) + 8),
3330 *(((uint32_t *) irsp) + 9),
3331 *(((uint32_t *) irsp) + 10),
3332 *(((uint32_t *) irsp) + 11),
3333 *(((uint32_t *) irsp) + 12),
3334 *(((uint32_t *) irsp) + 13),
3335 *(((uint32_t *) irsp) + 14),
3336 *(((uint32_t *) irsp) + 15));
3340 * Fetch the IOCB command type and call the correct completion
3341 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3342 * get freed back to the lpfc_iocb_list by the discovery
3345 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3346 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3349 spin_unlock_irqrestore(&phba->hbalock, iflag);
3350 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3351 spin_lock_irqsave(&phba->hbalock, iflag);
3354 case LPFC_UNSOL_IOCB:
3355 spin_unlock_irqrestore(&phba->hbalock, iflag);
3356 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3357 spin_lock_irqsave(&phba->hbalock, iflag);
3362 case LPFC_ABORT_IOCB:
3364 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3365 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3368 /* Call the specified completion routine */
3369 if (cmdiocbp->iocb_cmpl) {
3370 spin_unlock_irqrestore(&phba->hbalock,
3372 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3374 spin_lock_irqsave(&phba->hbalock,
3377 __lpfc_sli_release_iocbq(phba,
3382 case LPFC_UNKNOWN_IOCB:
3383 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3384 char adaptermsg[LPFC_MAX_ADPTMSG];
3385 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3386 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3388 dev_warn(&((phba->pcidev)->dev),
3390 phba->brd_no, adaptermsg);
3392 /* Unknown IOCB command */
3393 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3394 "0335 Unknown IOCB "
3395 "command Data: x%x "
3406 list_for_each_entry_safe(rspiocbp, next_iocb,
3407 &saveq->list, list) {
3408 list_del_init(&rspiocbp->list);
3409 __lpfc_sli_release_iocbq(phba, rspiocbp);
3411 __lpfc_sli_release_iocbq(phba, saveq);
3415 spin_unlock_irqrestore(&phba->hbalock, iflag);
3420 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3421 * @phba: Pointer to HBA context object.
3422 * @pring: Pointer to driver SLI ring object.
3423 * @mask: Host attention register mask for this ring.
3425 * This routine wraps the actual slow-ring event process routine via the
3426 * API jump table function pointer in the lpfc_hba struct.
3429 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3430 struct lpfc_sli_ring *pring, uint32_t mask)
3432 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3436 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3437 * @phba: Pointer to HBA context object.
3438 * @pring: Pointer to driver SLI ring object.
3439 * @mask: Host attention register mask for this ring.
3441 * This function is called from the worker thread when there is a ring event
3442 * for non-fcp rings. The caller does not hold any lock. The function
3443 * removes each response iocb from the response ring and calls the handle
3444 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3447 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3448 struct lpfc_sli_ring *pring, uint32_t mask)
3450 struct lpfc_pgp *pgp;
3452 IOCB_t *irsp = NULL;
3453 struct lpfc_iocbq *rspiocbp = NULL;
3454 uint32_t portRspPut, portRspMax;
3455 unsigned long iflag;
3458 pgp = &phba->port_gp[pring->ringno];
3459 spin_lock_irqsave(&phba->hbalock, iflag);
3460 pring->stats.iocb_event++;
3463 * The next available response entry should never exceed the maximum
3464 * entries. If it does, treat it as an adapter hardware error.
3466 portRspMax = pring->sli.sli3.numRiocb;
3467 portRspPut = le32_to_cpu(pgp->rspPutInx);
3468 if (portRspPut >= portRspMax) {
3470 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3471 * rsp ring <portRspMax>
3473 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3474 "0303 Ring %d handler: portRspPut %d "
3475 "is bigger than rsp ring %d\n",
3476 pring->ringno, portRspPut, portRspMax);
3478 phba->link_state = LPFC_HBA_ERROR;
3479 spin_unlock_irqrestore(&phba->hbalock, iflag);
3481 phba->work_hs = HS_FFER3;
3482 lpfc_handle_eratt(phba);
3488 while (pring->sli.sli3.rspidx != portRspPut) {
3490 * Build a completion list and call the appropriate handler.
3491 * The process is to get the next available response iocb, get
3492 * a free iocb from the list, copy the response data into the
3493 * free iocb, insert to the continuation list, and update the
3494 * next response index to slim. This process makes response
3495 * iocbs in the ring available to DMA as fast as possible but
3496 * pays a penalty for a copy operation. Since the iocb is
3497 * only 32 bytes, this penalty is considered small relative to
3498 * the PCI reads for register values and a slim write. When
3499 * the ulpLe field is set, the entire Command has been received.
3502 entry = lpfc_resp_iocb(phba, pring);
3504 phba->last_completion_time = jiffies;
3505 rspiocbp = __lpfc_sli_get_iocbq(phba);
3506 if (rspiocbp == NULL) {
3507 printk(KERN_ERR "%s: out of buffers! Failing "
3508 "completion.\n", __func__);
3512 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3513 phba->iocb_rsp_size);
3514 irsp = &rspiocbp->iocb;
3516 if (++pring->sli.sli3.rspidx >= portRspMax)
3517 pring->sli.sli3.rspidx = 0;
3519 if (pring->ringno == LPFC_ELS_RING) {
3520 lpfc_debugfs_slow_ring_trc(phba,
3521 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3522 *(((uint32_t *) irsp) + 4),
3523 *(((uint32_t *) irsp) + 6),
3524 *(((uint32_t *) irsp) + 7));
3527 writel(pring->sli.sli3.rspidx,
3528 &phba->host_gp[pring->ringno].rspGetInx);
3530 spin_unlock_irqrestore(&phba->hbalock, iflag);
3531 /* Handle the response IOCB */
3532 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3533 spin_lock_irqsave(&phba->hbalock, iflag);
3536 * If the port response put pointer has not been updated, sync
3537 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3538 * response put pointer.
3540 if (pring->sli.sli3.rspidx == portRspPut) {
3541 portRspPut = le32_to_cpu(pgp->rspPutInx);
3543 } /* while (pring->sli.sli3.rspidx != portRspPut) */
3545 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3546 /* At least one response entry has been freed */
3547 pring->stats.iocb_rsp_full++;
3548 /* SET RxRE_RSP in Chip Att register */
3549 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3550 writel(status, phba->CAregaddr);
3551 readl(phba->CAregaddr); /* flush */
3553 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3554 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3555 pring->stats.iocb_cmd_empty++;
3557 /* Force update of the local copy of cmdGetInx */
3558 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3559 lpfc_sli_resume_iocb(phba, pring);
3561 if ((pring->lpfc_sli_cmd_available))
3562 (pring->lpfc_sli_cmd_available) (phba, pring);
3566 spin_unlock_irqrestore(&phba->hbalock, iflag);
3571 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3572 * @phba: Pointer to HBA context object.
3573 * @pring: Pointer to driver SLI ring object.
3574 * @mask: Host attention register mask for this ring.
3576 * This function is called from the worker thread when there is a pending
3577 * ELS response iocb on the driver internal slow-path response iocb worker
3578 * queue. The caller does not hold any lock. The function will remove each
3579 * response iocb from the response worker queue and calls the handle
3580 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3583 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3584 struct lpfc_sli_ring *pring, uint32_t mask)
3586 struct lpfc_iocbq *irspiocbq;
3587 struct hbq_dmabuf *dmabuf;
3588 struct lpfc_cq_event *cq_event;
3589 unsigned long iflag;
3592 spin_lock_irqsave(&phba->hbalock, iflag);
3593 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3594 spin_unlock_irqrestore(&phba->hbalock, iflag);
3595 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3596 /* Get the response iocb from the head of work queue */
3597 spin_lock_irqsave(&phba->hbalock, iflag);
3598 list_remove_head(&phba->sli4_hba.sp_queue_event,
3599 cq_event, struct lpfc_cq_event, list);
3600 spin_unlock_irqrestore(&phba->hbalock, iflag);
3602 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3603 case CQE_CODE_COMPL_WQE:
3604 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3606 /* Translate ELS WCQE to response IOCBQ */
3607 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3610 lpfc_sli_sp_handle_rspiocb(phba, pring,
3614 case CQE_CODE_RECEIVE:
3615 case CQE_CODE_RECEIVE_V1:
3616 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3618 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3625 /* Limit the number of events to 64 to avoid soft lockups */
3632 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3633 * @phba: Pointer to HBA context object.
3634 * @pring: Pointer to driver SLI ring object.
3636 * This function aborts all iocbs in the given ring and frees all the iocb
3637 * objects in txq. This function issues an abort iocb for all the iocb commands
3638 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3639 * the return of this function. The caller is not required to hold any locks.
3642 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3644 LIST_HEAD(completions);
3645 struct lpfc_iocbq *iocb, *next_iocb;
3647 if (pring->ringno == LPFC_ELS_RING) {
3648 lpfc_fabric_abort_hba(phba);
3651 /* Error everything on txq and txcmplq
3654 if (phba->sli_rev >= LPFC_SLI_REV4) {
3655 spin_lock_irq(&pring->ring_lock);
3656 list_splice_init(&pring->txq, &completions);
3658 spin_unlock_irq(&pring->ring_lock);
3660 spin_lock_irq(&phba->hbalock);
3661 /* Next issue ABTS for everything on the txcmplq */
3662 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3663 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3664 spin_unlock_irq(&phba->hbalock);
3666 spin_lock_irq(&phba->hbalock);
3667 list_splice_init(&pring->txq, &completions);
3670 /* Next issue ABTS for everything on the txcmplq */
3671 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3672 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3673 spin_unlock_irq(&phba->hbalock);
3676 /* Cancel all the IOCBs from the completions list */
3677 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3682 * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring
3683 * @phba: Pointer to HBA context object.
3684 * @pring: Pointer to driver SLI ring object.
3686 * This function aborts all iocbs in the given ring and frees all the iocb
3687 * objects in txq. This function issues an abort iocb for all the iocb commands
3688 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3689 * the return of this function. The caller is not required to hold any locks.
3692 lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3694 LIST_HEAD(completions);
3695 struct lpfc_iocbq *iocb, *next_iocb;
3697 if (pring->ringno == LPFC_ELS_RING)
3698 lpfc_fabric_abort_hba(phba);
3700 spin_lock_irq(&phba->hbalock);
3701 /* Next issue ABTS for everything on the txcmplq */
3702 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3703 lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3704 spin_unlock_irq(&phba->hbalock);
3709 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3710 * @phba: Pointer to HBA context object.
3713 * This function aborts all iocbs in FCP rings and frees all the iocb
3714 * objects in txq. This function issues an abort iocb for all the iocb commands
3715 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3716 * the return of this function. The caller is not required to hold any locks.
3719 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3721 struct lpfc_sli *psli = &phba->sli;
3722 struct lpfc_sli_ring *pring;
3725 /* Look on all the FCP Rings for the iotag */
3726 if (phba->sli_rev >= LPFC_SLI_REV4) {
3727 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3728 pring = phba->sli4_hba.fcp_wq[i]->pring;
3729 lpfc_sli_abort_iocb_ring(phba, pring);
3732 pring = &psli->sli3_ring[LPFC_FCP_RING];
3733 lpfc_sli_abort_iocb_ring(phba, pring);
3738 * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
3739 * @phba: Pointer to HBA context object.
3741 * This function aborts all wqes in NVME rings. This function issues an
3742 * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
3743 * the txcmplq are not guaranteed to complete before the return of this
3744 * function. The caller is not required to hold any locks.
3747 lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
3749 struct lpfc_sli_ring *pring;
3752 if (phba->sli_rev < LPFC_SLI_REV4)
3755 /* Abort all IO on each NVME ring. */
3756 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3757 pring = phba->sli4_hba.nvme_wq[i]->pring;
3758 lpfc_sli_abort_wqe_ring(phba, pring);
3764 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3765 * @phba: Pointer to HBA context object.
3767 * This function flushes all iocbs in the fcp ring and frees all the iocb
3768 * objects in txq and txcmplq. This function will not issue abort iocbs
3769 * for the iocb commands in txcmplq; they will just be returned with
3770 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3771 * slot has been permanently disabled.
3774 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3778 struct lpfc_sli *psli = &phba->sli;
3779 struct lpfc_sli_ring *pring;
3782 spin_lock_irq(&phba->hbalock);
3783 /* Indicate the I/O queues are flushed */
3784 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
3785 spin_unlock_irq(&phba->hbalock);
3787 /* Look on all the FCP Rings for the iotag */
3788 if (phba->sli_rev >= LPFC_SLI_REV4) {
3789 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3790 pring = phba->sli4_hba.fcp_wq[i]->pring;
3792 spin_lock_irq(&pring->ring_lock);
3793 /* Retrieve everything on txq */
3794 list_splice_init(&pring->txq, &txq);
3795 /* Retrieve everything on the txcmplq */
3796 list_splice_init(&pring->txcmplq, &txcmplq);
3798 pring->txcmplq_cnt = 0;
3799 spin_unlock_irq(&pring->ring_lock);
3802 lpfc_sli_cancel_iocbs(phba, &txq,
3803 IOSTAT_LOCAL_REJECT,
3805 /* Flush the txcmplq */
3806 lpfc_sli_cancel_iocbs(phba, &txcmplq,
3807 IOSTAT_LOCAL_REJECT,
3811 pring = &psli->sli3_ring[LPFC_FCP_RING];
3813 spin_lock_irq(&phba->hbalock);
3814 /* Retrieve everything on txq */
3815 list_splice_init(&pring->txq, &txq);
3816 /* Retrieve everything on the txcmplq */
3817 list_splice_init(&pring->txcmplq, &txcmplq);
3819 pring->txcmplq_cnt = 0;
3820 spin_unlock_irq(&phba->hbalock);
3823 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3825 /* Flush the txcmplq */
3826 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3832 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
3833 * @phba: Pointer to HBA context object.
3835 * This function flushes all wqes in the nvme rings and frees all resources
3836 * in the txcmplq. This function does not issue abort wqes for the IO
3837 * commands in txcmplq; they will just be returned with
3838 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3839 * slot has been permanently disabled.
3842 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
3845 struct lpfc_sli_ring *pring;
3848 if (phba->sli_rev < LPFC_SLI_REV4)
3851 /* Hint to other driver operations that a flush is in progress. */
3852 spin_lock_irq(&phba->hbalock);
3853 phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
3854 spin_unlock_irq(&phba->hbalock);
3856 /* Cycle through all NVME rings and complete each IO with
3857 * a local driver reason code. This is a flush so no
3858 * abort exchange is sent to the FW.
3860 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3861 pring = phba->sli4_hba.nvme_wq[i]->pring;
3863 /* Retrieve everything on the txcmplq */
3864 spin_lock_irq(&pring->ring_lock);
3865 list_splice_init(&pring->txcmplq, &txcmplq);
3866 pring->txcmplq_cnt = 0;
3867 spin_unlock_irq(&pring->ring_lock);
3869 /* Flush the txcmplq */
3870 lpfc_sli_cancel_iocbs(phba, &txcmplq,
3871 IOSTAT_LOCAL_REJECT,
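/*
 * Illustrative sketch: a flush, unlike an abort, never talks to the
 * firmware.  Both flush routines above follow the same shape - splice
 * the pending lists off the ring under the ring lock, then fail every
 * iocb locally with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN outside the lock.
 * Hypothetical condensed form for a single SLI4 ring:
 */
static void lpfc_example_flush_one_ring(struct lpfc_hba *phba,
                                        struct lpfc_sli_ring *pring)
{
        LIST_HEAD(txq);
        LIST_HEAD(txcmplq);

        spin_lock_irq(&pring->ring_lock);
        list_splice_init(&pring->txq, &txq);
        list_splice_init(&pring->txcmplq, &txcmplq);
        pring->txcmplq_cnt = 0;
        spin_unlock_irq(&pring->ring_lock);

        lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
                              IOERR_SLI_DOWN);
        lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
                              IOERR_SLI_DOWN);
}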
3877 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
3878 * @phba: Pointer to HBA context object.
3879 * @mask: Bit mask to be checked.
3881 * This function reads the host status register and compares it
3882 * with the provided bit mask to check if the HBA completed
3883 * the restart. This function will wait in a loop for the
3884 * HBA to complete the restart. If the HBA does not restart within
3885 * 15 iterations, the function will reset the HBA again. The
3886 * function returns 1 when the HBA fails to restart; otherwise it
3887 * returns zero.
3890 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3896 /* Read the HBA Host Status Register */
3897 if (lpfc_readl(phba->HSregaddr, &status))
3901 * Check status register every 100ms for 5 retries, then every
3902 * 500ms for 5, then every 2.5 sec for 5, then reset board and
3903 * every 2.5 sec for 4.
3904 * Break out of the loop if errors occurred during init.
3906 while (((status & mask) != mask) &&
3907 !(status & HS_FFERM) &&
3919 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3920 lpfc_sli_brdrestart(phba);
3922 /* Read the HBA Host Status Register */
3923 if (lpfc_readl(phba->HSregaddr, &status)) {
3929 /* Check to see if any errors occurred during init */
3930 if ((status & HS_FFERM) || (i >= 20)) {
3931 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3932 "2751 Adapter failed to restart, "
3933 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3935 readl(phba->MBslimaddr + 0xa8),
3936 readl(phba->MBslimaddr + 0xac));
3937 phba->link_state = LPFC_HBA_ERROR;
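/*
 * Illustrative sketch: the readiness poll above uses an escalating delay
 * (100 ms, then 500 ms, then 2.5 s per try) instead of a fixed tick, and
 * resets the board partway through the schedule.  Hypothetical delay
 * picker matching the schedule described in the function's comment:
 */
static unsigned long lpfc_example_brdready_delay_ms(int iteration)
{
        if (iteration < 5)
                return 100;     /* first 5 polls: 100 ms apart */
        if (iteration < 10)
                return 500;     /* next 5 polls: 500 ms apart */
        return 2500;            /* remaining polls: 2.5 s apart */
}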
3945 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3946 * @phba: Pointer to HBA context object.
3947 * @mask: Bit mask to be checked.
3949 * This function checks the host status register to determine whether the HBA
3950 * is ready. This function will wait in a loop for the HBA to become ready.
3951 * If the HBA is not ready, the function will reset the HBA PCI
3952 * function again. The function returns 1 when the HBA fails to become ready,
3953 * otherwise it returns zero.
3956 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3961 /* Read the HBA Host Status Register */
3962 status = lpfc_sli4_post_status_check(phba);
3965 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3966 lpfc_sli_brdrestart(phba);
3967 status = lpfc_sli4_post_status_check(phba);
3970 /* Check to see if any errors occurred during init */
3972 phba->link_state = LPFC_HBA_ERROR;
3975 phba->sli4_hba.intr_enable = 0;
3981 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3982 * @phba: Pointer to HBA context object.
3983 * @mask: Bit mask to be checked.
3985 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine,
3986 * invoked through the API jump table function pointer in the lpfc_hba struct.
3989 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3991 return phba->lpfc_sli_brdready(phba, mask);
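/*
 * A minimal sketch of how the jump-table entry used above is expected
 * to be populated during API setup, assuming the usual per-device-group
 * dispatch (the actual assignment lives in the driver's API table setup
 * code, not here):
 *
 *	switch (dev_grp) {
 *	case LPFC_PCI_DEV_LP:	// SLI3-era hardware
 *		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
 *		break;
 *	case LPFC_PCI_DEV_OC:	// SLI4-era hardware
 *		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
 *		break;
 *	}
 */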
3994 #define BARRIER_TEST_PATTERN (0xdeadbeef)
3997 * lpfc_reset_barrier - Make HBA ready for HBA reset
3998 * @phba: Pointer to HBA context object.
4000 * This function is called before resetting an HBA. It is invoked
4001 * with the hbalock held and requests the HBA to quiesce DMAs before the reset.
4003 void lpfc_reset_barrier(struct lpfc_hba *phba)
4005 uint32_t __iomem *resp_buf;
4006 uint32_t __iomem *mbox_buf;
4007 volatile uint32_t mbox;
4008 uint32_t hc_copy, ha_copy, resp_data;
4012 lockdep_assert_held(&phba->hbalock);
4014 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4015 if (hdrtype != 0x80 ||
4016 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4017 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4021 * Tell the other part of the chip to suspend temporarily all
4024 resp_buf = phba->MBslimaddr;
4026 /* Disable the error attention */
4027 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4029 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4030 readl(phba->HCregaddr); /* flush */
4031 phba->link_flag |= LS_IGNORE_ERATT;
4033 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4035 if (ha_copy & HA_ERATT) {
4036 /* Clear Chip error bit */
4037 writel(HA_ERATT, phba->HAregaddr);
4038 phba->pport->stopped = 1;
4042 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4043 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4045 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4046 mbox_buf = phba->MBslimaddr;
4047 writel(mbox, mbox_buf);
4049 for (i = 0; i < 50; i++) {
4050 if (lpfc_readl((resp_buf + 1), &resp_data))
4052 if (resp_data != ~(BARRIER_TEST_PATTERN))
4058 if (lpfc_readl((resp_buf + 1), &resp_data))
4060 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4061 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4062 phba->pport->stopped)
4068 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4070 for (i = 0; i < 500; i++) {
4071 if (lpfc_readl(resp_buf, &resp_data))
4073 if (resp_data != mbox)
4082 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4084 if (!(ha_copy & HA_ERATT))
4090 if (readl(phba->HAregaddr) & HA_ERATT) {
4091 writel(HA_ERATT, phba->HAregaddr);
4092 phba->pport->stopped = 1;
4096 phba->link_flag &= ~LS_IGNORE_ERATT;
4097 writel(hc_copy, phba->HCregaddr);
4098 readl(phba->HCregaddr); /* flush */
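/*
 * Example call site (illustrative): the barrier is issued under the
 * hbalock immediately before the restart word is written to SLIM, as
 * the SLI-3 restart path later in this file does:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	lpfc_reset_barrier(phba);
 *	// ... write MBX_RESTART to the SLIM and reset the board ...
 *	spin_unlock_irq(&phba->hbalock);
 */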
4102 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4103 * @phba: Pointer to HBA context object.
4105 * This function issues a kill_board mailbox command and waits for
4106 * the error attention interrupt. This function is called for stopping
4107 * the firmware processing. The caller is not required to hold any
4108 * locks. This function calls lpfc_hba_down_post function to free
4109 * any pending commands after the kill. The function returns 1 if it
4110 * fails to kill the board, else it returns 0.
4113 lpfc_sli_brdkill(struct lpfc_hba *phba)
4115 struct lpfc_sli *psli;
4125 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4126 "0329 Kill HBA Data: x%x x%x\n",
4127 phba->pport->port_state, psli->sli_flag);
4129 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4133 /* Disable the error attention */
4134 spin_lock_irq(&phba->hbalock);
4135 if (lpfc_readl(phba->HCregaddr, &status)) {
4136 spin_unlock_irq(&phba->hbalock);
4137 mempool_free(pmb, phba->mbox_mem_pool);
4140 status &= ~HC_ERINT_ENA;
4141 writel(status, phba->HCregaddr);
4142 readl(phba->HCregaddr); /* flush */
4143 phba->link_flag |= LS_IGNORE_ERATT;
4144 spin_unlock_irq(&phba->hbalock);
4146 lpfc_kill_board(phba, pmb);
4147 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4148 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4150 if (retval != MBX_SUCCESS) {
4151 if (retval != MBX_BUSY)
4152 mempool_free(pmb, phba->mbox_mem_pool);
4153 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4154 "2752 KILL_BOARD command failed retval %d\n",
4156 spin_lock_irq(&phba->hbalock);
4157 phba->link_flag &= ~LS_IGNORE_ERATT;
4158 spin_unlock_irq(&phba->hbalock);
4162 spin_lock_irq(&phba->hbalock);
4163 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4164 spin_unlock_irq(&phba->hbalock);
4166 mempool_free(pmb, phba->mbox_mem_pool);
4168 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4169 * attention every 100ms for 3 seconds. If we don't get ERATT after
4170 * 3 seconds we still set HBA_ERROR state because the status of the
4171 * board is now undefined.
4173 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4175 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4177 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4181 del_timer_sync(&psli->mbox_tmo);
4182 if (ha_copy & HA_ERATT) {
4183 writel(HA_ERATT, phba->HAregaddr);
4184 phba->pport->stopped = 1;
4186 spin_lock_irq(&phba->hbalock);
4187 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4188 psli->mbox_active = NULL;
4189 phba->link_flag &= ~LS_IGNORE_ERATT;
4190 spin_unlock_irq(&phba->hbalock);
4192 lpfc_hba_down_post(phba);
4193 phba->link_state = LPFC_HBA_ERROR;
4195 return ha_copy & HA_ERATT ? 0 : 1;
4199 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4200 * @phba: Pointer to HBA context object.
4202 * This function resets the HBA by writing HC_INITFF to the control
4203 * register. After the HBA resets, this function resets all the iocb ring
4204 * indices. This function disables PCI layer parity checking during
4206 * the reset. This function returns 0 always.
4207 * The caller is not required to hold any locks.
4210 lpfc_sli_brdreset(struct lpfc_hba *phba)
4212 struct lpfc_sli *psli;
4213 struct lpfc_sli_ring *pring;
4220 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4221 "0325 Reset HBA Data: x%x x%x\n",
4222 (phba->pport) ? phba->pport->port_state : 0,
4225 /* perform board reset */
4226 phba->fc_eventTag = 0;
4227 phba->link_events = 0;
4229 phba->pport->fc_myDID = 0;
4230 phba->pport->fc_prevDID = 0;
4233 /* Turn off parity checking and serr during the physical reset */
4234 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4235 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4237 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4239 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4241 /* Now toggle INITFF bit in the Host Control Register */
4242 writel(HC_INITFF, phba->HCregaddr);
4244 readl(phba->HCregaddr); /* flush */
4245 writel(0, phba->HCregaddr);
4246 readl(phba->HCregaddr); /* flush */
4248 /* Restore PCI cmd register */
4249 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4251 /* Initialize relevant SLI info */
4252 for (i = 0; i < psli->num_rings; i++) {
4253 pring = &psli->sli3_ring[i];
4255 pring->sli.sli3.rspidx = 0;
4256 pring->sli.sli3.next_cmdidx = 0;
4257 pring->sli.sli3.local_getidx = 0;
4258 pring->sli.sli3.cmdidx = 0;
4259 pring->missbufcnt = 0;
4262 phba->link_state = LPFC_WARM_START;
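/*
 * The save/disable/restore of PCI_COMMAND above is the standard idiom
 * for keeping parity/SERR error reporting quiet across a self-inflicted
 * reset. A generic sketch of the same pattern (pdev is hypothetical):
 *
 *	u16 cmd;
 *
 *	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
 *	pci_write_config_word(pdev, PCI_COMMAND,
 *			      cmd & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));
 *	// ... reset the device ...
 *	pci_write_config_word(pdev, PCI_COMMAND, cmd);
 */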
4267 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4268 * @phba: Pointer to HBA context object.
4270 * This function resets a SLI4 HBA. This function disables PCI layer parity
4271 * checking while it resets the device. The caller is not required to hold
4274 * any locks. This function returns 0 always.
4277 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4279 struct lpfc_sli *psli = &phba->sli;
4284 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4285 "0295 Reset HBA Data: x%x x%x x%x\n",
4286 phba->pport->port_state, psli->sli_flag,
4289 /* perform board reset */
4290 phba->fc_eventTag = 0;
4291 phba->link_events = 0;
4292 phba->pport->fc_myDID = 0;
4293 phba->pport->fc_prevDID = 0;
4295 spin_lock_irq(&phba->hbalock);
4296 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4297 phba->fcf.fcf_flag = 0;
4298 spin_unlock_irq(&phba->hbalock);
4300 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4301 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4302 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4306 /* Now physically reset the device */
4307 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4308 "0389 Performing PCI function reset!\n");
4310 /* Turn off parity checking and serr during the physical reset */
4311 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4312 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4313 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4315 /* Perform FCoE PCI function reset before freeing queue memory */
4316 rc = lpfc_pci_function_reset(phba);
4318 /* Restore PCI cmd register */
4319 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4325 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4326 * @phba: Pointer to HBA context object.
4328 * This function is called in the SLI initialization code path to
4329 * restart the HBA. The caller is not required to hold any lock.
4330 * This function writes MBX_RESTART mailbox command to the SLIM and
4331 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4332 * function to free any pending commands. The function enables
4333 * POST only during the first initialization. The function returns zero.
4334 * The function does not guarantee that the MBX_RESTART mailbox
4335 * command completes before this function returns.
4338 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4341 struct lpfc_sli *psli;
4342 volatile uint32_t word0;
4343 void __iomem *to_slim;
4344 uint32_t hba_aer_enabled;
4346 spin_lock_irq(&phba->hbalock);
4348 /* Take PCIe device Advanced Error Reporting (AER) state */
4349 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4354 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4355 "0337 Restart HBA Data: x%x x%x\n",
4356 (phba->pport) ? phba->pport->port_state : 0,
4360 mb = (MAILBOX_t *) &word0;
4361 mb->mbxCommand = MBX_RESTART;
4364 lpfc_reset_barrier(phba);
4366 to_slim = phba->MBslimaddr;
4367 writel(*(uint32_t *) mb, to_slim);
4368 readl(to_slim); /* flush */
4370 /* Only skip post after fc_ffinit is completed */
4371 if (phba->pport && phba->pport->port_state)
4372 word0 = 1; /* This is really setting up word1 */
4374 word0 = 0; /* This is really setting up word1 */
4375 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4376 writel(*(uint32_t *) mb, to_slim);
4377 readl(to_slim); /* flush */
4379 lpfc_sli_brdreset(phba);
4381 phba->pport->stopped = 0;
4382 phba->link_state = LPFC_INIT_START;
4384 spin_unlock_irq(&phba->hbalock);
4386 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4387 psli->stats_start = get_seconds();
4389 /* Give the INITFF and Post time to settle. */
4392 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4393 if (hba_aer_enabled)
4394 pci_disable_pcie_error_reporting(phba->pcidev);
4396 lpfc_hba_down_post(phba);
4402 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4403 * @phba: Pointer to HBA context object.
4405 * This function is called in the SLI initialization code path to restart
4406 * a SLI4 HBA. The caller is not required to hold any lock.
4407 * At the end of the function, it calls lpfc_hba_down_post function to
4408 * free any pending commands.
4411 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4413 struct lpfc_sli *psli = &phba->sli;
4414 uint32_t hba_aer_enabled;
4418 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4419 "0296 Restart HBA Data: x%x x%x\n",
4420 phba->pport->port_state, psli->sli_flag);
4422 /* Take PCIe device Advanced Error Reporting (AER) state */
4423 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4425 rc = lpfc_sli4_brdreset(phba);
4429 spin_lock_irq(&phba->hbalock);
4430 phba->pport->stopped = 0;
4431 phba->link_state = LPFC_INIT_START;
4433 spin_unlock_irq(&phba->hbalock);
4435 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4436 psli->stats_start = get_seconds();
4438 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4439 if (hba_aer_enabled)
4440 pci_disable_pcie_error_reporting(phba->pcidev);
4442 lpfc_hba_down_post(phba);
4443 lpfc_sli4_queue_destroy(phba);
4449 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4450 * @phba: Pointer to HBA context object.
4452 * This routine wraps the actual SLI3 or SLI4 hba restart routine, invoked
4453 * through the API jump table function pointer in the lpfc_hba struct.
4456 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4458 return phba->lpfc_sli_brdrestart(phba);
4462 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4463 * @phba: Pointer to HBA context object.
4465 * This function is called after an HBA restart to wait for a successful
4466 * restart of the HBA. A successful restart of the HBA is indicated by
4467 * the HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4468 * iterations, the function will restart the HBA again. The function returns
4469 * zero if the HBA restarted successfully, else it returns a negative error code.
4472 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4474 uint32_t status, i = 0;
4476 /* Read the HBA Host Status Register */
4477 if (lpfc_readl(phba->HSregaddr, &status))
4480 /* Check status register to see what current state is */
4482 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4484 /* Check every 10ms for 10 retries, then every 100ms for 90
4485 * retries, then every 1 sec for 50 retries, for a total of
4486 * ~60 seconds, before resetting the board again and checking
4487 * every 1 sec for 50 more retries. The up-to-60-second wait
4488 * before board ready is required for the Falcon FIPS zeroization
4489 * to complete; any board reset in between would restart the
4490 * zeroization and further delay board readiness.
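/*
 * For reference, the schedule above works out to roughly
 * 10 * 10ms + 90 * 100ms + 50 * 1s = 59.1 seconds before the board is
 * reset, followed by up to 50 more one-second polls afterwards.
 */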
4493 /* Adapter failed to init, timeout, status reg
4495 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4496 "0436 Adapter failed to init, "
4497 "timeout, status reg x%x, "
4498 "FW Data: A8 x%x AC x%x\n", status,
4499 readl(phba->MBslimaddr + 0xa8),
4500 readl(phba->MBslimaddr + 0xac));
4501 phba->link_state = LPFC_HBA_ERROR;
4505 /* Check to see if any errors occurred during init */
4506 if (status & HS_FFERM) {
4507 /* ERROR: During chipset initialization */
4508 /* Adapter failed to init, chipset, status reg
4510 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4511 "0437 Adapter failed to init, "
4512 "chipset, status reg x%x, "
4513 "FW Data: A8 x%x AC x%x\n", status,
4514 readl(phba->MBslimaddr + 0xa8),
4515 readl(phba->MBslimaddr + 0xac));
4516 phba->link_state = LPFC_HBA_ERROR;
4529 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4530 lpfc_sli_brdrestart(phba);
4532 /* Read the HBA Host Status Register */
4533 if (lpfc_readl(phba->HSregaddr, &status))
4537 /* Check to see if any errors occurred during init */
4538 if (status & HS_FFERM) {
4539 /* ERROR: During chipset initialization */
4540 /* Adapter failed to init, chipset, status reg <status> */
4541 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4542 "0438 Adapter failed to init, chipset, "
4544 "FW Data: A8 x%x AC x%x\n", status,
4545 readl(phba->MBslimaddr + 0xa8),
4546 readl(phba->MBslimaddr + 0xac));
4547 phba->link_state = LPFC_HBA_ERROR;
4551 /* Clear all interrupt enable conditions */
4552 writel(0, phba->HCregaddr);
4553 readl(phba->HCregaddr); /* flush */
4555 /* setup host attn register */
4556 writel(0xffffffff, phba->HAregaddr);
4557 readl(phba->HAregaddr); /* flush */
4562 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4564 * This function calculates and returns the number of HBQs required to be configured.
4568 lpfc_sli_hbq_count(void)
4570 return ARRAY_SIZE(lpfc_hbq_defs);
4574 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4576 * This function adds the number of hbq entries in every HBQ to get
4577 * the total number of hbq entries required for the HBA and returns the result.
4581 lpfc_sli_hbq_entry_count(void)
4583 int hbq_count = lpfc_sli_hbq_count();
4587 for (i = 0; i < hbq_count; ++i)
4588 count += lpfc_hbq_defs[i]->entry_count;
4593 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4595 * This function calculates the amount of memory required for all hbq entries
4596 * to be configured and returns the total memory required.
4599 lpfc_sli_hbq_size(void)
4601 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
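/*
 * Worked example (hypothetical numbers): with two HBQs of 256 and 16
 * entries and a 16-byte struct lpfc_hbq_entry, the helpers above yield
 * (256 + 16) * 16 = 4352 bytes of HBQ memory.
 */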
4605 * lpfc_sli_hbq_setup - configure and initialize HBQs
4606 * @phba: Pointer to HBA context object.
4608 * This function is called during the SLI initialization to configure
4609 * all the HBQs and post buffers to the HBQ. The caller is not
4610 * required to hold any locks. This function will return zero if successful
4611 * else it will return a negative error code.
4614 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4616 int hbq_count = lpfc_sli_hbq_count();
4620 uint32_t hbq_entry_index;
4622 /* Get a Mailbox buffer to setup mailbox
4623 * commands for HBA initialization
4625 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4632 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4633 phba->link_state = LPFC_INIT_MBX_CMDS;
4634 phba->hbq_in_use = 1;
4636 hbq_entry_index = 0;
4637 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4638 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4639 phba->hbqs[hbqno].hbqPutIdx = 0;
4640 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4641 phba->hbqs[hbqno].entry_count =
4642 lpfc_hbq_defs[hbqno]->entry_count;
4643 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4644 hbq_entry_index, pmb);
4645 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4647 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4648 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4649 mbxStatus <status>, ring <num> */
4651 lpfc_printf_log(phba, KERN_ERR,
4652 LOG_SLI | LOG_VPORT,
4653 "1805 Adapter failed to init. "
4654 "Data: x%x x%x x%x\n",
4656 pmbox->mbxStatus, hbqno);
4658 phba->link_state = LPFC_HBA_ERROR;
4659 mempool_free(pmb, phba->mbox_mem_pool);
4663 phba->hbq_count = hbq_count;
4665 mempool_free(pmb, phba->mbox_mem_pool);
4667 /* Initially populate or replenish the HBQs */
4668 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4669 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4674 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4675 * @phba: Pointer to HBA context object.
4677 * This function is called during the SLI initialization to configure
4678 * all the HBQs and post buffers to the HBQ. The caller is not
4679 * required to hold any locks. This function will return zero if successful
4680 * else it will return a negative error code.
4683 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4685 phba->hbq_in_use = 1;
4686 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4687 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4688 phba->hbq_count = 1;
4689 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4690 /* Initially populate or replenish the HBQs */
4695 * lpfc_sli_config_port - Issue config port mailbox command
4696 * @phba: Pointer to HBA context object.
4697 * @sli_mode: sli mode - 2/3
4699 * This function is called by the sli initialization code path
4700 * to issue config_port mailbox command. This function restarts the
4701 * HBA firmware and issues a config_port mailbox command to configure
4702 * the SLI interface in the sli mode specified by sli_mode
4703 * variable. The caller is not required to hold any locks.
4704 * The function returns 0 if successful, else it returns a negative error code.
4708 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4711 uint32_t resetcount = 0, rc = 0, done = 0;
4713 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4715 phba->link_state = LPFC_HBA_ERROR;
4719 phba->sli_rev = sli_mode;
4720 while (resetcount < 2 && !done) {
4721 spin_lock_irq(&phba->hbalock);
4722 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4723 spin_unlock_irq(&phba->hbalock);
4724 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4725 lpfc_sli_brdrestart(phba);
4726 rc = lpfc_sli_chipset_init(phba);
4730 spin_lock_irq(&phba->hbalock);
4731 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4732 spin_unlock_irq(&phba->hbalock);
4735 /* Call pre CONFIG_PORT mailbox command initialization. A
4736 * value of 0 means the call was successful. Any other
4737 * nonzero value is a failure, but if ERESTART is returned,
4738 * the driver may reset the HBA and try again.
4740 rc = lpfc_config_port_prep(phba);
4741 if (rc == -ERESTART) {
4742 phba->link_state = LPFC_LINK_UNKNOWN;
4747 phba->link_state = LPFC_INIT_MBX_CMDS;
4748 lpfc_config_port(phba, pmb);
4749 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4750 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4751 LPFC_SLI3_HBQ_ENABLED |
4752 LPFC_SLI3_CRP_ENABLED |
4753 LPFC_SLI3_BG_ENABLED |
4754 LPFC_SLI3_DSS_ENABLED);
4755 if (rc != MBX_SUCCESS) {
4756 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4757 "0442 Adapter failed to init, mbxCmd x%x "
4758 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4759 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
4760 spin_lock_irq(&phba->hbalock);
4761 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4762 spin_unlock_irq(&phba->hbalock);
4765 /* Allow asynchronous mailbox command to go through */
4766 spin_lock_irq(&phba->hbalock);
4767 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4768 spin_unlock_irq(&phba->hbalock);
4771 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4772 (pmb->u.mb.un.varCfgPort.gasabt == 0))
4773 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4774 "3110 Port did not grant ASABT\n");
4779 goto do_prep_failed;
4781 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4782 if (!pmb->u.mb.un.varCfgPort.cMA) {
4784 goto do_prep_failed;
4786 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
4787 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
4788 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4789 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4790 phba->max_vpi : phba->max_vports;
4794 phba->fips_level = 0;
4795 phba->fips_spec_rev = 0;
4796 if (pmb->u.mb.un.varCfgPort.gdss) {
4797 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
4798 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4799 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4800 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4801 "2850 Security Crypto Active. FIPS x%d "
4803 phba->fips_level, phba->fips_spec_rev);
4805 if (pmb->u.mb.un.varCfgPort.sec_err) {
4806 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4807 "2856 Config Port Security Crypto "
4809 pmb->u.mb.un.varCfgPort.sec_err);
4811 if (pmb->u.mb.un.varCfgPort.gerbm)
4812 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
4813 if (pmb->u.mb.un.varCfgPort.gcrp)
4814 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
4816 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4817 phba->port_gp = phba->mbox->us.s3_pgp.port;
4819 if (phba->cfg_enable_bg) {
4820 if (pmb->u.mb.un.varCfgPort.gbg)
4821 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4823 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4824 "0443 Adapter did not grant "
4828 phba->hbq_get = NULL;
4829 phba->port_gp = phba->mbox->us.s2.port;
4833 mempool_free(pmb, phba->mbox_mem_pool);
4839 * lpfc_sli_hba_setup - SLI initialization function
4840 * @phba: Pointer to HBA context object.
4842 * This function is the main SLI initialization function. This function
4843 * is called by the HBA initialization code, HBA reset code and HBA
4844 * error attention handler code. Caller is not required to hold any
4845 * locks. This function issues config_port mailbox command to configure
4846 * the SLI, setup iocb rings and HBQ rings. In the end the function
4847 * calls the config_port_post function to issue init_link mailbox
4848 * command and to start the discovery. The function will return zero
4849 * if successful, else it will return a negative error code.
4852 lpfc_sli_hba_setup(struct lpfc_hba *phba)
4858 switch (phba->cfg_sli_mode) {
4860 if (phba->cfg_enable_npiv) {
4861 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4862 "1824 NPIV enabled: Override sli_mode "
4863 "parameter (%d) to auto (0).\n",
4864 phba->cfg_sli_mode);
4873 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4874 "1819 Unrecognized sli_mode parameter: %d.\n",
4875 phba->cfg_sli_mode);
4879 phba->fcp_embed_io = 0; /* SLI4 FC support only */
4881 rc = lpfc_sli_config_port(phba, mode);
4883 if (rc && phba->cfg_sli_mode == 3)
4884 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4885 "1820 Unable to select SLI-3. "
4886 "Not supported by adapter.\n");
4887 if (rc && mode != 2)
4888 rc = lpfc_sli_config_port(phba, 2);
4889 else if (rc && mode == 2)
4890 rc = lpfc_sli_config_port(phba, 3);
4892 goto lpfc_sli_hba_setup_error;
4894 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4895 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4896 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4898 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4899 "2709 This device supports "
4900 "Advanced Error Reporting (AER)\n");
4901 spin_lock_irq(&phba->hbalock);
4902 phba->hba_flag |= HBA_AER_ENABLED;
4903 spin_unlock_irq(&phba->hbalock);
4905 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4906 "2708 This device does not support "
4907 "Advanced Error Reporting (AER): %d\n",
4909 phba->cfg_aer_support = 0;
4913 if (phba->sli_rev == 3) {
4914 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4915 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
4917 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4918 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
4919 phba->sli3_options = 0;
4922 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4923 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
4924 phba->sli_rev, phba->max_vpi);
4925 rc = lpfc_sli_ring_map(phba);
4928 goto lpfc_sli_hba_setup_error;
4930 /* Initialize VPIs. */
4931 if (phba->sli_rev == LPFC_SLI_REV3) {
4933 * The VPI bitmask and physical ID array are allocated
4934 * and initialized once only - at driver load. A port
4935 * reset doesn't need to reinitialize this memory.
4937 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4938 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4939 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4941 if (!phba->vpi_bmask) {
4943 goto lpfc_sli_hba_setup_error;
4946 phba->vpi_ids = kzalloc(
4947 (phba->max_vpi+1) * sizeof(uint16_t),
4949 if (!phba->vpi_ids) {
4950 kfree(phba->vpi_bmask);
4952 goto lpfc_sli_hba_setup_error;
4954 for (i = 0; i < phba->max_vpi; i++)
4955 phba->vpi_ids[i] = i;
4960 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4961 rc = lpfc_sli_hbq_setup(phba);
4963 goto lpfc_sli_hba_setup_error;
4965 spin_lock_irq(&phba->hbalock);
4966 phba->sli.sli_flag |= LPFC_PROCESS_LA;
4967 spin_unlock_irq(&phba->hbalock);
4969 rc = lpfc_config_port_post(phba);
4971 goto lpfc_sli_hba_setup_error;
4975 lpfc_sli_hba_setup_error:
4976 phba->link_state = LPFC_HBA_ERROR;
4977 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4978 "0445 Firmware initialization failed\n");
4983 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4984 * @phba: Pointer to HBA context object.
4985 * @mboxq: mailbox pointer.
4986 * This function issues a dump mailbox command to read config region
4987 * 23, parses the records in the region, and populates the driver data structures.
4991 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
4993 LPFC_MBOXQ_t *mboxq;
4994 struct lpfc_dmabuf *mp;
4995 struct lpfc_mqe *mqe;
4996 uint32_t data_length;
4999 /* Program the default value of vlan_id and fc_map */
5000 phba->valid_vlan = 0;
5001 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5002 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5003 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5005 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5009 mqe = &mboxq->u.mqe;
5010 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5012 goto out_free_mboxq;
5015 mp = (struct lpfc_dmabuf *) mboxq->context1;
5016 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5018 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5019 "(%d):2571 Mailbox cmd x%x Status x%x "
5020 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5021 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5022 "CQ: x%x x%x x%x x%x\n",
5023 mboxq->vport ? mboxq->vport->vpi : 0,
5024 bf_get(lpfc_mqe_command, mqe),
5025 bf_get(lpfc_mqe_status, mqe),
5026 mqe->un.mb_words[0], mqe->un.mb_words[1],
5027 mqe->un.mb_words[2], mqe->un.mb_words[3],
5028 mqe->un.mb_words[4], mqe->un.mb_words[5],
5029 mqe->un.mb_words[6], mqe->un.mb_words[7],
5030 mqe->un.mb_words[8], mqe->un.mb_words[9],
5031 mqe->un.mb_words[10], mqe->un.mb_words[11],
5032 mqe->un.mb_words[12], mqe->un.mb_words[13],
5033 mqe->un.mb_words[14], mqe->un.mb_words[15],
5034 mqe->un.mb_words[16], mqe->un.mb_words[50],
5036 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5037 mboxq->mcqe.trailer);
5040 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5043 goto out_free_mboxq;
5045 data_length = mqe->un.mb_words[5];
5046 if (data_length > DMP_RGN23_SIZE) {
5047 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5050 goto out_free_mboxq;
5053 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5054 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5059 mempool_free(mboxq, phba->mbox_mem_pool);
5064 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5065 * @phba: pointer to lpfc hba data structure.
5066 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5067 * @vpd: pointer to the memory to hold resulting port vpd data.
5068 * @vpd_size: On input, the number of bytes allocated to @vpd.
5069 * On output, the number of data bytes in @vpd.
5071 * This routine executes a READ_REV SLI4 mailbox command. In
5072 * addition, this routine gets the port vpd data.
5076 * -ENOMEM - could not allocate memory.
5079 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5080 uint8_t *vpd, uint32_t *vpd_size)
5084 struct lpfc_dmabuf *dmabuf;
5085 struct lpfc_mqe *mqe;
5087 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5092 * Get a DMA buffer for the vpd data resulting from the READ_REV
5095 dma_size = *vpd_size;
5096 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
5097 &dmabuf->phys, GFP_KERNEL);
5098 if (!dmabuf->virt) {
5104 * The SLI4 implementation of READ_REV conflicts at word1,
5105 * bits 31:16 and SLI4 adds vpd functionality not present
5106 * in SLI3. This code corrects the conflicts.
5108 lpfc_read_rev(phba, mboxq);
5109 mqe = &mboxq->u.mqe;
5110 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5111 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5112 mqe->un.read_rev.word1 &= 0x0000FFFF;
5113 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5114 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5116 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5118 dma_free_coherent(&phba->pcidev->dev, dma_size,
5119 dmabuf->virt, dmabuf->phys);
5125 * The available vpd length cannot be bigger than the
5126 * DMA buffer passed to the port. Catch the less than
5127 * case and update the caller's size.
5129 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5130 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5132 memcpy(vpd, dmabuf->virt, *vpd_size);
5134 dma_free_coherent(&phba->pcidev->dev, dma_size,
5135 dmabuf->virt, dmabuf->phys);
5141 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5142 * @phba: pointer to lpfc hba data structure.
5144 * This routine retrieves the SLI4 device physical port name for this PCI function.
5149 * otherwise - failed to retrieve physical port name
5152 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5154 LPFC_MBOXQ_t *mboxq;
5155 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5156 struct lpfc_controller_attribute *cntl_attr;
5157 struct lpfc_mbx_get_port_name *get_port_name;
5158 void *virtaddr = NULL;
5159 uint32_t alloclen, reqlen;
5160 uint32_t shdr_status, shdr_add_status;
5161 union lpfc_sli4_cfg_shdr *shdr;
5162 char cport_name = 0;
5165 /* We assume nothing at this point */
5166 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5167 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5169 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5172 /* obtain link type and link number via READ_CONFIG */
5173 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5174 lpfc_sli4_read_config(phba);
5175 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5176 goto retrieve_ppname;
5178 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5179 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5180 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5181 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5182 LPFC_SLI4_MBX_NEMBED);
5183 if (alloclen < reqlen) {
5184 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5185 "3084 Allocated DMA memory size (%d) is "
5186 "less than the requested DMA memory size "
5187 "(%d)\n", alloclen, reqlen);
5189 goto out_free_mboxq;
5191 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5192 virtaddr = mboxq->sge_array->addr[0];
5193 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5194 shdr = &mbx_cntl_attr->cfg_shdr;
5195 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5196 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5197 if (shdr_status || shdr_add_status || rc) {
5198 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5199 "3085 Mailbox x%x (x%x/x%x) failed, "
5200 "rc:x%x, status:x%x, add_status:x%x\n",
5201 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5202 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5203 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5204 rc, shdr_status, shdr_add_status);
5206 goto out_free_mboxq;
5208 cntl_attr = &mbx_cntl_attr->cntl_attr;
5209 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5210 phba->sli4_hba.lnk_info.lnk_tp =
5211 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5212 phba->sli4_hba.lnk_info.lnk_no =
5213 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5214 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5215 "3086 lnk_type:%d, lnk_numb:%d\n",
5216 phba->sli4_hba.lnk_info.lnk_tp,
5217 phba->sli4_hba.lnk_info.lnk_no);
5220 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5221 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5222 sizeof(struct lpfc_mbx_get_port_name) -
5223 sizeof(struct lpfc_sli4_cfg_mhdr),
5224 LPFC_SLI4_MBX_EMBED);
5225 get_port_name = &mboxq->u.mqe.un.get_port_name;
5226 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5227 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5228 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5229 phba->sli4_hba.lnk_info.lnk_tp);
5230 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5231 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5232 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5233 if (shdr_status || shdr_add_status || rc) {
5234 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5235 "3087 Mailbox x%x (x%x/x%x) failed: "
5236 "rc:x%x, status:x%x, add_status:x%x\n",
5237 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5238 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5239 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5240 rc, shdr_status, shdr_add_status);
5242 goto out_free_mboxq;
5244 switch (phba->sli4_hba.lnk_info.lnk_no) {
5245 case LPFC_LINK_NUMBER_0:
5246 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5247 &get_port_name->u.response);
5248 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5250 case LPFC_LINK_NUMBER_1:
5251 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5252 &get_port_name->u.response);
5253 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5255 case LPFC_LINK_NUMBER_2:
5256 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5257 &get_port_name->u.response);
5258 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5260 case LPFC_LINK_NUMBER_3:
5261 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5262 &get_port_name->u.response);
5263 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5269 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5270 phba->Port[0] = cport_name;
5271 phba->Port[1] = '\0';
5272 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5273 "3091 SLI get port name: %s\n", phba->Port);
5277 if (rc != MBX_TIMEOUT) {
5278 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5279 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5281 mempool_free(mboxq, phba->mbox_mem_pool);
5287 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5288 * @phba: pointer to lpfc hba data structure.
5290 * This routine is called to explicitly arm the SLI4 device's completion and event queues.
5294 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5298 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
5299 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
5300 if (phba->sli4_hba.nvmels_cq)
5301 lpfc_sli4_cq_release(phba->sli4_hba.nvmels_cq,
5304 if (phba->sli4_hba.fcp_cq)
5305 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
5306 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[qidx],
5309 if (phba->sli4_hba.nvme_cq)
5310 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
5311 lpfc_sli4_cq_release(phba->sli4_hba.nvme_cq[qidx],
5315 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
5317 if (phba->sli4_hba.hba_eq)
5318 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
5319 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[qidx],
5322 if (phba->nvmet_support) {
5323 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5324 lpfc_sli4_cq_release(
5325 phba->sli4_hba.nvmet_cqset[qidx],
5331 lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
5335 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5336 * @phba: Pointer to HBA context object.
5337 * @type: The resource extent type.
5338 * @extnt_count: buffer to hold port available extent count.
5339 * @extnt_size: buffer to hold element count per extent.
5341 * This function calls the port and retrieves the number of available
5342 * extents and their size for a particular extent type.
5344 * Returns: 0 if successful. Nonzero otherwise.
5347 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5348 uint16_t *extnt_count, uint16_t *extnt_size)
5353 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5356 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5360 /* Find out how many extents are available for this resource type */
5361 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5362 sizeof(struct lpfc_sli4_cfg_mhdr));
5363 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5364 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5365 length, LPFC_SLI4_MBX_EMBED);
5367 /* Send an extents count of 0 - the GET doesn't use it. */
5368 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5369 LPFC_SLI4_MBX_EMBED);
5375 if (!phba->sli4_hba.intr_enable)
5376 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5378 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5379 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5386 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5387 if (bf_get(lpfc_mbox_hdr_status,
5388 &rsrc_info->header.cfg_shdr.response)) {
5389 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5390 "2930 Failed to get resource extents "
5391 "Status 0x%x Add'l Status 0x%x\n",
5392 bf_get(lpfc_mbox_hdr_status,
5393 &rsrc_info->header.cfg_shdr.response),
5394 bf_get(lpfc_mbox_hdr_add_status,
5395 &rsrc_info->header.cfg_shdr.response));
5400 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5402 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5405 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5406 "3162 Retrieved extents type-%d from port: count:%d, "
5407 "size:%d\n", type, *extnt_count, *extnt_size);
5410 mempool_free(mbox, phba->mbox_mem_pool);
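/*
 * Example caller pattern (illustrative): the total number of usable ids
 * of a type is the product of the two values returned above.
 *
 *	uint16_t cnt, size;
 *
 *	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &cnt, &size);
 *	if (!rc)
 *		total_xri = cnt * size;	// total_xri is hypothetical
 */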
5415 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5416 * @phba: Pointer to HBA context object.
5417 * @type: The extent type to check.
5419 * This function reads the current available extents from the port and checks
5420 * if the extent count or extent size has changed since the last access.
5421 * Callers use this routine after a port reset to understand if there is an
5422 * extent reprovisioning requirement.
5425 * -Error: error value indicates a problem.
5426 * 1: Extent count or size has changed.
5430 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5432 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5433 uint16_t size_diff, rsrc_ext_size;
5435 struct lpfc_rsrc_blks *rsrc_entry;
5436 struct list_head *rsrc_blk_list = NULL;
5440 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5447 case LPFC_RSC_TYPE_FCOE_RPI:
5448 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5450 case LPFC_RSC_TYPE_FCOE_VPI:
5451 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5453 case LPFC_RSC_TYPE_FCOE_XRI:
5454 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5456 case LPFC_RSC_TYPE_FCOE_VFI:
5457 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5463 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5465 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5469 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5476 * lpfc_sli4_cfg_post_extnts - Post an extents allocation request
5477 * @phba: Pointer to HBA context object.
5478 * @extnt_cnt: number of available extents.
5479 * @type: the extent type (rpi, xri, vfi, vpi).
5480 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5481 * @mbox: pointer to the caller's allocated mailbox structure.
5483 * This function executes the extents allocation request. It also
5484 * takes care of the amount of memory needed to allocate or get the
5485 * allocated extents. It is the caller's responsibility to evaluate the response.
5489 * -Error: Error value describes the condition found.
5493 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5494 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5499 uint32_t alloc_len, mbox_tmo;
5501 /* Calculate the total requested length of the dma memory */
5502 req_len = extnt_cnt * sizeof(uint16_t);
5505 * Calculate the size of an embedded mailbox. The uint32_t
5506 * accounts for extents-specific word.
5508 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5512 * Presume the allocation and response will fit into an embedded
5513 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5515 *emb = LPFC_SLI4_MBX_EMBED;
5516 if (req_len > emb_len) {
5517 req_len = extnt_cnt * sizeof(uint16_t) +
5518 sizeof(union lpfc_sli4_cfg_shdr) +
5520 *emb = LPFC_SLI4_MBX_NEMBED;
5523 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5524 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5526 if (alloc_len < req_len) {
5527 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5528 "2982 Allocated DMA memory size (x%x) is "
5529 "less than the requested DMA memory "
5530 "size (x%x)\n", alloc_len, req_len);
5533 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5537 if (!phba->sli4_hba.intr_enable)
5538 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5540 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5541 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
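/*
 * Sizing note (worked example with hypothetical counts): a request for
 * 80 extents needs 80 * sizeof(uint16_t) = 160 bytes of response, which
 * fits the embedded payload computed above; anything larger than
 * emb_len is reissued as a non-embedded mailbox with the cfg_shdr and
 * the extents-specific word added to req_len.
 */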
5550 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5551 * @phba: Pointer to HBA context object.
5552 * @type: The resource extent type to allocate.
5554 * This function allocates the number of elements for the specified resource type.
5558 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5561 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5562 uint16_t rsrc_id, rsrc_start, j, k;
5565 unsigned long longs;
5566 unsigned long *bmask;
5567 struct lpfc_rsrc_blks *rsrc_blks;
5570 struct lpfc_id_range *id_array = NULL;
5571 void *virtaddr = NULL;
5572 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5573 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5574 struct list_head *ext_blk_list;
5576 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5582 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5583 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5584 "3009 No available Resource Extents "
5585 "for resource type 0x%x: Count: 0x%x, "
5586 "Size 0x%x\n", type, rsrc_cnt,
5591 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5592 "2903 Post resource extents type-0x%x: "
5593 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5595 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5599 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5606 * Figure out where the response is located. Then get local pointers
5607 * to the response data. The port does not guarantee a response for
5608 * every extent count requested, so update the local variable with the
5609 * allocated count from the port.
5611 if (emb == LPFC_SLI4_MBX_EMBED) {
5612 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5613 id_array = &rsrc_ext->u.rsp.id[0];
5614 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5616 virtaddr = mbox->sge_array->addr[0];
5617 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5618 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5619 id_array = &n_rsrc->id;
5622 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
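/*
 * Worked example (hypothetical): 16 extents of 64 ids each on a 64-bit
 * host give rsrc_id_cnt = 1024 and longs = (1024 + 63) / 64 = 16
 * unsigned longs of bitmask.
 */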
5623 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5626 * Based on the resource size and count, correct the base and max
5629 length = sizeof(struct lpfc_rsrc_blks);
5631 case LPFC_RSC_TYPE_FCOE_RPI:
5632 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5633 sizeof(unsigned long),
5635 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5639 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5642 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5643 kfree(phba->sli4_hba.rpi_bmask);
5649 * The next_rpi was initialized with the maximum available
5650 * count but the port may allocate a smaller number. Catch
5651 * that case and update the next_rpi.
5653 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5655 /* Initialize local ptrs for common extent processing later. */
5656 bmask = phba->sli4_hba.rpi_bmask;
5657 ids = phba->sli4_hba.rpi_ids;
5658 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5660 case LPFC_RSC_TYPE_FCOE_VPI:
5661 phba->vpi_bmask = kzalloc(longs *
5662 sizeof(unsigned long),
5664 if (unlikely(!phba->vpi_bmask)) {
5668 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5671 if (unlikely(!phba->vpi_ids)) {
5672 kfree(phba->vpi_bmask);
5677 /* Initialize local ptrs for common extent processing later. */
5678 bmask = phba->vpi_bmask;
5679 ids = phba->vpi_ids;
5680 ext_blk_list = &phba->lpfc_vpi_blk_list;
5682 case LPFC_RSC_TYPE_FCOE_XRI:
5683 phba->sli4_hba.xri_bmask = kzalloc(longs *
5684 sizeof(unsigned long),
5686 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5690 phba->sli4_hba.max_cfg_param.xri_used = 0;
5691 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5694 if (unlikely(!phba->sli4_hba.xri_ids)) {
5695 kfree(phba->sli4_hba.xri_bmask);
5700 /* Initialize local ptrs for common extent processing later. */
5701 bmask = phba->sli4_hba.xri_bmask;
5702 ids = phba->sli4_hba.xri_ids;
5703 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5705 case LPFC_RSC_TYPE_FCOE_VFI:
5706 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5707 sizeof(unsigned long),
5709 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5713 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5716 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5717 kfree(phba->sli4_hba.vfi_bmask);
5722 /* Initialize local ptrs for common extent processing later. */
5723 bmask = phba->sli4_hba.vfi_bmask;
5724 ids = phba->sli4_hba.vfi_ids;
5725 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5728 /* Unsupported Opcode. Fail call. */
5732 ext_blk_list = NULL;
5737 * Complete initializing the extent configuration with the
5738 * allocated ids assigned to this function. The bitmask serves
5739 * as an index into the array and manages the available ids. The
5740 * array just stores the ids communicated to the port via the wqes.
5742 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5744 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5747 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5750 rsrc_blks = kzalloc(length, GFP_KERNEL);
5751 if (unlikely(!rsrc_blks)) {
5757 rsrc_blks->rsrc_start = rsrc_id;
5758 rsrc_blks->rsrc_size = rsrc_size;
5759 list_add_tail(&rsrc_blks->list, ext_blk_list);
5760 rsrc_start = rsrc_id;
5761 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
5762 phba->sli4_hba.scsi_xri_start = rsrc_start +
5763 lpfc_sli4_get_iocb_cnt(phba);
5764 phba->sli4_hba.nvme_xri_start =
5765 phba->sli4_hba.scsi_xri_start +
5766 phba->sli4_hba.scsi_xri_max;
5769 while (rsrc_id < (rsrc_start + rsrc_size)) {
5774 /* Entire word processed. Get next word. */
5779 lpfc_sli4_mbox_cmd_free(phba, mbox);
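/*
 * A minimal sketch of how the bitmask/id pair built above is consumed
 * when a single resource is later allocated (error handling omitted;
 * max_ids is hypothetical):
 *
 *	idx = find_first_zero_bit(bmask, max_ids);
 *	set_bit(idx, bmask);
 *	hw_id = ids[idx];	// the id actually programmed to the port
 */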
5786 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5787 * @phba: Pointer to HBA context object.
5788 * @type: the extent's type.
5790 * This function deallocates all extents of a particular resource type.
5791 * SLI4 does not allow for deallocating a particular extent range. It
5792 * is the caller's responsibility to release all kernel memory resources.
5795 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5798 uint32_t length, mbox_tmo = 0;
5800 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5801 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5803 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5808 * This function sends an embedded mailbox because it only sends the
5809 * resource type. All extents of this type are released by the
5812 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5813 sizeof(struct lpfc_sli4_cfg_mhdr));
5814 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5815 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5816 length, LPFC_SLI4_MBX_EMBED);
5818 /* Send an extents count of 0 - the dealloc doesn't use it. */
5819 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5820 LPFC_SLI4_MBX_EMBED);
5825 if (!phba->sli4_hba.intr_enable)
5826 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5828 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5829 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5836 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5837 if (bf_get(lpfc_mbox_hdr_status,
5838 &dealloc_rsrc->header.cfg_shdr.response)) {
5839 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5840 "2919 Failed to release resource extents "
5841 "for type %d - Status 0x%x Add'l Status 0x%x. "
5842 "Resource memory not released.\n",
5844 bf_get(lpfc_mbox_hdr_status,
5845 &dealloc_rsrc->header.cfg_shdr.response),
5846 bf_get(lpfc_mbox_hdr_add_status,
5847 &dealloc_rsrc->header.cfg_shdr.response));
5852 /* Release kernel memory resources for the specific type. */
5854 case LPFC_RSC_TYPE_FCOE_VPI:
5855 kfree(phba->vpi_bmask);
5856 kfree(phba->vpi_ids);
5857 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5858 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5859 &phba->lpfc_vpi_blk_list, list) {
5860 list_del_init(&rsrc_blk->list);
5863 phba->sli4_hba.max_cfg_param.vpi_used = 0;
5865 case LPFC_RSC_TYPE_FCOE_XRI:
5866 kfree(phba->sli4_hba.xri_bmask);
5867 kfree(phba->sli4_hba.xri_ids);
5868 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5869 &phba->sli4_hba.lpfc_xri_blk_list, list) {
5870 list_del_init(&rsrc_blk->list);
5874 case LPFC_RSC_TYPE_FCOE_VFI:
5875 kfree(phba->sli4_hba.vfi_bmask);
5876 kfree(phba->sli4_hba.vfi_ids);
5877 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5878 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5879 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5880 list_del_init(&rsrc_blk->list);
5884 case LPFC_RSC_TYPE_FCOE_RPI:
5885 /* RPI bitmask and physical id array are cleaned up earlier. */
5886 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5887 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5888 list_del_init(&rsrc_blk->list);
5896 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5899 mempool_free(mbox, phba->mbox_mem_pool);
5904 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
5909 len = sizeof(struct lpfc_mbx_set_feature) -
5910 sizeof(struct lpfc_sli4_cfg_mhdr);
5911 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5912 LPFC_MBOX_OPCODE_SET_FEATURES, len,
5913 LPFC_SLI4_MBX_EMBED);
5916 case LPFC_SET_UE_RECOVERY:
5917 bf_set(lpfc_mbx_set_feature_UER,
5918 &mbox->u.mqe.un.set_feature, 1);
5919 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
5920 mbox->u.mqe.un.set_feature.param_len = 8;
5922 case LPFC_SET_MDS_DIAGS:
5923 bf_set(lpfc_mbx_set_feature_mds,
5924 &mbox->u.mqe.un.set_feature, 1);
5925 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
5926 &mbox->u.mqe.un.set_feature, 1);
5927 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
5928 mbox->u.mqe.un.set_feature.param_len = 8;
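/*
 * Example usage (illustrative): enabling MDS diagnostics through the
 * helper above and then issuing the mailbox by polling:
 *
 *	lpfc_set_features(phba, mbox, LPFC_SET_MDS_DIAGS);
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 */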
5936 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5937 * @phba: Pointer to HBA context object.
5939 * This function allocates all SLI4 resource identifiers.
5942 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5944 int i, rc, error = 0;
5945 uint16_t count, base;
5946 unsigned long longs;
5948 if (!phba->sli4_hba.rpi_hdrs_in_use)
5949 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
5950 if (phba->sli4_hba.extents_in_use) {
5952 * The port supports resource extents. The XRI, VPI, VFI, RPI
5953 * resource extent count must be read and allocated before
5954 * provisioning the resource id arrays.
5956 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5957 LPFC_IDX_RSRC_RDY) {
5959 * Extent-based resources are set - the driver could
5960 * be in a port reset. Figure out if any corrective
5961 * actions need to be taken.
5963 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5964 LPFC_RSC_TYPE_FCOE_VFI);
5967 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5968 LPFC_RSC_TYPE_FCOE_VPI);
5971 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5972 LPFC_RSC_TYPE_FCOE_XRI);
5975 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5976 LPFC_RSC_TYPE_FCOE_RPI);
5981 * It's possible that the number of resources
5982 * provided to this port instance changed between
5983 * resets. Detect this condition and reallocate
5984 * resources. Otherwise, there is no action.
5987 lpfc_printf_log(phba, KERN_INFO,
5988 LOG_MBOX | LOG_INIT,
5989 "2931 Detected extent resource "
5990 "change. Reallocating all "
5992 rc = lpfc_sli4_dealloc_extent(phba,
5993 LPFC_RSC_TYPE_FCOE_VFI);
5994 rc = lpfc_sli4_dealloc_extent(phba,
5995 LPFC_RSC_TYPE_FCOE_VPI);
5996 rc = lpfc_sli4_dealloc_extent(phba,
5997 LPFC_RSC_TYPE_FCOE_XRI);
5998 rc = lpfc_sli4_dealloc_extent(phba,
5999 LPFC_RSC_TYPE_FCOE_RPI);
6004 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6008 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6012 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6016 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6019 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6024 * The port does not support resource extents. The XRI, VPI,
6025 * VFI, RPI resource ids were determined from READ_CONFIG.
6026 * Just allocate the bitmasks and provision the resource id
6027 * arrays. If a port reset is active, the resources don't
6028 * need any action - just exit.
6030 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6031 LPFC_IDX_RSRC_RDY) {
6032 lpfc_sli4_dealloc_resource_identifiers(phba);
6033 lpfc_sli4_remove_rpis(phba);
6036 count = phba->sli4_hba.max_cfg_param.max_rpi;
6038 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6039 "3279 Invalid provisioning of "
6044 base = phba->sli4_hba.max_cfg_param.rpi_base;
6045 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6046 phba->sli4_hba.rpi_bmask = kzalloc(longs *
6047 sizeof(unsigned long),
6049 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6053 phba->sli4_hba.rpi_ids = kzalloc(count *
6056 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6058 goto free_rpi_bmask;
6061 for (i = 0; i < count; i++)
6062 phba->sli4_hba.rpi_ids[i] = base + i;
6065 count = phba->sli4_hba.max_cfg_param.max_vpi;
6067 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6068 "3280 Invalid provisioning of "
6073 base = phba->sli4_hba.max_cfg_param.vpi_base;
6074 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6075 phba->vpi_bmask = kzalloc(longs *
6076 sizeof(unsigned long),
6078 if (unlikely(!phba->vpi_bmask)) {
6082 phba->vpi_ids = kzalloc(count *
6085 if (unlikely(!phba->vpi_ids)) {
6087 goto free_vpi_bmask;
6090 for (i = 0; i < count; i++)
6091 phba->vpi_ids[i] = base + i;
6094 count = phba->sli4_hba.max_cfg_param.max_xri;
6096 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6097 "3281 Invalid provisioning of "
6102 base = phba->sli4_hba.max_cfg_param.xri_base;
6103 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6104 phba->sli4_hba.xri_bmask = kzalloc(longs *
6105 sizeof(unsigned long),
6107 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6111 phba->sli4_hba.max_cfg_param.xri_used = 0;
6112 phba->sli4_hba.xri_ids = kzalloc(count *
6115 if (unlikely(!phba->sli4_hba.xri_ids)) {
6117 goto free_xri_bmask;
6120 for (i = 0; i < count; i++)
6121 phba->sli4_hba.xri_ids[i] = base + i;
6124 count = phba->sli4_hba.max_cfg_param.max_vfi;
6126 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6127 "3282 Invalid provisioning of "
6132 base = phba->sli4_hba.max_cfg_param.vfi_base;
6133 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6134 phba->sli4_hba.vfi_bmask = kzalloc(longs *
6135 sizeof(unsigned long),
6137 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6141 phba->sli4_hba.vfi_ids = kzalloc(count *
6144 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6146 goto free_vfi_bmask;
6149 for (i = 0; i < count; i++)
6150 phba->sli4_hba.vfi_ids[i] = base + i;
6153 * Mark all resources ready. A subsequent HBA reset does not
6154 * need to repeat this initialization.
6156 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6162 kfree(phba->sli4_hba.vfi_bmask);
6163 phba->sli4_hba.vfi_bmask = NULL;
6165 kfree(phba->sli4_hba.xri_ids);
6166 phba->sli4_hba.xri_ids = NULL;
6168 kfree(phba->sli4_hba.xri_bmask);
6169 phba->sli4_hba.xri_bmask = NULL;
6171 kfree(phba->vpi_ids);
6172 phba->vpi_ids = NULL;
6174 kfree(phba->vpi_bmask);
6175 phba->vpi_bmask = NULL;
6177 kfree(phba->sli4_hba.rpi_ids);
6178 phba->sli4_hba.rpi_ids = NULL;
6180 kfree(phba->sli4_hba.rpi_bmask);
6181 phba->sli4_hba.rpi_bmask = NULL;
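/*
 * The four id-array setups above repeat one provisioning pattern: size
 * the bitmask in whole unsigned longs, allocate the bitmask and the id
 * array, then seed the ids from the port-reported base. A minimal
 * sketch of that pattern follows; the helper name and signature are
 * illustrative only and do not exist in the driver.
 */
#if 0	/* illustrative sketch, not compiled */
static int lpfc_sketch_provision_ids(uint16_t count, uint16_t base,
				     unsigned long **bmask, uint16_t **ids)
{
	int longs, i;

	/* one bit per resource id, rounded up to whole unsigned longs */
	longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
	*bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
	if (!*bmask)
		return -ENOMEM;
	*ids = kzalloc(count * sizeof(uint16_t), GFP_KERNEL);
	if (!*ids) {
		kfree(*bmask);
		return -ENOMEM;
	}
	/* ids are contiguous, starting at the port-reported base */
	for (i = 0; i < count; i++)
		(*ids)[i] = base + i;
	return 0;
}
#endif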
6187 * lpfc_sli4_dealloc_resource_identifiers - Release all SLI4 resource identifiers.
6188 * @phba: Pointer to HBA context object.
6190 * This function releases the resource identifier bitmasks and id arrays
6191 * for each resource type, deallocating the underlying extents first when
6192 * the port is running in extents mode.
6194 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6196 if (phba->sli4_hba.extents_in_use) {
6197 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6198 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6199 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6200 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6202 kfree(phba->vpi_bmask);
6203 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6204 kfree(phba->vpi_ids);
6205 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6206 kfree(phba->sli4_hba.xri_bmask);
6207 kfree(phba->sli4_hba.xri_ids);
6208 kfree(phba->sli4_hba.vfi_bmask);
6209 kfree(phba->sli4_hba.vfi_ids);
6210 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6211 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6218 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6219 * @phba: Pointer to HBA context object.
6220 * @type: The resource extent type.
6221 * @extnt_cnt: buffer to hold the port's allocated extent count response
6222 * @extnt_size: buffer to hold port extent size response.
6224 * This function calls the port to read the host allocated extents
6225 * for a particular type.
6228 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6229 uint16_t *extnt_cnt, uint16_t *extnt_size)
6233 uint16_t curr_blks = 0;
6234 uint32_t req_len, emb_len;
6235 uint32_t alloc_len, mbox_tmo;
6236 struct list_head *blk_list_head;
6237 struct lpfc_rsrc_blks *rsrc_blk;
6239 void *virtaddr = NULL;
6240 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6241 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6242 union lpfc_sli4_cfg_shdr *shdr;
6245 case LPFC_RSC_TYPE_FCOE_VPI:
6246 blk_list_head = &phba->lpfc_vpi_blk_list;
6248 case LPFC_RSC_TYPE_FCOE_XRI:
6249 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6251 case LPFC_RSC_TYPE_FCOE_VFI:
6252 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6254 case LPFC_RSC_TYPE_FCOE_RPI:
6255 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6261 /* Count the number of extents currently allocated for this type. */
6262 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6263 if (curr_blks == 0) {
6265 * The GET_ALLOCATED mailbox does not return the size,
6266 * just the count. The size is taken from the current
6267 * allocated block; all blocks of an extent type share
6268 * the same size, so set the return size here.
6271 *extnt_size = rsrc_blk->rsrc_size;
6277 * Calculate the size of an embedded mailbox. The uint32_t
6278 * accounts for the extent-specific word.
6280 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6284 * Presume the allocation and response will fit into an embedded
6285 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6287 emb = LPFC_SLI4_MBX_EMBED;
6289 if (req_len > emb_len) {
6290 req_len = curr_blks * sizeof(uint16_t) +
6291 sizeof(union lpfc_sli4_cfg_shdr) +
6293 emb = LPFC_SLI4_MBX_NEMBED;
6296 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6299 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6301 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6302 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6304 if (alloc_len < req_len) {
6305 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6306 "2983 Allocated DMA memory size (x%x) is "
6307 "less than the requested DMA memory "
6308 "size (x%x)\n", alloc_len, req_len);
6312 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6318 if (!phba->sli4_hba.intr_enable)
6319 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6321 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6322 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6331 * Figure out where the response is located, then get local pointers
6332 * to the response data. The port does not guarantee a response for
6333 * every extent count requested, so update the local variable with
6334 * the count actually allocated by the port.
6336 if (emb == LPFC_SLI4_MBX_EMBED) {
6337 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6338 shdr = &rsrc_ext->header.cfg_shdr;
6339 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6341 virtaddr = mbox->sge_array->addr[0];
6342 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6343 shdr = &n_rsrc->cfg_shdr;
6344 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6347 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6348 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6349 "2984 Failed to read allocated resources "
6350 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6352 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6353 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6358 lpfc_sli4_mbox_cmd_free(phba, mbox);
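/*
 * A typical caller asks how many extents of one type the port already
 * holds, for example when reconciling driver bookkeeping after a reset.
 * Usage sketch below; the locals and the logging are illustrative, not
 * lifted from an actual call site.
 */
#if 0	/* illustrative sketch, not compiled */
	uint16_t xri_cnt = 0, xri_size = 0;

	if (!lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
					    &xri_cnt, &xri_size))
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"port holds %d XRI extents of %d ids each\n",
				xri_cnt, xri_size);
#endif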
6363 * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
6364 * @phba: pointer to lpfc hba data structure.
6366 * @sgl_list: linked list of sgl buffers to post
6367 * @cnt: number of buffers on the linked list
6369 * This routine walks the list of buffers that have been allocated and
6370 * reposts them to the port using SGL block post. This is needed after a
6371 * pci_function_reset/warm_start or start. It attempts to construct blocks
6372 * of buffer sgls that contain contiguous XRIs and uses the non-embedded
6373 * SGL block post mailbox command to post them to the port. For any single
6374 * buffer sgl with a non-contiguous XRI, it uses the embedded SGL post
6375 * mailbox command for posting.
6377 * Returns: 0 = success, non-zero failure.
6380 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6381 struct list_head *sgl_list, int cnt)
6383 struct lpfc_sglq *sglq_entry = NULL;
6384 struct lpfc_sglq *sglq_entry_next = NULL;
6385 struct lpfc_sglq *sglq_entry_first = NULL;
6386 int status, total_cnt;
6387 int post_cnt = 0, num_posted = 0, block_cnt = 0;
6388 int last_xritag = NO_XRI;
6389 LIST_HEAD(prep_sgl_list);
6390 LIST_HEAD(blck_sgl_list);
6391 LIST_HEAD(allc_sgl_list);
6392 LIST_HEAD(post_sgl_list);
6393 LIST_HEAD(free_sgl_list);
6395 spin_lock_irq(&phba->hbalock);
6396 spin_lock(&phba->sli4_hba.sgl_list_lock);
6397 list_splice_init(sgl_list, &allc_sgl_list);
6398 spin_unlock(&phba->sli4_hba.sgl_list_lock);
6399 spin_unlock_irq(&phba->hbalock);
6402 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6403 &allc_sgl_list, list) {
6404 list_del_init(&sglq_entry->list);
6406 if ((last_xritag != NO_XRI) &&
6407 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6408 /* a hole in xri block, form a sgl posting block */
6409 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6410 post_cnt = block_cnt - 1;
6411 /* prepare list for next posting block */
6412 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6415 /* prepare list for next posting block */
6416 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6417 /* enough sgls for non-embed sgl mbox command */
6418 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6419 list_splice_init(&prep_sgl_list,
6421 post_cnt = block_cnt;
6427 /* keep track of last sgl's xritag */
6428 last_xritag = sglq_entry->sli4_xritag;
6430 /* end of repost sgl list condition for buffers */
6431 if (num_posted == total_cnt) {
6432 if (post_cnt == 0) {
6433 list_splice_init(&prep_sgl_list,
6435 post_cnt = block_cnt;
6436 } else if (block_cnt == 1) {
6437 status = lpfc_sli4_post_sgl(phba,
6438 sglq_entry->phys, 0,
6439 sglq_entry->sli4_xritag);
6441 /* successful, put sgl to posted list */
6442 list_add_tail(&sglq_entry->list,
6445 /* Failure, put sgl to free list */
6446 lpfc_printf_log(phba, KERN_WARNING,
6448 "3159 Failed to post "
6449 "sgl, xritag:x%x\n",
6450 sglq_entry->sli4_xritag);
6451 list_add_tail(&sglq_entry->list,
6458 /* continue until a nembed page worth of sgls */
6462 /* post the buffer list sgls as a block */
6463 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
6467 /* success, put sgl list to posted sgl list */
6468 list_splice_init(&blck_sgl_list, &post_sgl_list);
6470 /* Failure, put sgl list to free sgl list */
6471 sglq_entry_first = list_first_entry(&blck_sgl_list,
6474 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6475 "3160 Failed to post sgl-list, "
6477 sglq_entry_first->sli4_xritag,
6478 (sglq_entry_first->sli4_xritag +
6480 list_splice_init(&blck_sgl_list, &free_sgl_list);
6481 total_cnt -= post_cnt;
6484 /* don't reset xritag due to a hole in the xri block */
6486 last_xritag = NO_XRI;
6488 /* reset sgl post count for next round of posting */
6492 /* free the sgls failed to post */
6493 lpfc_free_sgl_list(phba, &free_sgl_list);
6495 /* push sgls posted to the available list */
6496 if (!list_empty(&post_sgl_list)) {
6497 spin_lock_irq(&phba->hbalock);
6498 spin_lock(&phba->sli4_hba.sgl_list_lock);
6499 list_splice_init(&post_sgl_list, sgl_list);
6500 spin_unlock(&phba->sli4_hba.sgl_list_lock);
6501 spin_unlock_irq(&phba->hbalock);
6503 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6504 "3161 Failure to post sgl to port.\n");
6508 /* return the number of XRIs actually posted */
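/*
 * The posting decision above reduces to two triggers that close the
 * current block: a hole in the XRI sequence, or a full non-embedded
 * mailbox page. Condensed sketch of just that rule; post_block() is a
 * hypothetical stand-in for the splice-and-count steps, not a real
 * driver function.
 */
#if 0	/* illustrative sketch, not compiled */
	if (last_xritag != NO_XRI &&
	    sglq_entry->sli4_xritag != last_xritag + 1)
		post_block();	/* hole: flush the sgls gathered so far */
	else if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT)
		post_block();	/* page full: flush even without a hole */
	last_xritag = sglq_entry->sli4_xritag;
#endif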
6513 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
6517 len = sizeof(struct lpfc_mbx_set_host_data) -
6518 sizeof(struct lpfc_sli4_cfg_mhdr);
6519 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6520 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
6521 LPFC_SLI4_MBX_EMBED);
6523 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
6524 mbox->u.mqe.un.set_host_data.param_len =
6525 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
6526 snprintf(mbox->u.mqe.un.set_host_data.data,
6527 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
6528 "Linux %s v"LPFC_DRIVER_VERSION,
6529 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
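/*
 * lpfc_set_host_data() only builds the SET_HOST_DATA MQE; the caller
 * still issues it, as lpfc_sli4_hba_setup() does further below. Sketch
 * of that call pair (error handling trimmed):
 */
#if 0	/* illustrative sketch, not compiled */
	lpfc_set_host_data(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	/* a failure here is non-fatal: the port merely never learns the
	 * host OS and driver version string
	 */
#endif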
6533 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
6534 struct lpfc_queue *drq, int count, int idx)
6537 struct lpfc_rqe hrqe;
6538 struct lpfc_rqe drqe;
6539 struct lpfc_rqb *rqbp;
6540 struct rqb_dmabuf *rqb_buffer;
6541 LIST_HEAD(rqb_buf_list);
6544 for (i = 0; i < count; i++) {
6545 /* If the RQ is already full, don't bother */
6546 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
6548 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
6551 rqb_buffer->hrq = hrq;
6552 rqb_buffer->drq = drq;
6553 rqb_buffer->idx = idx;
6554 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
6556 while (!list_empty(&rqb_buf_list)) {
6557 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
6560 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
6561 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
6562 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
6563 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
6564 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
6566 rqbp->rqb_free_buffer(phba, rqb_buffer);
6568 list_add_tail(&rqb_buffer->hbuf.list,
6569 &rqbp->rqb_buffer_list);
6570 rqbp->buffer_count++;
6577 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
6578 * @phba: Pointer to HBA context object.
6580 * This function is the main SLI4 device initialization PCI function. This
6581 * function is called by the HBA initialization code, HBA reset code and
6582 * HBA error attention handler code. The caller is not required to hold any locks.
6586 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6589 LPFC_MBOXQ_t *mboxq;
6590 struct lpfc_mqe *mqe;
6593 uint32_t ftr_rsp = 0;
6594 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6595 struct lpfc_vport *vport = phba->pport;
6596 struct lpfc_dmabuf *mp;
6597 struct lpfc_rqb *rqbp;
6599 /* Perform a PCI function reset to start from clean */
6600 rc = lpfc_pci_function_reset(phba);
6604 /* Check the HBA Host Status Register for readiness */
6605 rc = lpfc_sli4_post_status_check(phba);
6609 spin_lock_irq(&phba->hbalock);
6610 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6611 spin_unlock_irq(&phba->hbalock);
6615 * Allocate a single mailbox container for initializing the port.
6618 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6622 /* Issue READ_REV to collect vpd and FW information. */
6623 vpd_size = SLI4_PAGE_SIZE;
6624 vpd = kzalloc(vpd_size, GFP_KERNEL);
6630 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
6636 mqe = &mboxq->u.mqe;
6637 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
6638 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
6639 phba->hba_flag |= HBA_FCOE_MODE;
6640 phba->fcp_embed_io = 0; /* SLI4 FC support only */
6642 phba->hba_flag &= ~HBA_FCOE_MODE;
6645 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6647 phba->hba_flag |= HBA_FIP_SUPPORT;
6649 phba->hba_flag &= ~HBA_FIP_SUPPORT;
6651 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6653 if (phba->sli_rev != LPFC_SLI_REV4) {
6654 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6655 "0376 READ_REV Error. SLI Level %d "
6656 "FCoE enabled %d\n",
6657 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
6664 * Continue initialization with default values even if the driver failed
6665 * to read the FCoE param config regions; only read the parameters if the
6666 * board is in FCoE mode.
6668 if (phba->hba_flag & HBA_FCOE_MODE &&
6669 lpfc_sli4_read_fcoe_params(phba))
6670 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6671 "2570 Failed to read FCoE parameters\n");
6674 * Retrieve the sli4 device physical port name; failure to do so
6675 * is considered non-fatal.
6677 rc = lpfc_sli4_retrieve_pport_name(phba);
6679 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6680 "3080 Successful retrieving SLI4 device "
6681 "physical port name: %s.\n", phba->Port);
6684 * Evaluate the read rev and vpd data. Populate the driver
6685 * state with the results. If this routine fails, the failure
6686 * is not fatal as the driver will use generic values.
6688 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
6689 if (unlikely(!rc)) {
6690 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6691 "0377 Error %d parsing vpd. "
6692 "Using defaults.\n", rc);
6697 /* Save information as VPD data */
6698 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6699 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6700 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6701 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6703 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6705 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6707 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6709 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6710 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6711 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6712 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6713 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6714 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6715 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6716 "(%d):0380 READ_REV Status x%x "
6717 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6718 mboxq->vport ? mboxq->vport->vpi : 0,
6719 bf_get(lpfc_mqe_status, mqe),
6720 phba->vpd.rev.opFwName,
6721 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6722 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
6724 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
6725 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
6726 if (phba->pport->cfg_lun_queue_depth > rc) {
6727 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6728 "3362 LUN queue depth changed from %d to %d\n",
6729 phba->pport->cfg_lun_queue_depth, rc);
6730 phba->pport->cfg_lun_queue_depth = rc;
6733 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6734 LPFC_SLI_INTF_IF_TYPE_0) {
6735 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
6736 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6737 if (rc == MBX_SUCCESS) {
6738 phba->hba_flag |= HBA_RECOVERABLE_UE;
6739 /* Set 1Sec interval to detect UE */
6740 phba->eratt_poll_interval = 1;
6741 phba->sli4_hba.ue_to_sr = bf_get(
6742 lpfc_mbx_set_feature_UESR,
6743 &mboxq->u.mqe.un.set_feature);
6744 phba->sli4_hba.ue_to_rp = bf_get(
6745 lpfc_mbx_set_feature_UERP,
6746 &mboxq->u.mqe.un.set_feature);
6750 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
6751 /* Enable MDS Diagnostics only if the SLI Port supports it */
6752 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
6753 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6754 if (rc != MBX_SUCCESS)
6755 phba->mds_diags_support = 0;
6759 * Discover the port's supported feature set and match it against the host's requested features.
6762 lpfc_request_features(phba, mboxq);
6763 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6770 * The port must support FCP initiator mode as this is the
6771 * only mode running in the host.
6773 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
6774 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6775 "0378 No support for fcpi mode.\n");
6778 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
6779 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6781 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
6783 * If the port cannot support the host's requested features
6784 * then turn off the global config parameters to disable the
6785 * feature in the driver. This is not a fatal error.
6787 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6788 if (phba->cfg_enable_bg) {
6789 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6790 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6795 if (phba->max_vpi && phba->cfg_enable_npiv &&
6796 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6800 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6801 "0379 Feature Mismatch Data: x%08x %08x "
6802 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6803 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6804 phba->cfg_enable_npiv, phba->max_vpi);
6805 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6806 phba->cfg_enable_bg = 0;
6807 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6808 phba->cfg_enable_npiv = 0;
6811 /* These SLI3 features are assumed in SLI4 */
6812 spin_lock_irq(&phba->hbalock);
6813 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
6814 spin_unlock_irq(&phba->hbalock);
6817 * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
6818 * calls depend on these resources to complete port setup.
6820 rc = lpfc_sli4_alloc_resource_identifiers(phba);
6822 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6823 "2920 Failed to alloc Resource IDs "
6828 lpfc_set_host_data(phba, mboxq);
6830 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6832 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6833 "2134 Failed to set host os driver version %x",
6837 /* Read the port's service parameters. */
6838 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6840 phba->link_state = LPFC_HBA_ERROR;
6845 mboxq->vport = vport;
6846 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6847 mp = (struct lpfc_dmabuf *) mboxq->context1;
6848 if (rc == MBX_SUCCESS) {
6849 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
6854 * This memory was allocated by the lpfc_read_sparam routine. Release
6855 * it to the mbuf pool.
6857 lpfc_mbuf_free(phba, mp->virt, mp->phys);
6859 mboxq->context1 = NULL;
6861 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6862 "0382 READ_SPARAM command failed "
6863 "status %d, mbxStatus x%x\n",
6864 rc, bf_get(lpfc_mqe_status, mqe));
6865 phba->link_state = LPFC_HBA_ERROR;
6870 lpfc_update_vport_wwn(vport);
6872 /* Update the fc_host data structures with new wwn. */
6873 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6874 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6876 /* Create all the SLI4 queues */
6877 rc = lpfc_sli4_queue_create(phba);
6879 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6880 "3089 Failed to allocate queues\n");
6884 /* Set up all the queues to the device */
6885 rc = lpfc_sli4_queue_setup(phba);
6887 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6888 "0381 Error %d during queue setup.\n ", rc);
6889 goto out_stop_timers;
6891 /* Initialize the driver internal SLI layer lists. */
6892 lpfc_sli4_setup(phba);
6893 lpfc_sli4_queue_init(phba);
6895 /* update host els xri-sgl sizes and mappings */
6896 rc = lpfc_sli4_els_sgl_update(phba);
6898 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6899 "1400 Failed to update xri-sgl size and "
6900 "mapping: %d\n", rc);
6901 goto out_destroy_queue;
6904 /* register the els sgl pool to the port */
6905 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
6906 phba->sli4_hba.els_xri_cnt);
6907 if (unlikely(rc < 0)) {
6908 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6909 "0582 Error %d during els sgl post "
6912 goto out_destroy_queue;
6914 phba->sli4_hba.els_xri_cnt = rc;
6916 if (phba->nvmet_support) {
6917 /* update host nvmet xri-sgl sizes and mappings */
6918 rc = lpfc_sli4_nvmet_sgl_update(phba);
6920 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6921 "6308 Failed to update nvmet-sgl size "
6922 "and mapping: %d\n", rc);
6923 goto out_destroy_queue;
6926 /* register the nvmet sgl pool to the port */
6927 rc = lpfc_sli4_repost_sgl_list(
6929 &phba->sli4_hba.lpfc_nvmet_sgl_list,
6930 phba->sli4_hba.nvmet_xri_cnt);
6931 if (unlikely(rc < 0)) {
6932 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6933 "3117 Error %d during nvmet "
6936 goto out_destroy_queue;
6938 phba->sli4_hba.nvmet_xri_cnt = rc;
6940 cnt = phba->cfg_iocb_cnt * 1024;
6941 /* We need 1 iocbq for every SGL, for IO processing */
6942 cnt += phba->sli4_hba.nvmet_xri_cnt;
6944 /* update host scsi xri-sgl sizes and mappings */
6945 rc = lpfc_sli4_scsi_sgl_update(phba);
6947 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6948 "6309 Failed to update scsi-sgl size "
6949 "and mapping: %d\n", rc);
6950 goto out_destroy_queue;
6953 /* update host nvme xri-sgl sizes and mappings */
6954 rc = lpfc_sli4_nvme_sgl_update(phba);
6956 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6957 "6082 Failed to update nvme-sgl size "
6958 "and mapping: %d\n", rc);
6959 goto out_destroy_queue;
6962 cnt = phba->cfg_iocb_cnt * 1024;
6965 if (!phba->sli.iocbq_lookup) {
6966 /* Initialize and populate the iocb list per host */
6967 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6968 "2821 initialize iocb list %d total %d\n",
6969 phba->cfg_iocb_cnt, cnt);
6970 rc = lpfc_init_iocb_list(phba, cnt);
6972 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6973 "1413 Failed to init iocb list.\n");
6974 goto out_destroy_queue;
6978 if (phba->nvmet_support)
6979 lpfc_nvmet_create_targetport(phba);
6981 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
6982 /* Post initial buffers to all RQs created */
6983 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
6984 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
6985 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
6986 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
6987 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
6988 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
6989 rqbp->buffer_count = 0;
6991 lpfc_post_rq_buffer(
6992 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
6993 phba->sli4_hba.nvmet_mrq_data[i],
6994 LPFC_NVMET_RQE_DEF_COUNT, i);
6998 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
6999 /* register the allocated scsi sgl pool to the port */
7000 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
7002 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7003 "0383 Error %d during scsi sgl post "
7005 /* Some Scsi buffers were moved to abort scsi list */
7006 /* A pci function reset will repost them */
7008 goto out_destroy_queue;
7012 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
7013 (phba->nvmet_support == 0)) {
7015 /* register the allocated nvme sgl pool to the port */
7016 rc = lpfc_repost_nvme_sgl_list(phba);
7018 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7019 "6116 Error %d during nvme sgl post "
7021 /* Some NVME buffers were moved to abort nvme list */
7022 /* A pci function reset will repost them */
7024 goto out_destroy_queue;
7028 /* Post the rpi header region to the device. */
7029 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7031 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7032 "0393 Error %d during rpi post operation\n",
7035 goto out_free_iocblist;
7037 lpfc_sli4_node_prep(phba);
7039 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7040 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7042 * The FC Port needs to register FCFI (index 0)
7044 lpfc_reg_fcfi(phba, mboxq);
7045 mboxq->vport = phba->pport;
7046 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7047 if (rc != MBX_SUCCESS)
7048 goto out_unset_queue;
7050 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7051 &mboxq->u.mqe.un.reg_fcfi);
7053 /* We are a NVME Target mode with MRQ > 1 */
7055 /* First register the FCFI */
7056 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7057 mboxq->vport = phba->pport;
7058 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7059 if (rc != MBX_SUCCESS)
7060 goto out_unset_queue;
7062 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7063 &mboxq->u.mqe.un.reg_fcfi_mrq);
7065 /* Next register the MRQs */
7066 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7067 mboxq->vport = phba->pport;
7068 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7069 if (rc != MBX_SUCCESS)
7070 goto out_unset_queue;
7073 /* Check if the port is configured to be disabled */
7074 lpfc_sli_read_link_ste(phba);
7077 /* Arm the CQs and then EQs on device */
7078 lpfc_sli4_arm_cqeq_intr(phba);
7080 /* Indicate device interrupt mode */
7081 phba->sli4_hba.intr_enable = 1;
7083 /* Allow asynchronous mailbox command to go through */
7084 spin_lock_irq(&phba->hbalock);
7085 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7086 spin_unlock_irq(&phba->hbalock);
7088 /* Post receive buffers to the device */
7089 lpfc_sli4_rb_setup(phba);
7091 /* Reset HBA FCF states after HBA reset */
7092 phba->fcf.fcf_flag = 0;
7093 phba->fcf.current_rec.flag = 0;
7095 /* Start the ELS watchdog timer */
7096 mod_timer(&vport->els_tmofunc,
7097 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7099 /* Start heart beat timer */
7100 mod_timer(&phba->hb_tmofunc,
7101 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7102 phba->hb_outstanding = 0;
7103 phba->last_completion_time = jiffies;
7105 /* Start error attention (ERATT) polling timer */
7106 mod_timer(&phba->eratt_poll,
7107 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7109 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7110 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7111 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7113 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7114 "2829 This device supports "
7115 "Advanced Error Reporting (AER)\n");
7116 spin_lock_irq(&phba->hbalock);
7117 phba->hba_flag |= HBA_AER_ENABLED;
7118 spin_unlock_irq(&phba->hbalock);
7120 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7121 "2830 This device does not support "
7122 "Advanced Error Reporting (AER)\n");
7123 phba->cfg_aer_support = 0;
7129 * The port is ready, set the host's link state to LINK_DOWN
7130 * in preparation for link interrupts.
7132 spin_lock_irq(&phba->hbalock);
7133 phba->link_state = LPFC_LINK_DOWN;
7134 spin_unlock_irq(&phba->hbalock);
7135 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7136 (phba->hba_flag & LINK_DISABLED)) {
7137 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7138 "3103 Adapter Link is disabled.\n");
7139 lpfc_down_link(phba, mboxq);
7140 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7141 if (rc != MBX_SUCCESS) {
7142 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7143 "3104 Adapter failed to issue "
7144 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7145 goto out_unset_queue;
7147 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7148 /* don't perform init_link on SLI4 FC port loopback test */
7149 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7150 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7152 goto out_unset_queue;
7155 mempool_free(mboxq, phba->mbox_mem_pool);
7158 /* Unset all the queues set up in this routine when error out */
7159 lpfc_sli4_queue_unset(phba);
7161 lpfc_free_iocb_list(phba);
7163 lpfc_sli4_queue_destroy(phba);
7165 lpfc_stop_hba_timers(phba);
7167 mempool_free(mboxq, phba->mbox_mem_pool);
7172 * lpfc_mbox_timeout - Timeout call back function for mbox timer
7173 * @ptr: context object - pointer to hba structure.
7175 * This is the callback function for mailbox timer. The mailbox
7176 * timer is armed when a new mailbox command is issued and the timer
7177 * is deleted when the mailbox completes. The function is called by
7178 * the kernel timer code when a mailbox does not complete within
7179 * expected time. This function wakes up the worker thread to
7180 * process the mailbox timeout and returns. All the processing is
7181 * done by the worker thread function lpfc_mbox_timeout_handler.
7184 lpfc_mbox_timeout(unsigned long ptr)
7186 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
7187 unsigned long iflag;
7188 uint32_t tmo_posted;
7190 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7191 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7193 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7194 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7197 lpfc_worker_wake_up(phba);
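/*
 * The arming counterpart to this callback lives in the mailbox issue
 * paths: the timer is set when a command is handed to the port and
 * deleted on completion, as lpfc_sli_issue_mbox_s3() does below.
 * Sketch of the arming step, assuming lpfc_mbox_tmo_val() returns a
 * value in seconds (hence the 1000 factor):
 */
#if 0	/* illustrative sketch, not compiled */
	unsigned long timeout;

	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 1000);
	mod_timer(&psli->mbox_tmo, jiffies + timeout);
#endif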
7202 * lpfc_sli4_mbox_completions_pending - check for pending mailbox completions
7204 * @phba: Pointer to HBA context object.
7206 * This function checks if any mailbox completions are present on the mailbox
7207 * completion queue.
7210 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7214 struct lpfc_queue *mcq;
7215 struct lpfc_mcqe *mcqe;
7216 bool pending_completions = false;
7218 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7221 /* Check for completions on mailbox completion queue */
7223 mcq = phba->sli4_hba.mbx_cq;
7224 idx = mcq->hba_index;
7225 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
7226 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
7227 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7228 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7229 pending_completions = true;
7232 idx = (idx + 1) % mcq->entry_count;
7233 if (mcq->hba_index == idx)
7236 return pending_completions;
7241 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7243 * @phba: Pointer to HBA context object.
7245 * For sli4, it is possible to miss an interrupt. As such, mbox completions
7246 * may be missed, causing erroneous mailbox timeouts to occur. This function
7247 * checks to see if mbox completions are on the mailbox completion queue
7248 * and will process all the completions associated with the eq for the
7249 * mailbox completion queue.
7252 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7256 struct lpfc_queue *fpeq = NULL;
7257 struct lpfc_eqe *eqe;
7260 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7263 /* Find the eq associated with the mcq */
7265 if (phba->sli4_hba.hba_eq)
7266 for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
7267 if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
7268 phba->sli4_hba.mbx_cq->assoc_qid) {
7269 fpeq = phba->sli4_hba.hba_eq[eqidx];
7275 /* Turn off interrupts from this EQ */
7277 lpfc_sli4_eq_clr_intr(fpeq);
7279 /* Check to see if a mbox completion is pending */
7281 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7284 * If a mbox completion is pending, process all the events on EQ
7285 * associated with the mbox completion queue (this could include
7286 * mailbox commands, async events, els commands, receive queue data
7291 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
7292 lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
7293 fpeq->EQ_processed++;
7296 /* Always clear and re-arm the EQ */
7298 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
7300 return mbox_pending;
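/*
 * Callers use this routine as a "did we just race a completion?" gate
 * before treating a mailbox as timed out, exactly as the timeout
 * handler below does:
 */
#if 0	/* illustrative sketch, not compiled */
	if (lpfc_sli4_process_missed_mbox_completions(phba))
		return;	/* not a real timeout; completion was processed */
#endif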
7305 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
7306 * @phba: Pointer to HBA context object.
7308 * This function is called from worker thread when a mailbox command times out.
7309 * The caller is not required to hold any locks. This function will reset the
7310 * HBA and recover all the pending commands.
7313 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7315 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
7316 MAILBOX_t *mb = NULL;
7318 struct lpfc_sli *psli = &phba->sli;
7320 /* If the mailbox completed, process the completion and return */
7321 if (lpfc_sli4_process_missed_mbox_completions(phba))
7326 /* Check the pmbox pointer first. There is a race condition
7327 * between the mbox timeout handler getting executed in the
7328 * worklist and the mailbox actually completing. When this
7329 * race condition occurs, the mbox_active will be NULL.
7331 spin_lock_irq(&phba->hbalock);
7332 if (pmbox == NULL) {
7333 lpfc_printf_log(phba, KERN_WARNING,
7335 "0353 Active Mailbox cleared - mailbox timeout "
7337 spin_unlock_irq(&phba->hbalock);
7341 /* Mbox cmd <mbxCommand> timeout */
7342 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7343 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
7345 phba->pport->port_state,
7347 phba->sli.mbox_active);
7348 spin_unlock_irq(&phba->hbalock);
7350 /* Setting state unknown so lpfc_sli_abort_iocb_ring
7351 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
7352 * it to fail all outstanding SCSI IO.
7354 spin_lock_irq(&phba->pport->work_port_lock);
7355 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
7356 spin_unlock_irq(&phba->pport->work_port_lock);
7357 spin_lock_irq(&phba->hbalock);
7358 phba->link_state = LPFC_LINK_UNKNOWN;
7359 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7360 spin_unlock_irq(&phba->hbalock);
7362 lpfc_sli_abort_fcp_rings(phba);
7364 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7365 "0345 Resetting board due to mailbox timeout\n");
7367 /* Reset the HBA device */
7368 lpfc_reset_hba(phba);
7372 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
7373 * @phba: Pointer to HBA context object.
7374 * @pmbox: Pointer to mailbox object.
7375 * @flag: Flag indicating how the mailbox need to be processed.
7377 * This function is called by discovery code and HBA management code
7378 * to submit a mailbox command to firmware with SLI-3 interface spec. This
7379 * function gets the hbalock to protect the data structures.
7380 * The mailbox command can be submitted in polling mode, in which case
7381 * this function will wait in a polling loop for the completion of the
7383 * If the mailbox is submitted in no_wait mode (not polling) the
7384 * function will submit the command and returns immediately without waiting
7385 * for the mailbox completion. The no_wait is supported only when HBA
7386 * is in SLI2/SLI3 mode - interrupts are enabled.
7387 * The SLI interface allows only one mailbox pending at a time. If the
7388 * mailbox is issued in polling mode and there is already a mailbox
7389 * pending, then the function will return an error. If the mailbox is issued
7390 * in NO_WAIT mode and there is a mailbox pending already, the function
7391 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
7392 * The sli layer owns the mailbox object until the completion of mailbox
7393 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
7394 * return codes the caller owns the mailbox command after the return of
7395 * the function.
7398 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
7402 struct lpfc_sli *psli = &phba->sli;
7403 uint32_t status, evtctr;
7404 uint32_t ha_copy, hc_copy;
7406 unsigned long timeout;
7407 unsigned long drvr_flag = 0;
7408 uint32_t word0, ldata;
7409 void __iomem *to_slim;
7410 int processing_queue = 0;
7412 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7414 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7415 /* processing mbox queue from intr_handler */
7416 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7417 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7420 processing_queue = 1;
7421 pmbox = lpfc_mbox_get(phba);
7423 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7428 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
7429 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
7431 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7432 lpfc_printf_log(phba, KERN_ERR,
7433 LOG_MBOX | LOG_VPORT,
7434 "1806 Mbox x%x failed. No vport\n",
7435 pmbox->u.mb.mbxCommand);
7437 goto out_not_finished;
7441 /* If the PCI channel is in offline state, do not post mbox. */
7442 if (unlikely(pci_channel_offline(phba->pcidev))) {
7443 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7444 goto out_not_finished;
7447 /* If HBA has a deferred error attention, fail the iocb. */
7448 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
7449 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7450 goto out_not_finished;
7456 status = MBX_SUCCESS;
7458 if (phba->link_state == LPFC_HBA_ERROR) {
7459 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7461 /* Mbox command <mbxCommand> cannot issue */
7462 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7463 "(%d):0311 Mailbox command x%x cannot "
7464 "issue Data: x%x x%x\n",
7465 pmbox->vport ? pmbox->vport->vpi : 0,
7466 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
7467 goto out_not_finished;
7470 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
7471 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
7472 !(hc_copy & HC_MBINT_ENA)) {
7473 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7474 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7475 "(%d):2528 Mailbox command x%x cannot "
7476 "issue Data: x%x x%x\n",
7477 pmbox->vport ? pmbox->vport->vpi : 0,
7478 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
7479 goto out_not_finished;
7483 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7484 /* Polling for an mbox command when another one is already active
7485 * is not allowed in SLI. Also, the driver must have established
7486 * SLI2 mode to queue and process multiple mbox commands.
7489 if (flag & MBX_POLL) {
7490 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7492 /* Mbox command <mbxCommand> cannot issue */
7493 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7494 "(%d):2529 Mailbox command x%x "
7495 "cannot issue Data: x%x x%x\n",
7496 pmbox->vport ? pmbox->vport->vpi : 0,
7497 pmbox->u.mb.mbxCommand,
7498 psli->sli_flag, flag);
7499 goto out_not_finished;
7502 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
7503 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7504 /* Mbox command <mbxCommand> cannot issue */
7505 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7506 "(%d):2530 Mailbox command x%x "
7507 "cannot issue Data: x%x x%x\n",
7508 pmbox->vport ? pmbox->vport->vpi : 0,
7509 pmbox->u.mb.mbxCommand,
7510 psli->sli_flag, flag);
7511 goto out_not_finished;
7514 /* Another mailbox command is still being processed, queue this
7515 * command to be processed later.
7517 lpfc_mbox_put(phba, pmbox);
7519 /* Mbox cmd issue - BUSY */
7520 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7521 "(%d):0308 Mbox cmd issue - BUSY Data: "
7522 "x%x x%x x%x x%x\n",
7523 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
7525 phba->pport ? phba->pport->port_state : 0xff,
7526 psli->sli_flag, flag);
7528 psli->slistat.mbox_busy++;
7529 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7532 lpfc_debugfs_disc_trc(pmbox->vport,
7533 LPFC_DISC_TRC_MBOX_VPORT,
7534 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
7535 (uint32_t)mbx->mbxCommand,
7536 mbx->un.varWords[0], mbx->un.varWords[1]);
7539 lpfc_debugfs_disc_trc(phba->pport,
7541 "MBOX Bsy: cmd:x%x mb:x%x x%x",
7542 (uint32_t)mbx->mbxCommand,
7543 mbx->un.varWords[0], mbx->un.varWords[1]);
7549 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7551 /* If we are not polling, we MUST be in SLI2 mode */
7552 if (flag != MBX_POLL) {
7553 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
7554 (mbx->mbxCommand != MBX_KILL_BOARD)) {
7555 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7556 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7557 /* Mbox command <mbxCommand> cannot issue */
7558 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7559 "(%d):2531 Mailbox command x%x "
7560 "cannot issue Data: x%x x%x\n",
7561 pmbox->vport ? pmbox->vport->vpi : 0,
7562 pmbox->u.mb.mbxCommand,
7563 psli->sli_flag, flag);
7564 goto out_not_finished;
7566 /* timeout active mbox command */
7567 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7569 mod_timer(&psli->mbox_tmo, jiffies + timeout);
7572 /* Mailbox cmd <cmd> issue */
7573 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7574 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
7576 pmbox->vport ? pmbox->vport->vpi : 0,
7578 phba->pport ? phba->pport->port_state : 0xff,
7579 psli->sli_flag, flag);
7581 if (mbx->mbxCommand != MBX_HEARTBEAT) {
7583 lpfc_debugfs_disc_trc(pmbox->vport,
7584 LPFC_DISC_TRC_MBOX_VPORT,
7585 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7586 (uint32_t)mbx->mbxCommand,
7587 mbx->un.varWords[0], mbx->un.varWords[1]);
7590 lpfc_debugfs_disc_trc(phba->pport,
7592 "MBOX Send: cmd:x%x mb:x%x x%x",
7593 (uint32_t)mbx->mbxCommand,
7594 mbx->un.varWords[0], mbx->un.varWords[1]);
7598 psli->slistat.mbox_cmd++;
7599 evtctr = psli->slistat.mbox_event;
7601 /* next set own bit for the adapter and copy over command word */
7602 mbx->mbxOwner = OWN_CHIP;
7604 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7605 /* Populate mbox extension offset word. */
7606 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
7607 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7608 = (uint8_t *)phba->mbox_ext
7609 - (uint8_t *)phba->mbox;
7612 /* Copy the mailbox extension data */
7613 if (pmbox->in_ext_byte_len && pmbox->context2) {
7614 lpfc_sli_pcimem_bcopy(pmbox->context2,
7615 (uint8_t *)phba->mbox_ext,
7616 pmbox->in_ext_byte_len);
7618 /* Copy command data to host SLIM area */
7619 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
7621 /* Populate mbox extension offset word. */
7622 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
7623 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7624 = MAILBOX_HBA_EXT_OFFSET;
7626 /* Copy the mailbox extension data */
7627 if (pmbox->in_ext_byte_len && pmbox->context2)
7628 lpfc_memcpy_to_slim(phba->MBslimaddr +
7629 MAILBOX_HBA_EXT_OFFSET,
7630 pmbox->context2, pmbox->in_ext_byte_len);
7632 if (mbx->mbxCommand == MBX_CONFIG_PORT)
7633 /* copy command data into host mbox for cmpl */
7634 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
7637 /* First copy mbox command data to HBA SLIM, skip past first word */
7639 to_slim = phba->MBslimaddr + sizeof (uint32_t);
7640 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
7641 MAILBOX_CMD_SIZE - sizeof (uint32_t));
7643 /* Next copy over first word, with mbxOwner set */
7644 ldata = *((uint32_t *)mbx);
7645 to_slim = phba->MBslimaddr;
7646 writel(ldata, to_slim);
7647 readl(to_slim); /* flush */
7649 if (mbx->mbxCommand == MBX_CONFIG_PORT)
7650 /* switch over to host mailbox */
7651 psli->sli_flag |= LPFC_SLI_ACTIVE;
7658 /* Set up reference to mailbox command */
7659 psli->mbox_active = pmbox;
7660 /* Interrupt board to do it */
7661 writel(CA_MBATT, phba->CAregaddr);
7662 readl(phba->CAregaddr); /* flush */
7663 /* Don't wait for it to finish, just return */
7667 /* Set up null reference to mailbox command */
7668 psli->mbox_active = NULL;
7669 /* Interrupt board to do it */
7670 writel(CA_MBATT, phba->CAregaddr);
7671 readl(phba->CAregaddr); /* flush */
7673 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7674 /* First read mbox status word */
7675 word0 = *((uint32_t *)phba->mbox);
7676 word0 = le32_to_cpu(word0);
7678 /* First read mbox status word */
7679 if (lpfc_readl(phba->MBslimaddr, &word0)) {
7680 spin_unlock_irqrestore(&phba->hbalock,
7682 goto out_not_finished;
7686 /* Read the HBA Host Attention Register */
7687 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7688 spin_unlock_irqrestore(&phba->hbalock,
7690 goto out_not_finished;
7692 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7695 /* Wait for command to complete */
7696 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
7697 (!(ha_copy & HA_MBATT) &&
7698 (phba->link_state > LPFC_WARM_START))) {
7699 if (time_after(jiffies, timeout)) {
7700 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7701 spin_unlock_irqrestore(&phba->hbalock,
7703 goto out_not_finished;
7706 /* Check if we took an mbox interrupt while we were polling */
7708 if (((word0 & OWN_CHIP) != OWN_CHIP)
7709 && (evtctr != psli->slistat.mbox_event))
7713 spin_unlock_irqrestore(&phba->hbalock,
7716 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7719 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7720 /* First copy command data */
7721 word0 = *((uint32_t *)phba->mbox);
7722 word0 = le32_to_cpu(word0);
7723 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
7726 /* Check real SLIM for any errors */
7727 slimword0 = readl(phba->MBslimaddr);
7728 slimmb = (MAILBOX_t *) & slimword0;
7729 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
7730 && slimmb->mbxStatus) {
7737 /* First copy command data */
7738 word0 = readl(phba->MBslimaddr);
7740 /* Read the HBA Host Attention Register */
7741 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7742 spin_unlock_irqrestore(&phba->hbalock,
7744 goto out_not_finished;
7748 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7749 /* copy results back to user */
7750 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
7752 /* Copy the mailbox extension data */
7753 if (pmbox->out_ext_byte_len && pmbox->context2) {
7754 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
7756 pmbox->out_ext_byte_len);
7759 /* First copy command data */
7760 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
7762 /* Copy the mailbox extension data */
7763 if (pmbox->out_ext_byte_len && pmbox->context2) {
7764 lpfc_memcpy_from_slim(pmbox->context2,
7766 MAILBOX_HBA_EXT_OFFSET,
7767 pmbox->out_ext_byte_len);
7771 writel(HA_MBATT, phba->HAregaddr);
7772 readl(phba->HAregaddr); /* flush */
7774 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7775 status = mbx->mbxStatus;
7778 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7782 if (processing_queue) {
7783 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
7784 lpfc_mbox_cmpl_put(phba, pmbox);
7786 return MBX_NOT_FINISHED;
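/*
 * The ownership contract described above matters to every caller: on
 * MBX_BUSY or MBX_SUCCESS the SLI layer keeps the mailbox until its
 * completion; any other return code hands it back. Caller sketch with
 * illustrative cleanup:
 */
#if 0	/* illustrative sketch, not compiled */
	rc = lpfc_sli_issue_mbox(phba, pmbox, MBX_NOWAIT);
	if (rc != MBX_BUSY && rc != MBX_SUCCESS)
		mempool_free(pmbox, phba->mbox_mem_pool);
	/* else: the completion handler releases the mailbox */
#endif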
7790 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
7791 * @phba: Pointer to HBA context object.
7793 * The function blocks the posting of SLI4 asynchronous mailbox commands from
7794 * the driver internal pending mailbox queue. It will then try to wait out the
7795 * possible outstanding mailbox command before returning.
7798 * 0 - the outstanding mailbox command completed; otherwise, the wait for
7799 * the outstanding mailbox command timed out.
7802 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
7804 struct lpfc_sli *psli = &phba->sli;
7806 unsigned long timeout = 0;
7808 /* Mark the asynchronous mailbox command posting as blocked */
7809 spin_lock_irq(&phba->hbalock);
7810 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7811 /* Determine how long we might wait for the active mailbox
7812 * command to be gracefully completed by firmware.
7814 if (phba->sli.mbox_active)
7815 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
7816 phba->sli.mbox_active) *
7818 spin_unlock_irq(&phba->hbalock);
7820 /* Make sure the mailbox is really active */
7822 lpfc_sli4_process_missed_mbox_completions(phba);
7824 /* Wait for the outstanding mailbox command to complete */
7825 while (phba->sli.mbox_active) {
7826 /* Check active mailbox complete status every 2ms */
7828 if (time_after(jiffies, timeout)) {
7829 /* Timeout: mark the outstanding cmd as not complete */
7835 /* Cannot cleanly block async mailbox commands; fail the request */
7837 spin_lock_irq(&phba->hbalock);
7838 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7839 spin_unlock_irq(&phba->hbalock);
7845 * lpfc_sli4_async_mbox_unblock - Block posting SLI4 async mailbox command
7846 * @phba: Pointer to HBA context object.
7848 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
7849 * commands from the driver internal pending mailbox queue. It makes sure
7850 * that there is no outstanding mailbox command before resuming posting
7851 * asynchronous mailbox commands. If, for any reason, there is an
7852 * outstanding mailbox command, it will try to wait it out before resuming
7853 * asynchronous mailbox command posting.
7856 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7858 struct lpfc_sli *psli = &phba->sli;
7860 spin_lock_irq(&phba->hbalock);
7861 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7862 /* Asynchronous mailbox posting is not blocked, do nothing */
7863 spin_unlock_irq(&phba->hbalock);
7867 /* The outstanding synchronous mailbox command is guaranteed to finish,
7868 * either successfully or by timeout. After a timeout the outstanding
7869 * command is always removed, so just unblock posting of async mailbox
7870 * commands and resume. */
7872 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7873 spin_unlock_irq(&phba->hbalock);
7875 /* wake up worker thread to post asynchronous mailbox commands */
7876 lpfc_worker_wake_up(phba);
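/*
 * Block and unblock always bracket a synchronous post when interrupts
 * are enabled, as lpfc_sli_issue_mbox_s4() does further below. Sketch
 * of the bracket (lpfc_sli4_async_mbox_block() returns 0 once the
 * outstanding command has drained):
 */
#if 0	/* illustrative sketch, not compiled */
	if (!lpfc_sli4_async_mbox_block(phba)) {
		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
		lpfc_sli4_async_mbox_unblock(phba);
	}
#endif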
7880 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
7881 * @phba: Pointer to HBA context object.
7882 * @mboxq: Pointer to mailbox object.
7884 * The function waits for the bootstrap mailbox register ready bit from
7885 * port for twice the regular mailbox command timeout value.
7887 * 0 - no timeout on waiting for bootstrap mailbox register ready.
7888 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
7891 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7894 unsigned long timeout;
7895 struct lpfc_register bmbx_reg;
7897 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7901 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7902 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7906 if (time_after(jiffies, timeout))
7907 return MBXERR_ERROR;
7908 } while (!db_ready);
7914 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7915 * @phba: Pointer to HBA context object.
7916 * @mboxq: Pointer to mailbox object.
7918 * The function posts a mailbox to the port. The mailbox is expected
7919 * to be completely filled in and ready for the port to operate on it.
7920 * This routine executes a synchronous completion operation on the
7921 * mailbox by polling for its completion.
7923 * The caller must not be holding any locks when calling this routine.
7926 * MBX_SUCCESS - mailbox posted successfully
7927 * Any of the MBX error values.
7930 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7932 int rc = MBX_SUCCESS;
7933 unsigned long iflag;
7934 uint32_t mcqe_status;
7936 struct lpfc_sli *psli = &phba->sli;
7937 struct lpfc_mqe *mb = &mboxq->u.mqe;
7938 struct lpfc_bmbx_create *mbox_rgn;
7939 struct dma_address *dma_address;
7942 * Only one mailbox can be active to the bootstrap mailbox region
7943 * at a time and there is no queueing provided.
7945 spin_lock_irqsave(&phba->hbalock, iflag);
7946 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7947 spin_unlock_irqrestore(&phba->hbalock, iflag);
7948 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7949 "(%d):2532 Mailbox command x%x (x%x/x%x) "
7950 "cannot issue Data: x%x x%x\n",
7951 mboxq->vport ? mboxq->vport->vpi : 0,
7952 mboxq->u.mb.mbxCommand,
7953 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7954 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7955 psli->sli_flag, MBX_POLL);
7956 return MBXERR_ERROR;
7958 /* The server grabs the token and owns it until release */
7959 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7960 phba->sli.mbox_active = mboxq;
7961 spin_unlock_irqrestore(&phba->hbalock, iflag);
7963 /* wait for the bootstrap mbox register to signal readiness */
7964 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7969 * Initialize the bootstrap memory region to avoid stale data areas
7970 * in the mailbox post. Then copy the caller's mailbox contents to
7971 * the bmbx mailbox region.
7973 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
7974 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
7975 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
7976 sizeof(struct lpfc_mqe));
7978 /* Post the high mailbox dma address to the port and wait for ready. */
7979 dma_address = &phba->sli4_hba.bmbx.dma_address;
7980 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
7982 /* wait for bootstrap mbox register for hi-address write done */
7983 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7987 /* Post the low mailbox dma address to the port. */
7988 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
7990 /* wait for bootstrap mbox register for low address write done */
7991 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7996 * Read the CQ to ensure the mailbox has completed.
7997 * If so, update the mailbox status so that the upper layers
7998 * can complete the request normally.
8000 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8001 sizeof(struct lpfc_mqe));
8002 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8003 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8004 sizeof(struct lpfc_mcqe));
8005 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8007 * When the CQE status indicates a failure and the mailbox status
8008 * indicates success then copy the CQE status into the mailbox status
8009 * (and prefix it with x4000).
8011 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8012 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8013 bf_set(lpfc_mqe_status, mb,
8014 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8017 lpfc_sli4_swap_str(phba, mboxq);
8019 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8020 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8021 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8022 " x%x x%x CQ: x%x x%x x%x x%x\n",
8023 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8024 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8025 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8026 bf_get(lpfc_mqe_status, mb),
8027 mb->un.mb_words[0], mb->un.mb_words[1],
8028 mb->un.mb_words[2], mb->un.mb_words[3],
8029 mb->un.mb_words[4], mb->un.mb_words[5],
8030 mb->un.mb_words[6], mb->un.mb_words[7],
8031 mb->un.mb_words[8], mb->un.mb_words[9],
8032 mb->un.mb_words[10], mb->un.mb_words[11],
8033 mb->un.mb_words[12], mboxq->mcqe.word0,
8034 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
8035 mboxq->mcqe.trailer);
8037 /* We are holding the token; no lock is needed to release it */
8038 spin_lock_irqsave(&phba->hbalock, iflag);
8039 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8040 phba->sli.mbox_active = NULL;
8041 spin_unlock_irqrestore(&phba->hbalock, iflag);
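/*
 * The bootstrap post above is a fixed doorbell sequence, each step
 * gated on the BMBX ready bit. Condensed sketch with error paths
 * trimmed; bmbx is shorthand for phba->sli4_hba.bmbx:
 */
#if 0	/* illustrative sketch, not compiled */
	lpfc_sli4_wait_bmbx_ready(phba, mboxq);		/* port idle */
	lpfc_sli_pcimem_bcopy(mb, bmbx.avirt, sizeof(struct lpfc_mqe));
	writel(bmbx.dma_address.addr_hi, phba->sli4_hba.BMBXregaddr);
	lpfc_sli4_wait_bmbx_ready(phba, mboxq);		/* hi half latched */
	writel(bmbx.dma_address.addr_lo, phba->sli4_hba.BMBXregaddr);
	lpfc_sli4_wait_bmbx_ready(phba, mboxq);		/* command done */
	lpfc_sli_pcimem_bcopy(bmbx.avirt, mb, sizeof(struct lpfc_mqe));
#endif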
8046 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8047 * @phba: Pointer to HBA context object.
8048 * @pmbox: Pointer to mailbox object.
8049 * @flag: Flag indicating how the mailbox need to be processed.
8051 * This function is called by discovery code and HBA management code to submit
8052 * a mailbox command to firmware with SLI-4 interface spec.
8054 * Return codes: the caller owns the mailbox command after the return of the function.
8058 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8061 struct lpfc_sli *psli = &phba->sli;
8062 unsigned long iflags;
8065 /* dump from issue mailbox command if setup */
8066 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8068 rc = lpfc_mbox_dev_check(phba);
8070 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8071 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8072 "cannot issue Data: x%x x%x\n",
8073 mboxq->vport ? mboxq->vport->vpi : 0,
8074 mboxq->u.mb.mbxCommand,
8075 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8076 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8077 psli->sli_flag, flag);
8078 goto out_not_finished;
8081 /* Detect polling mode and jump to a handler */
8082 if (!phba->sli4_hba.intr_enable) {
8083 if (flag == MBX_POLL)
8084 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8087 if (rc != MBX_SUCCESS)
8088 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8089 "(%d):2541 Mailbox command x%x "
8090 "(x%x/x%x) failure: "
8091 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8093 mboxq->vport ? mboxq->vport->vpi : 0,
8094 mboxq->u.mb.mbxCommand,
8095 lpfc_sli_config_mbox_subsys_get(phba,
8097 lpfc_sli_config_mbox_opcode_get(phba,
8099 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8100 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8101 bf_get(lpfc_mcqe_ext_status,
8103 psli->sli_flag, flag);
8105 } else if (flag == MBX_POLL) {
8106 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8107 "(%d):2542 Try to issue mailbox command "
8108 "x%x (x%x/x%x) synchronously ahead of async"
8109 "mailbox command queue: x%x x%x\n",
8110 mboxq->vport ? mboxq->vport->vpi : 0,
8111 mboxq->u.mb.mbxCommand,
8112 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8113 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8114 psli->sli_flag, flag);
8115 /* Try to block the asynchronous mailbox posting */
8116 rc = lpfc_sli4_async_mbox_block(phba);
8118 /* Successfully blocked, now issue sync mbox cmd */
8119 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8120 if (rc != MBX_SUCCESS)
8121 lpfc_printf_log(phba, KERN_WARNING,
8123 "(%d):2597 Sync Mailbox command "
8124 "x%x (x%x/x%x) failure: "
8125 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8127 mboxq->vport ? mboxq->vport->vpi : 0,
8128 mboxq->u.mb.mbxCommand,
8129 lpfc_sli_config_mbox_subsys_get(phba,
8131 lpfc_sli_config_mbox_opcode_get(phba,
8133 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8134 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8135 bf_get(lpfc_mcqe_ext_status,
8137 psli->sli_flag, flag);
8138 /* Unblock the async mailbox posting afterward */
8139 lpfc_sli4_async_mbox_unblock(phba);
8144 /* Now, interrupt mode asynchronous mailbox command */
8145 rc = lpfc_mbox_cmd_check(phba, mboxq);
8147 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8148 "(%d):2543 Mailbox command x%x (x%x/x%x) "
8149 "cannot issue Data: x%x x%x\n",
8150 mboxq->vport ? mboxq->vport->vpi : 0,
8151 mboxq->u.mb.mbxCommand,
8152 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8153 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8154 psli->sli_flag, flag);
8155 goto out_not_finished;
8158 /* Put the mailbox command into the driver's internal FIFO */
8159 psli->slistat.mbox_busy++;
8160 spin_lock_irqsave(&phba->hbalock, iflags);
8161 lpfc_mbox_put(phba, mboxq);
8162 spin_unlock_irqrestore(&phba->hbalock, iflags);
8163 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8164 "(%d):0354 Mbox cmd issue - Enqueue Data: "
8165 "x%x (x%x/x%x) x%x x%x x%x\n",
8166 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8167 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8168 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8169 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8170 phba->pport->port_state,
8171 psli->sli_flag, MBX_NOWAIT);
8172 /* Wake up the worker thread to post the mailbox command from the queue head */
8173 lpfc_worker_wake_up(phba);
8178 return MBX_NOT_FINISHED;
8182 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8183 * @phba: Pointer to HBA context object.
8185 * This function is called by the worker thread to send a mailbox command
8186 * to the SLI4 HBA firmware.
8190 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8192 struct lpfc_sli *psli = &phba->sli;
8193 LPFC_MBOXQ_t *mboxq;
8194 int rc = MBX_SUCCESS;
8195 unsigned long iflags;
8196 struct lpfc_mqe *mqe;
8199 /* Check the interrupt mode before posting the async mailbox command */
8200 if (unlikely(!phba->sli4_hba.intr_enable))
8201 return MBX_NOT_FINISHED;
8203 /* Check for mailbox command service token */
8204 spin_lock_irqsave(&phba->hbalock, iflags);
8205 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8206 spin_unlock_irqrestore(&phba->hbalock, iflags);
8207 return MBX_NOT_FINISHED;
8209 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8210 spin_unlock_irqrestore(&phba->hbalock, iflags);
8211 return MBX_NOT_FINISHED;
8213 if (unlikely(phba->sli.mbox_active)) {
8214 spin_unlock_irqrestore(&phba->hbalock, iflags);
8215 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8216 "0384 There is pending active mailbox cmd\n");
8217 return MBX_NOT_FINISHED;
8219 /* Take the mailbox command service token */
8220 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8222 /* Get the next mailbox command from head of queue */
8223 mboxq = lpfc_mbox_get(phba);
8225 /* If no more mailbox commands are waiting to be posted, we're done */
8227 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8228 spin_unlock_irqrestore(&phba->hbalock, iflags);
8231 phba->sli.mbox_active = mboxq;
8232 spin_unlock_irqrestore(&phba->hbalock, iflags);
8234 /* Check device readiness for posting mailbox command */
8235 rc = lpfc_mbox_dev_check(phba);
8237 /* The driver cleanup routine will clean up the pending mailbox */
8238 goto out_not_finished;
8240 /* Prepare the mbox command to be posted */
8241 mqe = &mboxq->u.mqe;
8242 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8244 /* Start timer for the mbox_tmo and log some mailbox post messages */
8245 mod_timer(&psli->mbox_tmo, (jiffies +
8246 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
8248 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8249 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
8251 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8252 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8253 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8254 phba->pport->port_state, psli->sli_flag);
8256 if (mbx_cmnd != MBX_HEARTBEAT) {
8258 lpfc_debugfs_disc_trc(mboxq->vport,
8259 LPFC_DISC_TRC_MBOX_VPORT,
8260 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8261 mbx_cmnd, mqe->un.mb_words[0],
8262 mqe->un.mb_words[1]);
8264 lpfc_debugfs_disc_trc(phba->pport,
8266 "MBOX Send: cmd:x%x mb:x%x x%x",
8267 mbx_cmnd, mqe->un.mb_words[0],
8268 mqe->un.mb_words[1]);
8271 psli->slistat.mbox_cmd++;
8273 /* Post the mailbox command to the port */
8274 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8275 if (rc != MBX_SUCCESS) {
8276 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8277 "(%d):2533 Mailbox command x%x (x%x/x%x) "
8278 "cannot issue Data: x%x x%x\n",
8279 mboxq->vport ? mboxq->vport->vpi : 0,
8280 mboxq->u.mb.mbxCommand,
8281 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8282 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8283 psli->sli_flag, MBX_NOWAIT);
8284 goto out_not_finished;
8290 spin_lock_irqsave(&phba->hbalock, iflags);
8291 if (phba->sli.mbox_active) {
8292 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8293 __lpfc_mbox_cmpl_put(phba, mboxq);
8294 /* Release the token */
8295 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8296 phba->sli.mbox_active = NULL;
8298 spin_unlock_irqrestore(&phba->hbalock, iflags);
8300 return MBX_NOT_FINISHED;
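
/*
 * Editor's sketch (hypothetical helper, not driver source): the
 * single-outstanding-command discipline enforced above reduces to a
 * test-and-set of LPFC_SLI_MBOX_ACTIVE under hbalock, refused while
 * async posting is blocked.
 */
static inline bool
lpfc_sketch_try_take_mbox_token(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long iflags;
	bool taken = false;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (!(psli->sli_flag & (LPFC_SLI_MBOX_ACTIVE |
				LPFC_SLI_ASYNC_MBX_BLK))) {
		psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;	/* token taken */
		taken = true;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return taken;
}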
8304 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8305 * @phba: Pointer to HBA context object.
8306 * @pmbox: Pointer to mailbox object.
8307 * @flag: Flag indicating how the mailbox needs to be processed.
8309 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine, called
8310 * through the API jump table function pointer in the lpfc_hba struct.
8312 * Return codes: the caller owns the mailbox command after the function returns.
8316 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8318 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
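
/*
 * Editor's usage sketch (hypothetical helper, not driver source): a
 * typical caller allocates a mailbox from the driver mempool, builds a
 * command (READ_REV here), and issues it through the wrapper above; in
 * MBX_POLL mode the caller owns the mailbox again as soon as the call
 * returns and must free it.
 */
static int
lpfc_sketch_poll_read_rev(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	lpfc_read_rev(phba, mbox);	/* build the READ_REV command */
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	mempool_free(mbox, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}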
8322 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
8323 * @phba: The hba struct for which this call is being executed.
8324 * @dev_grp: The HBA PCI-Device group number.
8326 * This routine sets up the mbox interface API function jump table in the @phba struct.
8328 * Returns: 0 - success, -ENODEV - failure.
8331 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8335 case LPFC_PCI_DEV_LP:
8336 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8337 phba->lpfc_sli_handle_slow_ring_event =
8338 lpfc_sli_handle_slow_ring_event_s3;
8339 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8340 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8341 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8343 case LPFC_PCI_DEV_OC:
8344 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8345 phba->lpfc_sli_handle_slow_ring_event =
8346 lpfc_sli_handle_slow_ring_event_s4;
8347 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8348 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8349 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8352 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8353 "1420 Invalid HBA PCI-device group: 0x%x\n",
8362 * __lpfc_sli_ringtx_put - Add an iocb to the txq
8363 * @phba: Pointer to HBA context object.
8364 * @pring: Pointer to driver SLI ring object.
8365 * @piocb: Pointer to address of newly added command iocb.
8367 * This function is called with hbalock held to add a command
8368 * iocb to the txq when the SLI layer cannot submit the command iocb to the firmware.
8372 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8373 struct lpfc_iocbq *piocb)
8375 lockdep_assert_held(&phba->hbalock);
8376 /* Insert the caller's iocb in the txq tail for later processing. */
8377 list_add_tail(&piocb->list, &pring->txq);
8381 * lpfc_sli_next_iocb - Get the next iocb in the txq
8382 * @phba: Pointer to HBA context object.
8383 * @pring: Pointer to driver SLI ring object.
8384 * @piocb: Pointer to address of newly added command iocb.
8386 * This function is called with hbalock held before a new
8387 * iocb is submitted to the firmware. This function checks
8388 * txq to flush the iocbs in txq to Firmware before
8389 * submitting new iocbs to the Firmware.
8390 * If there are iocbs in the txq which need to be submitted
8391 * to firmware, lpfc_sli_next_iocb returns the first element
8392 * of the txq after dequeuing it from txq.
8393 * If there is no iocb in the txq then the function returns the original
8394 * *piocb and sets *piocb to NULL. The caller needs to check *piocb to
8395 * find out whether there are more commands in the txq.
8397 static struct lpfc_iocbq *
8398 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8399 struct lpfc_iocbq **piocb)
8401 struct lpfc_iocbq * nextiocb;
8403 lockdep_assert_held(&phba->hbalock);
8405 nextiocb = lpfc_sli_ringtx_get(phba, pring);
8415 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
8416 * @phba: Pointer to HBA context object.
8417 * @ring_number: SLI ring number to issue iocb on.
8418 * @piocb: Pointer to command iocb.
8419 * @flag: Flag indicating if this command can be put into txq.
8421 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
8422 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
8423 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
8424 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
8425 * this function allows only iocbs for posting buffers. This function finds
8426 * next available slot in the command ring and posts the command to the
8427 * available slot and writes the port attention register to request HBA start
8428 * processing new iocb. If there is no slot available in the ring and
8429 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
8430 * the function returns IOCB_BUSY.
8432 * This function is called with hbalock held. The function will return success
8433 * after it successfully submits the iocb to the firmware or after adding it to the txq.
8437 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
8438 struct lpfc_iocbq *piocb, uint32_t flag)
8440 struct lpfc_iocbq *nextiocb;
8442 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
8444 lockdep_assert_held(&phba->hbalock);
8446 if (piocb->iocb_cmpl && (!piocb->vport) &&
8447 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
8448 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
8449 lpfc_printf_log(phba, KERN_ERR,
8450 LOG_SLI | LOG_VPORT,
8451 "1807 IOCB x%x failed. No vport\n",
8452 piocb->iocb.ulpCommand);
8458 /* If the PCI channel is in offline state, do not post iocbs. */
8459 if (unlikely(pci_channel_offline(phba->pcidev)))
8462 /* If HBA has a deferred error attention, fail the iocb. */
8463 if (unlikely(phba->hba_flag & DEFER_ERATT))
8467 * We should never get an IOCB if we are in a < LINK_DOWN state
8469 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
8473 * Check to see if we are blocking IOCB processing because of an
8474 * outstanding event.
8476 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
8479 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
8481 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
8482 * can be issued if the link is not up.
8484 switch (piocb->iocb.ulpCommand) {
8485 case CMD_GEN_REQUEST64_CR:
8486 case CMD_GEN_REQUEST64_CX:
8487 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
8488 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
8489 FC_RCTL_DD_UNSOL_CMD) ||
8490 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
8491 MENLO_TRANSPORT_TYPE))
8495 case CMD_QUE_RING_BUF_CN:
8496 case CMD_QUE_RING_BUF64_CN:
8498 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
8499 * completion, iocb_cmpl MUST be 0.
8501 if (piocb->iocb_cmpl)
8502 piocb->iocb_cmpl = NULL;
8504 case CMD_CREATE_XRI_CR:
8505 case CMD_CLOSE_XRI_CN:
8506 case CMD_CLOSE_XRI_CX:
8513 * For FCP commands, we must be in a state where we can process link attention events.
8516 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
8517 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
8521 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
8522 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
8523 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
8526 lpfc_sli_update_ring(phba, pring);
8528 lpfc_sli_update_full_ring(phba, pring);
8531 return IOCB_SUCCESS;
8536 pring->stats.iocb_cmd_delay++;
8540 if (!(flag & SLI_IOCB_RET_IOCB)) {
8541 __lpfc_sli_ringtx_put(phba, pring, piocb);
8542 return IOCB_SUCCESS;
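
/*
 * Editor's sketch (hypothetical helper, not driver source): caller-side
 * view of the SLI_IOCB_RET_IOCB flag handled above. Without the flag a
 * full ring silently parks the iocb on the txq; with the flag the iocb
 * bounces back to the caller as IOCB_BUSY. hbalock must be held.
 */
static int
lpfc_sketch_issue_keep_on_busy(struct lpfc_hba *phba, uint32_t ring_number,
			       struct lpfc_iocbq *piocb)
{
	int rc;

	lockdep_assert_held(&phba->hbalock);
	rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb,
				      SLI_IOCB_RET_IOCB);
	if (rc == IOCB_BUSY) {
		/* Ring full: the iocb was not queued, caller still owns it */
	}
	return rc;
}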
8549 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
8550 * @phba: Pointer to HBA context object.
8551 * @piocb: Pointer to command iocb.
8552 * @sglq: Pointer to the scatter gather queue object.
8554 * This routine converts the bpl or bde that is in the IOCB
8555 * to a sgl list for the sli4 hardware. The physical address
8556 * of the bpl/bde is converted back to a virtual address.
8557 * If the IOCB contains a BPL then the list of BDE's is
8558 * converted to sli4_sge's. If the IOCB contains a single
8559 * BDE then it is converted to a single sli4_sge.
8560 * The IOCB is still in CPU endianness, so the contents of
8561 * the bpl can be used without byte swapping.
8563 * Returns valid XRI = Success, NO_XRI = Failure.
8566 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
8567 struct lpfc_sglq *sglq)
8569 uint16_t xritag = NO_XRI;
8570 struct ulp_bde64 *bpl = NULL;
8571 struct ulp_bde64 bde;
8572 struct sli4_sge *sgl = NULL;
8573 struct lpfc_dmabuf *dmabuf;
8577 uint32_t offset = 0; /* accumulated offset in the sg request list */
8578 int inbound = 0; /* number of sg reply entries inbound from firmware */
8580 if (!piocbq || !sglq)
8583 sgl = (struct sli4_sge *)sglq->sgl;
8584 icmd = &piocbq->iocb;
8585 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
8586 return sglq->sli4_xritag;
8587 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8588 numBdes = icmd->un.genreq64.bdl.bdeSize /
8589 sizeof(struct ulp_bde64);
8590 /* The addrHigh and addrLow fields within the IOCB
8591 * have not been byteswapped yet so there is no
8592 * need to swap them back.
8594 if (piocbq->context3)
8595 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
8599 bpl = (struct ulp_bde64 *)dmabuf->virt;
8603 for (i = 0; i < numBdes; i++) {
8604 /* Should already be byte swapped. */
8605 sgl->addr_hi = bpl->addrHigh;
8606 sgl->addr_lo = bpl->addrLow;
8608 sgl->word2 = le32_to_cpu(sgl->word2);
8609 if ((i+1) == numBdes)
8610 bf_set(lpfc_sli4_sge_last, sgl, 1);
8612 bf_set(lpfc_sli4_sge_last, sgl, 0);
8613 /* swap the size field back to the cpu so we
8614 * can assign it to the sgl.
8616 bde.tus.w = le32_to_cpu(bpl->tus.w);
8617 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
8618 /* The offsets in the sgl need to be accumulated
8619 * separately for the request and reply lists.
8620 * The request is always first, the reply follows.
8622 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
8623 /* add up the reply sg entries */
8624 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
8626 /* first inbound? reset the offset */
8629 bf_set(lpfc_sli4_sge_offset, sgl, offset);
8630 bf_set(lpfc_sli4_sge_type, sgl,
8631 LPFC_SGE_TYPE_DATA);
8632 offset += bde.tus.f.bdeSize;
8634 sgl->word2 = cpu_to_le32(sgl->word2);
8638 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
8639 /* The addrHigh and addrLow fields of the BDE have not
8640 * been byteswapped yet so they need to be swapped
8641 * before putting them in the sgl.
8644 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
8646 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
8647 sgl->word2 = le32_to_cpu(sgl->word2);
8648 bf_set(lpfc_sli4_sge_last, sgl, 1);
8649 sgl->word2 = cpu_to_le32(sgl->word2);
8651 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
8653 return sglq->sli4_xritag;
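
/*
 * Editor's sketch (hypothetical helper, not driver source): the
 * per-entry core of the BPL-to-SGL translation above. The address words
 * are already little-endian in the BPL, while word2 and the size field
 * must round-trip through CPU order so the bit fields can be set.
 */
static void
lpfc_sketch_bde_to_sge(struct ulp_bde64 *bpl, struct sli4_sge *sgl,
		       int is_last)
{
	struct ulp_bde64 bde;

	sgl->addr_hi = bpl->addrHigh;		/* already little-endian */
	sgl->addr_lo = bpl->addrLow;		/* already little-endian */
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, is_last ? 1 : 0);
	sgl->word2 = cpu_to_le32(sgl->word2);
	bde.tus.w = le32_to_cpu(bpl->tus.w);	/* size back to CPU order */
	sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
}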
8657 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
8658 * @phba: Pointer to HBA context object.
8659 * @piocb: Pointer to command iocb.
8660 * @wqe: Pointer to the work queue entry.
8662 * This routine converts the iocb command to its Work Queue Entry
8663 * equivalent. The wqe pointer should not have any fields set when
8664 * this routine is called because it will memcpy over them.
8665 * This routine does not set the CQ_ID or the WQEC bits in the wqe.
8668 * Returns: 0 = Success, IOCB_ERROR = Failure.
8671 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8672 union lpfc_wqe *wqe)
8674 uint32_t xmit_len = 0, total_len = 0;
8678 uint8_t command_type = ELS_COMMAND_NON_FIP;
8681 uint16_t abrt_iotag;
8682 struct lpfc_iocbq *abrtiocbq;
8683 struct ulp_bde64 *bpl = NULL;
8684 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
8686 struct ulp_bde64 bde;
8687 struct lpfc_nodelist *ndlp;
8691 fip = phba->hba_flag & HBA_FIP_SUPPORT;
8692 /* The fcp commands will set command type */
8693 if (iocbq->iocb_flag & LPFC_IO_FCP)
8694 command_type = FCP_COMMAND;
8695 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
8696 command_type = ELS_COMMAND_FIP;
8698 command_type = ELS_COMMAND_NON_FIP;
8700 if (phba->fcp_embed_io)
8701 memset(wqe, 0, sizeof(union lpfc_wqe128));
8702 /* Some of the fields are in the right position already */
8703 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
8704 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
8705 /* The ct field has moved so reset */
8706 wqe->generic.wqe_com.word7 = 0;
8707 wqe->generic.wqe_com.word10 = 0;
8710 abort_tag = (uint32_t) iocbq->iotag;
8711 xritag = iocbq->sli4_xritag;
8712 /* words0-2 bpl convert bde */
8713 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8714 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8715 sizeof(struct ulp_bde64);
8716 bpl = (struct ulp_bde64 *)
8717 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
8721 /* Should already be byte swapped. */
8722 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
8723 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
8724 /* swap the size field back to the cpu so we
8725 * can assign it to the sgl.
8727 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
8728 xmit_len = wqe->generic.bde.tus.f.bdeSize;
8730 for (i = 0; i < numBdes; i++) {
8731 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
8732 total_len += bde.tus.f.bdeSize;
8735 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
8737 iocbq->iocb.ulpIoTag = iocbq->iotag;
8738 cmnd = iocbq->iocb.ulpCommand;
8740 switch (iocbq->iocb.ulpCommand) {
8741 case CMD_ELS_REQUEST64_CR:
8742 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
8743 ndlp = iocbq->context_un.ndlp;
8745 ndlp = (struct lpfc_nodelist *)iocbq->context1;
8746 if (!iocbq->iocb.ulpLe) {
8747 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8748 "2007 Only Limited Edition cmd Format"
8749 " supported 0x%x\n",
8750 iocbq->iocb.ulpCommand);
8754 wqe->els_req.payload_len = xmit_len;
8755 /* Els_request64 has a TMO */
8756 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
8757 iocbq->iocb.ulpTimeout);
8758 /* Need a VF for word 4 set the vf bit*/
8759 bf_set(els_req64_vf, &wqe->els_req, 0);
8760 /* And a VFID for word 12 */
8761 bf_set(els_req64_vfid, &wqe->els_req, 0);
8762 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8763 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8764 iocbq->iocb.ulpContext);
8765 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
8766 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
8767 /* CCP CCPE PV PRI in word10 were set in the memcpy */
8768 if (command_type == ELS_COMMAND_FIP)
8769 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
8770 >> LPFC_FIP_ELS_ID_SHIFT);
8771 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8772 iocbq->context2)->virt);
8773 if_type = bf_get(lpfc_sli_intf_if_type,
8774 &phba->sli4_hba.sli_intf);
8775 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8776 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
8777 *pcmd == ELS_CMD_SCR ||
8778 *pcmd == ELS_CMD_FDISC ||
8779 *pcmd == ELS_CMD_LOGO ||
8780 *pcmd == ELS_CMD_PLOGI)) {
8781 bf_set(els_req64_sp, &wqe->els_req, 1);
8782 bf_set(els_req64_sid, &wqe->els_req,
8783 iocbq->vport->fc_myDID);
8784 if ((*pcmd == ELS_CMD_FLOGI) &&
8785 !(phba->fc_topology ==
8786 LPFC_TOPOLOGY_LOOP))
8787 bf_set(els_req64_sid, &wqe->els_req, 0);
8788 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
8789 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8790 phba->vpi_ids[iocbq->vport->vpi]);
8791 } else if (pcmd && iocbq->context1) {
8792 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
8793 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8794 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8797 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
8798 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8799 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
8800 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
8801 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
8802 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
8803 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8804 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
8805 wqe->els_req.max_response_payload_len = total_len - xmit_len;
8807 case CMD_XMIT_SEQUENCE64_CX:
8808 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
8809 iocbq->iocb.un.ulpWord[3]);
8810 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
8811 iocbq->iocb.unsli3.rcvsli3.ox_id);
8812 /* The entire sequence is transmitted for this IOCB */
8813 xmit_len = total_len;
8814 cmnd = CMD_XMIT_SEQUENCE64_CR;
8815 if (phba->link_flag & LS_LOOPBACK_MODE)
8816 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
8817 case CMD_XMIT_SEQUENCE64_CR:
8818 /* word3 iocb=io_tag32 wqe=reserved */
8819 wqe->xmit_sequence.rsvd3 = 0;
8820 /* word4 relative_offset memcpy */
8821 /* word5 r_ctl/df_ctl memcpy */
8822 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
8823 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
8824 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
8825 LPFC_WQE_IOD_WRITE);
8826 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
8827 LPFC_WQE_LENLOC_WORD12);
8828 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
8829 wqe->xmit_sequence.xmit_len = xmit_len;
8830 command_type = OTHER_COMMAND;
8832 case CMD_XMIT_BCAST64_CN:
8833 /* word3 iocb=iotag32 wqe=seq_payload_len */
8834 wqe->xmit_bcast64.seq_payload_len = xmit_len;
8835 /* word4 iocb=rsvd wqe=rsvd */
8836 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
8837 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
8838 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
8839 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8840 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
8841 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
8842 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
8843 LPFC_WQE_LENLOC_WORD3);
8844 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
8846 case CMD_FCP_IWRITE64_CR:
8847 command_type = FCP_COMMAND_DATA_OUT;
8848 /* word3 iocb=iotag wqe=payload_offset_len */
8849 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8850 bf_set(payload_offset_len, &wqe->fcp_iwrite,
8851 xmit_len + sizeof(struct fcp_rsp));
8852 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
8854 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8855 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8856 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
8857 iocbq->iocb.ulpFCP2Rcvy);
8858 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
8859 /* Always open the exchange */
8860 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
8861 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
8862 LPFC_WQE_LENLOC_WORD4);
8863 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
8864 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
8865 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8866 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
8867 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
8868 if (iocbq->priority) {
8869 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8870 (iocbq->priority << 1));
8872 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8873 (phba->cfg_XLanePriority << 1));
8876 /* Note, word 10 is already initialized to 0 */
8878 if (phba->fcp_embed_io) {
8879 struct lpfc_scsi_buf *lpfc_cmd;
8880 struct sli4_sge *sgl;
8881 union lpfc_wqe128 *wqe128;
8882 struct fcp_cmnd *fcp_cmnd;
8885 /* 128 byte wqe support here */
8886 wqe128 = (union lpfc_wqe128 *)wqe;
8888 lpfc_cmd = iocbq->context1;
8889 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8890 fcp_cmnd = lpfc_cmd->fcp_cmnd;
8892 /* Word 0-2 - FCP_CMND */
8893 wqe128->generic.bde.tus.f.bdeFlags =
8894 BUFF_TYPE_BDE_IMMED;
8895 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8896 wqe128->generic.bde.addrHigh = 0;
8897 wqe128->generic.bde.addrLow = 88; /* Word 22 */
8899 bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1);
8901 /* Word 22-29 FCP CMND Payload */
8902 ptr = &wqe128->words[22];
8903 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8906 case CMD_FCP_IREAD64_CR:
8907 /* word3 iocb=iotag wqe=payload_offset_len */
8908 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8909 bf_set(payload_offset_len, &wqe->fcp_iread,
8910 xmit_len + sizeof(struct fcp_rsp));
8911 bf_set(cmd_buff_len, &wqe->fcp_iread,
8913 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8914 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8915 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
8916 iocbq->iocb.ulpFCP2Rcvy);
8917 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
8918 /* Always open the exchange */
8919 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
8920 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
8921 LPFC_WQE_LENLOC_WORD4);
8922 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
8923 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
8924 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8925 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
8926 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
8927 if (iocbq->priority) {
8928 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8929 (iocbq->priority << 1));
8931 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8932 (phba->cfg_XLanePriority << 1));
8935 /* Note, word 10 is already initialized to 0 */
8937 if (phba->fcp_embed_io) {
8938 struct lpfc_scsi_buf *lpfc_cmd;
8939 struct sli4_sge *sgl;
8940 union lpfc_wqe128 *wqe128;
8941 struct fcp_cmnd *fcp_cmnd;
8944 /* 128 byte wqe support here */
8945 wqe128 = (union lpfc_wqe128 *)wqe;
8947 lpfc_cmd = iocbq->context1;
8948 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8949 fcp_cmnd = lpfc_cmd->fcp_cmnd;
8951 /* Word 0-2 - FCP_CMND */
8952 wqe128->generic.bde.tus.f.bdeFlags =
8953 BUFF_TYPE_BDE_IMMED;
8954 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8955 wqe128->generic.bde.addrHigh = 0;
8956 wqe128->generic.bde.addrLow = 88; /* Word 22 */
8958 bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1);
8960 /* Word 22-29 FCP CMND Payload */
8961 ptr = &wqe128->words[22];
8962 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8965 case CMD_FCP_ICMND64_CR:
8966 /* word3 iocb=iotag wqe=payload_offset_len */
8967 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8968 bf_set(payload_offset_len, &wqe->fcp_icmd,
8969 xmit_len + sizeof(struct fcp_rsp));
8970 bf_set(cmd_buff_len, &wqe->fcp_icmd,
8972 /* word3 iocb=IO_TAG wqe=reserved */
8973 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
8974 /* Always open the exchange */
8975 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
8976 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
8977 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
8978 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
8979 LPFC_WQE_LENLOC_NONE);
8980 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
8981 iocbq->iocb.ulpFCP2Rcvy);
8982 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8983 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
8984 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
8985 if (iocbq->priority) {
8986 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8987 (iocbq->priority << 1));
8989 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8990 (phba->cfg_XLanePriority << 1));
8993 /* Note, word 10 is already initialized to 0 */
8995 if (phba->fcp_embed_io) {
8996 struct lpfc_scsi_buf *lpfc_cmd;
8997 struct sli4_sge *sgl;
8998 union lpfc_wqe128 *wqe128;
8999 struct fcp_cmnd *fcp_cmnd;
9002 /* 128 byte wqe support here */
9003 wqe128 = (union lpfc_wqe128 *)wqe;
9005 lpfc_cmd = iocbq->context1;
9006 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
9007 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9009 /* Word 0-2 - FCP_CMND */
9010 wqe128->generic.bde.tus.f.bdeFlags =
9011 BUFF_TYPE_BDE_IMMED;
9012 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
9013 wqe128->generic.bde.addrHigh = 0;
9014 wqe128->generic.bde.addrLow = 88; /* Word 22 */
9016 bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1);
9018 /* Word 22-29 FCP CMND Payload */
9019 ptr = &wqe128->words[22];
9020 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9023 case CMD_GEN_REQUEST64_CR:
9024 /* For this command calculate the xmit length of the request bde */
9028 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9029 sizeof(struct ulp_bde64);
9030 for (i = 0; i < numBdes; i++) {
9031 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9032 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9034 xmit_len += bde.tus.f.bdeSize;
9036 /* word3 iocb=IO_TAG wqe=request_payload_len */
9037 wqe->gen_req.request_payload_len = xmit_len;
9038 /* word4 iocb=parameter wqe=relative_offset memcpy */
9039 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
9040 /* word6 context tag copied in memcpy */
9041 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9042 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9043 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9044 "2015 Invalid CT %x command 0x%x\n",
9045 ct, iocbq->iocb.ulpCommand);
9048 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9049 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9050 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9051 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9052 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9053 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9054 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9055 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9056 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9057 command_type = OTHER_COMMAND;
9059 case CMD_XMIT_ELS_RSP64_CX:
9060 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9061 /* words0-2 BDE memcpy */
9062 /* word3 iocb=iotag32 wqe=response_payload_len */
9063 wqe->xmit_els_rsp.response_payload_len = xmit_len;
9065 wqe->xmit_els_rsp.word4 = 0;
9066 /* word5 iocb=rsvd wqe=did */
9067 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9068 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9070 if_type = bf_get(lpfc_sli_intf_if_type,
9071 &phba->sli4_hba.sli_intf);
9072 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
9073 if (iocbq->vport->fc_flag & FC_PT2PT) {
9074 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9075 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9076 iocbq->vport->fc_myDID);
9077 if (iocbq->vport->fc_myDID == Fabric_DID) {
9079 &wqe->xmit_els_rsp.wqe_dest, 0);
9083 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9084 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9085 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9086 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9087 iocbq->iocb.unsli3.rcvsli3.ox_id);
9088 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9089 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9090 phba->vpi_ids[iocbq->vport->vpi]);
9091 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9092 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9093 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9094 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9095 LPFC_WQE_LENLOC_WORD3);
9096 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9097 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9098 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9099 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9100 iocbq->context2)->virt);
9101 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9102 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9103 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9104 iocbq->vport->fc_myDID);
9105 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9106 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9107 phba->vpi_ids[phba->pport->vpi]);
9109 command_type = OTHER_COMMAND;
9111 case CMD_CLOSE_XRI_CN:
9112 case CMD_ABORT_XRI_CN:
9113 case CMD_ABORT_XRI_CX:
9114 /* words 0-2 memcpy should be 0 (reserved) */
9115 /* port will send abts */
9116 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9117 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9118 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9119 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9123 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9125 * The link is down, or the command was ELS_FIP
9126 * so the fw does not need to send the abts on the wire.
9129 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9131 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9132 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9133 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9134 wqe->abort_cmd.rsrvd5 = 0;
9135 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9136 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9137 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9139 * The abort handler will send us CMD_ABORT_XRI_CN or
9140 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9142 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9143 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9144 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9145 LPFC_WQE_LENLOC_NONE);
9146 cmnd = CMD_ABORT_XRI_CX;
9147 command_type = OTHER_COMMAND;
9150 case CMD_XMIT_BLS_RSP64_CX:
9151 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9152 /* As BLS ABTS RSP WQE is very different from other WQEs,
9153 * we re-construct this WQE here based on information in
9154 * iocbq from scratch.
9156 memset(wqe, 0, sizeof(union lpfc_wqe));
9157 /* OX_ID is invariant regardless of who sent the ABTS to the CT exchange */
9158 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
9159 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9160 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
9161 LPFC_ABTS_UNSOL_INT) {
9162 /* ABTS sent by initiator to CT exchange, the
9163 * RX_ID field will be filled with the newly
9164 * allocated responder XRI.
9166 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9167 iocbq->sli4_xritag);
9169 /* ABTS sent by responder to CT exchange, the
9170 * RX_ID field will be filled with the responder
9173 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9174 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
9176 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9177 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
9180 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9182 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9183 iocbq->iocb.ulpContext);
9184 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
9185 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
9186 phba->vpi_ids[phba->pport->vpi]);
9187 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9188 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9189 LPFC_WQE_LENLOC_NONE);
9190 /* Overwrite the pre-set command type with OTHER_COMMAND */
9191 command_type = OTHER_COMMAND;
9192 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9193 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9194 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9195 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9196 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9197 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9198 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9202 case CMD_SEND_FRAME:
9203 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9204 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9206 case CMD_XRI_ABORTED_CX:
9207 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
9208 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9209 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9210 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9211 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9213 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9214 "2014 Invalid command 0x%x\n",
9215 iocbq->iocb.ulpCommand);
9220 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9221 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9222 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9223 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9224 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9225 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9226 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9227 LPFC_IO_DIF_INSERT);
9228 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9229 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9230 wqe->generic.wqe_com.abort_tag = abort_tag;
9231 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9232 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9233 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9234 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9239 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9240 * @phba: Pointer to HBA context object.
9241 * @ring_number: SLI ring number to issue iocb on.
9242 * @piocb: Pointer to command iocb.
9243 * @flag: Flag indicating if this command can be put into txq.
9245 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9246 * an iocb command to an HBA with SLI-4 interface spec.
9248 * This function is called with hbalock held. The function will return success
9249 * after it successfully submits the iocb to the firmware or after adding it to the txq.
9253 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9254 struct lpfc_iocbq *piocb, uint32_t flag)
9256 struct lpfc_sglq *sglq;
9257 union lpfc_wqe *wqe;
9258 union lpfc_wqe128 wqe128;
9259 struct lpfc_queue *wq;
9260 struct lpfc_sli_ring *pring;
9263 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9264 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9265 if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
9266 wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
9268 wq = phba->sli4_hba.oas_wq;
9270 wq = phba->sli4_hba.els_wq;
9273 /* Get corresponding ring */
9277 * The WQE can be either 64 or 128 bytes,
9278 * so allocate space on the stack assuming the largest.
9280 wqe = (union lpfc_wqe *)&wqe128;
9282 lockdep_assert_held(&phba->hbalock);
9284 if (piocb->sli4_xritag == NO_XRI) {
9285 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
9286 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
9289 if (!list_empty(&pring->txq)) {
9290 if (!(flag & SLI_IOCB_RET_IOCB)) {
9291 __lpfc_sli_ringtx_put(phba,
9293 return IOCB_SUCCESS;
9298 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
9300 if (!(flag & SLI_IOCB_RET_IOCB)) {
9301 __lpfc_sli_ringtx_put(phba,
9304 return IOCB_SUCCESS;
9310 } else if (piocb->iocb_flag & LPFC_IO_FCP)
9311 /* These IO's already have an XRI and a mapped sgl. */
9315 * This is a continuation of a command (CX), so this
9316 * sglq is on the active list
9318 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
9324 piocb->sli4_lxritag = sglq->sli4_lxritag;
9325 piocb->sli4_xritag = sglq->sli4_xritag;
9326 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
9330 if (lpfc_sli4_iocb2wqe(phba, piocb, wqe))
9333 if (lpfc_sli4_wq_put(wq, wqe))
9335 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9341 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
9343 * This routine wraps the actual lockless IOCB issue routine, called
9344 * through the function pointer in the lpfc_hba struct.
9347 * IOCB_ERROR - Error
9348 * IOCB_SUCCESS - Success
9352 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9353 struct lpfc_iocbq *piocb, uint32_t flag)
9355 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9359 * lpfc_sli_api_table_setup - Set up sli api function jump table
9360 * @phba: The hba struct for which this call is being executed.
9361 * @dev_grp: The HBA PCI-Device group number.
9363 * This routine sets up the SLI interface API function jump table in the @phba struct.
9365 * Returns: 0 - success, -ENODEV - failure.
9368 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9372 case LPFC_PCI_DEV_LP:
9373 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
9374 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
9376 case LPFC_PCI_DEV_OC:
9377 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
9378 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
9381 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9382 "1419 Invalid HBA PCI-device group: 0x%x\n",
9387 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
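
/*
 * Editor's sketch (hypothetical helper, not driver source): attach-time
 * callers are expected to install both jump tables for the detected
 * device group before any mailbox or iocb traffic flows.
 */
static int
lpfc_sketch_install_api_tables(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return rc;
	return lpfc_sli_api_table_setup(phba, dev_grp);
}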
9392 * lpfc_sli4_calc_ring - Calculates which ring to use
9393 * @phba: Pointer to HBA context object.
9394 * @piocb: Pointer to command iocb.
9396 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
9397 * hba_wqidx, thus we need to calculate the corresponding ring.
9398 * Since ABORTS must go on the same WQ as the command they are
9399 * aborting, we use the command's hba_wqidx.
9401 struct lpfc_sli_ring *
9402 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
9404 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
9405 if (!(phba->cfg_fof) ||
9406 (!(piocb->iocb_flag & LPFC_IO_FOF))) {
9407 if (unlikely(!phba->sli4_hba.fcp_wq))
9410 * for abort iocb hba_wqidx should already
9411 * be set up based on what work queue we used.
9413 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9415 lpfc_sli4_scmd_to_wqidx_distr(phba,
9417 piocb->hba_wqidx = piocb->hba_wqidx %
9418 phba->cfg_fcp_io_channel;
9420 return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
9422 if (unlikely(!phba->sli4_hba.oas_wq))
9424 piocb->hba_wqidx = 0;
9425 return phba->sli4_hba.oas_wq->pring;
9428 if (unlikely(!phba->sli4_hba.els_wq))
9430 piocb->hba_wqidx = 0;
9431 return phba->sli4_hba.els_wq->pring;
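
/*
 * Editor's sketch (hypothetical helper, not driver source): the common
 * FCP branch of the mapping above, with the wqidx distribution call
 * elided. The index is clamped into the configured FCP channel range
 * and resolved to that work queue's ring.
 */
static struct lpfc_sli_ring *
lpfc_sketch_fcp_ring_for_wqidx(struct lpfc_hba *phba, uint32_t wqidx)
{
	wqidx = wqidx % phba->cfg_fcp_io_channel;
	return phba->sli4_hba.fcp_wq[wqidx]->pring;
}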
9436 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
9437 * @phba: Pointer to HBA context object.
9438 * @pring: Pointer to driver SLI ring object.
9439 * @piocb: Pointer to command iocb.
9440 * @flag: Flag indicating if this command can be put into txq.
9442 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
9443 * function. This function gets the hbalock and calls
9444 * __lpfc_sli_issue_iocb function and will return the error returned
9445 * by __lpfc_sli_issue_iocb function. This wrapper is used by
9446 * functions which do not hold hbalock.
9449 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9450 struct lpfc_iocbq *piocb, uint32_t flag)
9452 struct lpfc_hba_eq_hdl *hba_eq_hdl;
9453 struct lpfc_sli_ring *pring;
9454 struct lpfc_queue *fpeq;
9455 struct lpfc_eqe *eqe;
9456 unsigned long iflags;
9459 if (phba->sli_rev == LPFC_SLI_REV4) {
9460 pring = lpfc_sli4_calc_ring(phba, piocb);
9461 if (unlikely(pring == NULL))
9464 spin_lock_irqsave(&pring->ring_lock, iflags);
9465 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9466 spin_unlock_irqrestore(&pring->ring_lock, iflags);
9468 if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
9469 idx = piocb->hba_wqidx;
9470 hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx];
9472 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {
9474 /* Get associated EQ with this index */
9475 fpeq = phba->sli4_hba.hba_eq[idx];
9477 /* Turn off interrupts from this EQ */
9478 lpfc_sli4_eq_clr_intr(fpeq);
9481 * Process all the events on FCP EQ
9483 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9484 lpfc_sli4_hba_handle_eqe(phba,
9486 fpeq->EQ_processed++;
9489 /* Always clear and re-arm the EQ */
9490 lpfc_sli4_eq_release(fpeq,
9493 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
9496 /* For now, SLI2/3 will still use hbalock */
9497 spin_lock_irqsave(&phba->hbalock, iflags);
9498 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9499 spin_unlock_irqrestore(&phba->hbalock, iflags);
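
/*
 * Editor's usage sketch (hypothetical helper, not driver source): a
 * caller that holds no locks goes through the wrapper above; on
 * IOCB_ERROR the caller still owns the iocbq and must release it.
 */
static int
lpfc_sketch_send_els_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	int rc;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
	if (rc == IOCB_ERROR)
		lpfc_sli_release_iocbq(phba, piocb);
	return rc;
}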
9505 * lpfc_extra_ring_setup - Extra ring setup function
9506 * @phba: Pointer to HBA context object.
9508 * This function is called while the driver attaches to the
9509 * HBA to set up the extra ring. The extra ring is used
9510 * only when the driver needs to support target mode functionality
9511 * or IP over FC functionality.
9513 * This function is called with no lock held. SLI3 only.
9516 lpfc_extra_ring_setup( struct lpfc_hba *phba)
9518 struct lpfc_sli *psli;
9519 struct lpfc_sli_ring *pring;
9523 /* Adjust cmd/rsp ring iocb entries more evenly */
9525 /* Take some away from the FCP ring */
9526 pring = &psli->sli3_ring[LPFC_FCP_RING];
9527 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9528 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9529 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9530 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9532 /* and give them to the extra ring */
9533 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
9535 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9536 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9537 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9538 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9540 /* Setup default profile for this ring */
9541 pring->iotag_max = 4096;
9542 pring->num_mask = 1;
9543 pring->prt[0].profile = 0; /* Mask 0 */
9544 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
9545 pring->prt[0].type = phba->cfg_multi_ring_type;
9546 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
9550 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
9551 * @phba: Pointer to HBA context object.
9552 * @iocbq: Pointer to iocb object.
9554 * The async_event handler calls this routine when it receives
9555 * an ASYNC_STATUS_CN event from the port. The port generates
9556 * this event when an Abort Sequence request to an rport fails
9557 * twice in succession. The abort could be originated by the
9558 * driver or by the port. The ABTS could have been for an ELS
9559 * or FCP IO. The port only generates this event when an ABTS
9560 * fails to complete after one retry.
9563 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
9564 struct lpfc_iocbq *iocbq)
9566 struct lpfc_nodelist *ndlp = NULL;
9567 uint16_t rpi = 0, vpi = 0;
9568 struct lpfc_vport *vport = NULL;
9570 /* The rpi in the ulpContext is vport-sensitive. */
9571 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
9572 rpi = iocbq->iocb.ulpContext;
9574 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9575 "3092 Port generated ABTS async event "
9576 "on vpi %d rpi %d status 0x%x\n",
9577 vpi, rpi, iocbq->iocb.ulpStatus);
9579 vport = lpfc_find_vport_by_vpid(phba, vpi);
9582 ndlp = lpfc_findnode_rpi(vport, rpi);
9583 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
9586 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
9587 lpfc_sli_abts_recover_port(vport, ndlp);
9591 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9592 "3095 Event Context not found, no "
9593 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
9594 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
9598 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
9599 * @phba: pointer to HBA context object.
9600 * @ndlp: nodelist pointer for the impacted rport.
9601 * @axri: pointer to the wcqe containing the failed exchange.
9603 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
9604 * port. The port generates this event when an abort exchange request to an
9605 * rport fails twice in succession with no reply. The abort could be originated
9606 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
9609 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
9610 struct lpfc_nodelist *ndlp,
9611 struct sli4_wcqe_xri_aborted *axri)
9613 struct lpfc_vport *vport;
9614 uint32_t ext_status = 0;
9616 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
9617 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9618 "3115 Node Context not found, driver "
9619 "ignoring abts err event\n");
9623 vport = ndlp->vport;
9624 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9625 "3116 Port generated FCP XRI ABORT event on "
9626 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
9627 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
9628 bf_get(lpfc_wcqe_xa_xri, axri),
9629 bf_get(lpfc_wcqe_xa_status, axri),
9633 * Catch the ABTS protocol failure case. Older OCe FW releases returned
9634 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
9635 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
9637 ext_status = axri->parameter & IOERR_PARAM_MASK;
9638 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
9639 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
9640 lpfc_sli_abts_recover_port(vport, ndlp);
9644 * lpfc_sli_async_event_handler - ASYNC iocb handler function
9645 * @phba: Pointer to HBA context object.
9646 * @pring: Pointer to driver SLI ring object.
9647 * @iocbq: Pointer to iocb object.
9649 * This function is called by the slow ring event handler
9650 * function when there is an ASYNC event iocb in the ring.
9651 * This function is called with no lock held.
9652 * Currently this function handles only temperature related
9653 * ASYNC events. The function decodes the temperature sensor
9654 * event message and posts events for the management applications.
9657 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
9658 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
9662 struct temp_event temp_event_data;
9663 struct Scsi_Host *shost;
9666 icmd = &iocbq->iocb;
9667 evt_code = icmd->un.asyncstat.evt_code;
9670 case ASYNC_TEMP_WARN:
9671 case ASYNC_TEMP_SAFE:
9672 temp_event_data.data = (uint32_t) icmd->ulpContext;
9673 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
9674 if (evt_code == ASYNC_TEMP_WARN) {
9675 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
9676 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9677 "0347 Adapter is very hot, please take "
9678 "corrective action. temperature : %d Celsius\n",
9679 (uint32_t) icmd->ulpContext);
9681 temp_event_data.event_code = LPFC_NORMAL_TEMP;
9682 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9683 "0340 Adapter temperature is OK now. "
9684 "temperature : %d Celsius\n",
9685 (uint32_t) icmd->ulpContext);
9688 /* Send temperature change event to applications */
9689 shost = lpfc_shost_from_vport(phba->pport);
9690 fc_host_post_vendor_event(shost, fc_get_event_number(),
9691 sizeof(temp_event_data), (char *) &temp_event_data,
9694 case ASYNC_STATUS_CN:
9695 lpfc_sli_abts_err_handler(phba, iocbq);
9698 iocb_w = (uint32_t *) icmd;
9699 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9700 "0346 Ring %d handler: unexpected ASYNC_STATUS"
9702 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
9703 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
9704 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
9705 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
9706 pring->ringno, icmd->un.asyncstat.evt_code,
9707 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
9708 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
9709 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
9710 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
9718 * lpfc_sli4_setup - SLI ring setup function
9719 * @phba: Pointer to HBA context object.
9721 * lpfc_sli4_setup sets up the ELS ring of the SLI4 interface with
9722 * its unsolicited frame masks. This function is
9723 * called while the driver attaches to the HBA and before the
9724 * interrupts are enabled, so there is no need for locking.
9726 * This function always returns 0.
9729 lpfc_sli4_setup(struct lpfc_hba *phba)
9731 struct lpfc_sli_ring *pring;
9733 pring = phba->sli4_hba.els_wq->pring;
9734 pring->num_mask = LPFC_MAX_RING_MASK;
9735 pring->prt[0].profile = 0; /* Mask 0 */
9736 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
9737 pring->prt[0].type = FC_TYPE_ELS;
9738 pring->prt[0].lpfc_sli_rcv_unsol_event =
9739 lpfc_els_unsol_event;
9740 pring->prt[1].profile = 0; /* Mask 1 */
9741 pring->prt[1].rctl = FC_RCTL_ELS_REP;
9742 pring->prt[1].type = FC_TYPE_ELS;
9743 pring->prt[1].lpfc_sli_rcv_unsol_event =
9744 lpfc_els_unsol_event;
9745 pring->prt[2].profile = 0; /* Mask 2 */
9746 /* NameServer Inquiry */
9747 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
9749 pring->prt[2].type = FC_TYPE_CT;
9750 pring->prt[2].lpfc_sli_rcv_unsol_event =
9751 lpfc_ct_unsol_event;
9752 pring->prt[3].profile = 0; /* Mask 3 */
9753 /* NameServer response */
9754 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
9756 pring->prt[3].type = FC_TYPE_CT;
9757 pring->prt[3].lpfc_sli_rcv_unsol_event =
9758 lpfc_ct_unsol_event;
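
/*
 * Editor's sketch (hypothetical helper, not driver source): how the
 * rctl/type masks installed above are consumed. An unsolicited frame is
 * matched against each pring->prt[] entry and dispatched to the handler
 * registered for the first match.
 */
static int
lpfc_sketch_match_unsol_mask(struct lpfc_sli_ring *pring,
			     uint8_t rctl, uint8_t type)
{
	int i;

	for (i = 0; i < pring->num_mask; i++) {
		if (pring->prt[i].rctl == rctl &&
		    pring->prt[i].type == type)
			return i;	/* index of the handler to invoke */
	}
	return -1;	/* no match: frame is not consumed on this ring */
}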
9763 * lpfc_sli_setup - SLI ring setup function
9764 * @phba: Pointer to HBA context object.
9766 * lpfc_sli_setup sets up rings of the SLI interface with
9767 * the number of iocbs per ring and iotags. This function is
9768 * called while the driver attaches to the HBA and before the
9769 * interrupts are enabled, so there is no need for locking.
9771 * This function always returns 0. SLI3 only.
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocbsize = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
	psli->sli_flag = 0;

	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_ctr = 0;
			pring->iotag_max =
			    (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_max = phba->cfg_hba_queue_depth;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = LPFC_MAX_RING_MASK;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
			pring->prt[0].type = FC_TYPE_ELS;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_RCTL_ELS_REP;
			pring->prt[1].type = FC_TYPE_ELS;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_TYPE_CT;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_TYPE_CT;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			break;
		}
		totiocbsize += (pring->sli.sli3.numCiocb *
				pring->sli.sli3.sizeCiocb) +
			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}
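/*
 * Example (illustrative sketch, not compiled into the driver): the SLIM
 * sizing rule enforced above. Each SLI-3 ring contributes
 * numCiocb * sizeCiocb + numRiocb * sizeRiocb bytes, and the sum across
 * all rings must fit within MAX_SLIM_IOCB_SIZE. The helper name below is
 * hypothetical.
 */
#if 0
static int lpfc_example_ring_footprint(struct lpfc_sli_ring *pring)
{
	/* Bytes of SLI-2/SLI-3 SLIM consumed by one ring's cmd + rsp entries */
	return (pring->sli.sli3.numCiocb * pring->sli.sli3.sizeCiocb) +
	       (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
}
#endif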
/**
 * lpfc_sli4_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held.
 **/
void
lpfc_sli4_queue_init(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
		pring = phba->sli4_hba.fcp_wq[i]->pring;
		pring->flag = 0;
		pring->ringno = LPFC_FCP_RING;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}
	for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
		pring = phba->sli4_hba.nvme_wq[i]->pring;
		pring->flag = 0;
		pring->ringno = LPFC_FCP_RING;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}
	pring = phba->sli4_hba.els_wq->pring;
	pring->flag = 0;
	pring->ringno = LPFC_ELS_RING;
	INIT_LIST_HEAD(&pring->txq);
	INIT_LIST_HEAD(&pring->txcmplq);
	INIT_LIST_HEAD(&pring->iocb_continueq);
	spin_lock_init(&pring->ring_lock);

	if (phba->cfg_nvme_io_channel) {
		pring = phba->sli4_hba.nvmels_wq->pring;
		pring->flag = 0;
		pring->ringno = LPFC_ELS_RING;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}

	if (phba->cfg_fof) {
		pring = phba->sli4_hba.oas_wq->pring;
		pring->flag = 0;
		pring->ringno = LPFC_FCP_RING;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}

	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held.
 **/
void
lpfc_sli_queue_init(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		pring->ringno = i;
		pring->sli.sli3.next_cmdidx = 0;
		pring->sli.sli3.local_getidx = 0;
		pring->sli.sli3.cmdidx = 0;
		INIT_LIST_HEAD(&pring->iocb_continueq);
		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
		INIT_LIST_HEAD(&pring->postbufq);
		pring->flag = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		spin_lock_init(&pring->ring_lock);
	}
	spin_unlock_irq(&phba->hbalock);
}
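/*
 * Example (illustrative sketch, not compiled into the driver): the
 * attach-time ordering implied by the comments above -- ring geometry is
 * fixed by lpfc_sli_setup() before interrupts are enabled, then the queue
 * list heads are initialized. The wrapper name is hypothetical.
 */
#if 0
static void lpfc_example_sli3_bringup(struct lpfc_hba *phba)
{
	lpfc_sli_setup(phba);		/* sizes rings; no locking needed yet */
	lpfc_sli_queue_init(phba);	/* init txq/txcmplq under hbalock */
}
#endif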
/**
 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
 * @phba: Pointer to HBA context object.
 *
 * This routine flushes the mailbox command subsystem. It will unconditionally
 * flush all the mailbox commands in the three possible stages in the mailbox
 * command sub-system: the pending mailbox command queue; the outstanding
 * mailbox command; and the completed mailbox command queue. It is the
 * caller's responsibility to make sure that the driver is in the proper state
 * to flush the mailbox command sub-system. Namely, the posting of mailbox
 * commands into the pending mailbox command queue from the various clients
 * must be stopped; either the HBA is in a state that it will never work on
 * the outstanding mailbox command (such as in EEH or ERATT conditions) or the
 * outstanding mailbox command has been completed.
 **/
static void
lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	unsigned long iflag;

	/* Flush all the mailbox commands in the mbox system */
	spin_lock_irqsave(&phba->hbalock, iflag);
	/* The pending mailbox command queue */
	list_splice_init(&phba->sli.mboxq, &completions);
	/* The outstanding active mailbox command */
	if (psli->mbox_active) {
		list_add_tail(&psli->mbox_active->list, &completions);
		psli->mbox_active = NULL;
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}
	/* The completed mailbox command queue */
	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}
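/*
 * Example (illustrative sketch, not compiled into the driver): the routine
 * above uses the common "splice under lock, drain outside the lock" pattern.
 * A generic rendering of that pattern, with hypothetical names:
 */
#if 0
static void example_flush_queue(spinlock_t *lock, struct list_head *queue)
{
	LIST_HEAD(local);
	struct lpfc_dmabuf *entry;
	unsigned long iflag;

	spin_lock_irqsave(lock, iflag);
	list_splice_init(queue, &local);	/* steal the whole queue */
	spin_unlock_irqrestore(lock, iflag);

	while (!list_empty(&local)) {		/* complete without the lock */
		entry = list_first_entry(&local, struct lpfc_dmabuf, list);
		list_del_init(&entry->list);
		/* ... complete/free 'entry' ... */
	}
}
#endif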
/**
 * lpfc_sli_host_down - Vport cleanup function
 * @vport: Pointer to virtual port object.
 *
 * lpfc_sli_host_down is called to clean up the resources
 * associated with a vport before destroying virtual
 * port data structures.
 * This function does the following operations:
 * - Free discovery resources associated with this virtual
 *   port.
 * - Free iocbs associated with this virtual port in
 *   the txq.
 * - Send abort for all iocb commands associated with this
 *   vport in txcmplq.
 *
 * This function is called with no lock held and always returns 1.
 **/
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	int i;
	unsigned long flags = 0;
	uint16_t prev_pring_flag;

	lpfc_cleanup_discovery_resources(vport);

	spin_lock_irqsave(&phba->hbalock, flags);

	/*
	 * Error everything on the txq since these iocbs
	 * have not been given to the FW yet.
	 * Also issue ABTS for everything on the txcmplq
	 */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			prev_pring_flag = pring->flag;
			/* Only slow rings */
			if (pring->ringno == LPFC_ELS_RING) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txq, list) {
				if (iocb->vport != vport)
					continue;
				list_move_tail(&iocb->list, &completions);
			}
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txcmplq, list) {
				if (iocb->vport != vport)
					continue;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
			}
			pring->flag = prev_pring_flag;
		}
	} else {
		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
			pring = qp->pring;
			if (!pring)
				continue;
			if (pring == phba->sli4_hba.els_wq->pring) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			prev_pring_flag = pring->flag;
			spin_lock_irq(&pring->ring_lock);
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txq, list) {
				if (iocb->vport != vport)
					continue;
				list_move_tail(&iocb->list, &completions);
			}
			spin_unlock_irq(&pring->ring_lock);
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txcmplq, list) {
				if (iocb->vport != vport)
					continue;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
			}
			pring->flag = prev_pring_flag;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
	return 1;
}
/**
 * lpfc_sli_hba_down - Resource cleanup function for the HBA
 * @phba: Pointer to HBA context object.
 *
 * This function cleans up all iocbs, buffers, and mailbox commands
 * while shutting down the HBA. This function is called with no
 * lock held and always returns 1.
 * This function does the following to cleanup driver resources:
 * - Free discovery resources for each virtual port
 * - Cleanup any pending fabric iocbs
 * - Iterate through the iocb txq and free each entry
 *   in the list.
 * - Free up any buffer posted to the HBA
 * - Free mailbox commands in the mailbox queue.
 **/
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *buf_ptr;
	unsigned long flags = 0;
	int i;

	/* Shutdown the mailbox command sub-system */
	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);

	lpfc_hba_down_prep(phba);

	lpfc_fabric_abort_hba(phba);

	spin_lock_irqsave(&phba->hbalock, flags);

	/*
	 * Error everything on the txq since these iocbs
	 * have not been given to the FW yet.
	 */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			/* Only slow rings */
			if (pring->ringno == LPFC_ELS_RING) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			list_splice_init(&pring->txq, &completions);
		}
	} else {
		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
			pring = qp->pring;
			if (!pring)
				continue;
			spin_lock_irq(&pring->ring_lock);
			list_splice_init(&pring->txq, &completions);
			spin_unlock_irq(&pring->ring_lock);
			if (pring == phba->sli4_hba.els_wq->pring) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);

	spin_lock_irqsave(&phba->hbalock, flags);
	list_splice_init(&phba->elsbuf, &completions);
	phba->elsbuf_cnt = 0;
	phba->elsbuf_prev_cnt = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, buf_ptr,
				 struct lpfc_dmabuf, list);
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	return 1;
}
/**
 * lpfc_sli_pcimem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy, walked a word at a time.
 *
 * This function is used for copying data between driver memory
 * and the SLI memory. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}
/**
 * lpfc_sli_bemem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy, walked a word at a time.
 *
 * This function is used for copying data from a data structure
 * with big endian representation to local endianness.
 * This function can be called with or without lock.
 **/
void
lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = be32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}
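/*
 * Example (illustrative sketch, not compiled into the driver): usage of the
 * two copy helpers above. Note that @cnt is a byte count consumed one
 * 32-bit word at a time, so callers pass sizeof() of the object being
 * copied. The buffer names are hypothetical.
 */
#if 0
static void example_endian_copies(void)
{
	uint32_t le_wire[4], be_wire[4], cpu[4];

	/* little-endian SLI image -> CPU byte order */
	lpfc_sli_pcimem_bcopy(le_wire, cpu, sizeof(le_wire));
	/* big-endian structure -> CPU byte order */
	lpfc_sli_bemem_bcopy(be_wire, cpu, sizeof(be_wire));
}
#endif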
/**
 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq
 * queue.
 **/
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
	   later */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}
/**
 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
 * @phba: Pointer to HBA context object.
 *
 * When HBQ is enabled, buffers are searched based on tags. This function
 * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
 * does not conflict with tags of buffers posted for unsolicited events.
 * The function returns the allocated tag. The function is called with
 * no lock held.
 **/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/*
	 * Always set the QUE_BUFTAG_BIT to distinguish this tag
	 * from a tag assigned by HBQ.
	 */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}
/**
 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag.
 *
 * Buffers posted using the CMD_QUE_XRI64_CX iocb are in the pring->postbufq
 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
 * iocb is posted to the response ring with the tag of the buffer.
 * This function searches the pring->postbufq list using the tag
 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
 * iocb. If the buffer is found then the lpfc_dmabuf object of the
 * buffer is returned to the caller, else NULL is returned.
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    uint32_t tag)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on tag */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->buffer_tag == tag) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0402 Cannot find virtual addr for buffer tag on "
			"ring %d Data x%lx x%p x%p x%x\n",
			pring->ringno, (unsigned long) tag,
			slp->next, slp->prev, pring->postbufq_cnt);

	return NULL;
}
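/*
 * Example (illustrative sketch, not compiled into the driver): the tag
 * round-trip used by the routines above -- allocate a driver buffer tag,
 * post the buffer, then recover it by tag when the CMD_IOCB_RET_XRI64_CX
 * response arrives. The wrapper name and the origin of 'mp' are
 * hypothetical.
 */
#if 0
static void example_buffer_tag_roundtrip(struct lpfc_hba *phba,
					 struct lpfc_sli_ring *pring,
					 struct lpfc_dmabuf *mp)
{
	struct lpfc_dmabuf *found;

	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
	lpfc_sli_ringpostbuf_put(phba, pring, mp);

	/* ... later, in the response path ... */
	found = lpfc_sli_ring_taggedbuf_get(phba, pring, mp->buffer_tag);
	/* 'found' == 'mp' on success, NULL if the tag was never posted */
}
#endif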
/**
 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @phys: DMA address of the buffer.
 *
 * This function searches the buffer list using the dma_address
 * of an unsolicited event to find the driver's lpfc_dmabuf object
 * corresponding to the dma_address. The function returns the
 * lpfc_dmabuf object if a buffer is found, else it returns NULL.
 * This function is called by the ct and els unsolicited event
 * handlers to get the buffer associated with the unsolicited
 * event.
 *
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 dma_addr_t phys)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on phys */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->phys == phys) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0410 Cannot find virtual addr for mapped buf on "
			"ring %d Data x%llx x%p x%p x%x\n",
			pring->ringno, (unsigned long long)phys,
			slp->next, slp->prev, pring->postbufq_cnt);
	return NULL;
}
/**
 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This function is the completion handler for the abort iocbs for
 * ELS commands. This function is called from the ELS ring event
 * handler with no lock held. This function frees memory resources
 * associated with the abort iocb.
 **/
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;
	uint16_t abort_iotag, abort_context;
	struct lpfc_iocbq *abort_iocb = NULL;

	if (irsp->ulpStatus) {

		/*
		 * Assume that the port already completed and returned, or
		 * will return the iocb. Just log the message.
		 */
		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;

		spin_lock_irq(&phba->hbalock);
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (abort_iotag != 0 &&
			    abort_iotag <= phba->sli.last_iotag)
				abort_iocb =
					phba->sli.iocbq_lookup[abort_iotag];
		} else
			/* For sli4 the abort_tag is the XRI,
			 * so the abort routine puts the iotag of the iocb
			 * being aborted in the context field of the abort
			 * IOCB.
			 */
			abort_iocb = phba->sli.iocbq_lookup[abort_context];

		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
				"0327 Cannot abort els iocb %p "
				"with tag %x context %x, abort status %x, "
				"abort code %x\n",
				abort_iocb, abort_iotag, abort_context,
				irsp->ulpStatus, irsp->un.ulpWord[4]);

		spin_unlock_irq(&phba->hbalock);
	}
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}
/**
 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for ELS commands
 * which are aborted. The function frees memory resources used for
 * the aborted ELS commands.
 **/
static void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		     struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;

	/* ELS cmd tag <ulpIoTag> completes */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0139 Ignoring ELS cmd tag x%x completion Data: "
			"x%x x%x x%x\n",
			irsp->ulpIoTag, irsp->ulpStatus,
			irsp->un.ulpWord[4], irsp->ulpTimeout);
	if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
		lpfc_ct_free_iocb(phba, cmdiocb);
	else
		lpfc_els_free_iocb(phba, cmdiocb);
	return;
}
/**
 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb down to
 * the port. Except for the case where the outstanding command iocb is itself
 * an abort request, this function issues the abort unconditionally. This
 * function is called with hbalock held. The function returns 0 when it fails
 * due to memory allocation failure or when the command iocb is an abort
 * request.
 **/
static int
lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_iocbq *abtsiocbp;
	IOCB_t *icmd = NULL;
	IOCB_t *iabt = NULL;
	int retval;
	unsigned long iflags;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * There are certain command types we don't want to abort. And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/* issue ABTS for this IOCB based on iotag */
	abtsiocbp = __lpfc_sli_get_iocbq(phba);
	if (abtsiocbp == NULL)
		return 0;

	/* This signals the response to set the correct status
	 * before calling the completion handler
	 */
	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	iabt = &abtsiocbp->iocb;
	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
	iabt->un.acxri.abortContextTag = icmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
		iabt->un.acxri.abortContextTag = cmdiocb->iotag;
	} else
		iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
	iabt->ulpLe = 1;
	iabt->ulpClass = icmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
	if (cmdiocb->iocb_flag & LPFC_IO_FCP)
		abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
	if (cmdiocb->iocb_flag & LPFC_IO_FOF)
		abtsiocbp->iocb_flag |= LPFC_IO_FOF;

	if (phba->link_state >= LPFC_LINK_UP)
		iabt->ulpCommand = CMD_ABORT_XRI_CN;
	else
		iabt->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
	abtsiocbp->vport = vport;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0339 Abort xri x%x, original iotag x%x, "
			 "abort cmd iotag x%x\n",
			 iabt->un.acxri.abortIoTag,
			 iabt->un.acxri.abortContextTag,
			 abtsiocbp->iotag);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
		if (unlikely(pring == NULL))
			return 0;
		/* Note: both hbalock and ring_lock need to be set here */
		spin_lock_irqsave(&pring->ring_lock, iflags);
		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
			abtsiocbp, 0);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	} else {
		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
			abtsiocbp, 0);
	}

	if (retval)
		__lpfc_sli_release_iocbq(phba, abtsiocbp);

	/*
	 * Caller to this routine should check for IOCB_ERROR
	 * and handle it properly. This routine no longer removes
	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
	 */
	return retval;
}
/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb. In case
 * of unloading, the abort iocb will not be issued to commands on the ELS
 * ring. Instead, the callback function of those commands shall be changed so
 * that nothing happens when they finish. This function is called with
 * hbalock held. The function returns 0 when the command iocb is an abort
 * request.
 **/
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	int retval = IOCB_ERROR;
	IOCB_t *icmd = NULL;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * There are certain command types we don't want to abort. And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	if (!pring) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		goto abort_iotag_exit;
	}

	/*
	 * If we're unloading, don't abort iocb on the ELS ring, but change
	 * the callback so that nothing happens when it finishes.
	 */
	if ((vport->load_flag & FC_UNLOADING) &&
	    (pring->ringno == LPFC_ELS_RING)) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		goto abort_iotag_exit;
	}

	/* Now, we try to issue the abort to the cmdiocb out */
	retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);

abort_iotag_exit:
	/*
	 * Caller to this routine should check for IOCB_ERROR
	 * and handle it properly. This routine no longer removes
	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
	 */
	return retval;
}
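/*
 * Example (illustrative sketch, not compiled into the driver): the typical
 * caller pattern for lpfc_sli_issue_abort_iotag(), as in
 * lpfc_sli_host_down() above -- walk the txcmplq under the hbalock and
 * abort the entries belonging to one vport. The wrapper name is
 * hypothetical.
 */
#if 0
static void example_abort_vport_ring(struct lpfc_hba *phba,
				     struct lpfc_vport *vport,
				     struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *iocb, *next_iocb;
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->vport == vport)
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
#endif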
/**
 * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb down to
 * the port. Except for the case where the outstanding command iocb is itself
 * an abort request, this function issues the abort unconditionally. This
 * function is called with hbalock held. The function returns 0 when it fails
 * due to memory allocation failure or when the command iocb is an abort
 * request.
 **/
static int
lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_iocbq *abtsiocbp;
	union lpfc_wqe *abts_wqe;
	int retval;

	/*
	 * There are certain command types we don't want to abort. And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
	    cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/* issue ABTS for this io based on iotag */
	abtsiocbp = __lpfc_sli_get_iocbq(phba);
	if (abtsiocbp == NULL)
		return 0;

	/* This signals the response to set the correct status
	 * before calling the completion handler
	 */
	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	/* Complete prepping the abort wqe and issue to the FW. */
	abts_wqe = &abtsiocbp->wqe;

	/* Clear any stale WQE contents */
	memset(abts_wqe, 0, sizeof(union lpfc_wqe));
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

	/* word 7 */
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
	bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
	       cmdiocb->iocb.ulpClass);

	/* word 8 - tell the FW to abort the IO associated with this
	 * outstanding exchange ID.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;

	/* word 9 - this is the iotag for the abts_wqe completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abtsiocbp->iotag);

	/* word 10 */
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocbp->iocb_flag |= LPFC_IO_NVME;
	abtsiocbp->vport = vport;
	abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
	retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
	if (retval) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
				 "6147 Failed abts issue_wqe with status x%x "
				 "for oxid x%x\n",
				 retval, cmdiocb->sli4_xritag);
		lpfc_sli_release_iocbq(phba, abtsiocbp);
		return retval;
	}

	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
			 "6148 Drv Abort NVME Request Issued for "
			 "ox_id x%x on reqtag x%x\n",
			 cmdiocb->sli4_xritag,
			 abtsiocbp->iotag);

	return retval;
}
/**
 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will abort all pending and outstanding iocbs to an HBA.
 **/
void
lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *qp = NULL;
	int i;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
}
/**
 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
 * @iocbq: Pointer to driver iocb object.
 * @vport: Pointer to driver virtual port object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
 *
 * This function acts as an iocb filter for functions which abort or count
 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
 * 0 if the filtering criteria are met for the given iocb and will return
 * 1 if the filtering criteria are not met.
 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
 * given iocb is for the SCSI device specified by vport, tgt_id and
 * lun_id parameters.
 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
 * given iocb is for the SCSI target specified by vport and tgt_id
 * parameters.
 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
 * given iocb is for the SCSI host associated with the given vport.
 * This function is called with no locks held.
 **/
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
			   uint16_t tgt_id, uint64_t lun_id,
			   lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_scsi_buf *lpfc_cmd;
	int rc = 1;

	if (!(iocbq->iocb_flag & LPFC_IO_FCP))
		return rc;

	if (iocbq->vport != vport)
		return rc;

	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);

	if (lpfc_cmd->pCmd == NULL)
		return rc;

	switch (ctx_cmd) {
	case LPFC_CTX_LUN:
		if ((lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
			rc = 0;
		break;
	case LPFC_CTX_TGT:
		if ((lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
			rc = 0;
		break;
	case LPFC_CTX_HOST:
		rc = 0;
		break;
	default:
		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
		       __func__, ctx_cmd);
		break;
	}

	return rc;
}
/**
 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function returns the number of FCP commands pending for the vport.
 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
 * commands pending on the vport associated with the SCSI device specified
 * by the tgt_id and lun_id parameters.
 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
 * commands pending on the vport associated with the SCSI target specified
 * by the tgt_id parameter.
 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
 * commands pending on the vport.
 * This function returns the number of iocbs which satisfy the filter.
 * This function is called without any lock held.
 **/
int
lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
		  lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	int sum, i;

	spin_lock_irq(&phba->hbalock);
	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       ctx_cmd) == 0)
			sum++;
	}
	spin_unlock_irq(&phba->hbalock);

	return sum;
}
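/*
 * Example (illustrative sketch, not compiled into the driver): how a reset
 * path might use lpfc_sli_sum_iocb() to check whether FCP commands are
 * still pending for one LUN. The wrapper name is hypothetical.
 */
#if 0
static bool example_lun_has_pending_io(struct lpfc_vport *vport,
				       uint16_t tgt_id, uint64_t lun_id)
{
	return lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) > 0;
}
#endif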
/**
 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This function is called when an aborted FCP iocb completes. This
 * function is called by the ring event handler with no lock held.
 * This function frees the iocb.
 **/
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3096 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"status 0x%x, reason 0x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag, rspiocb->iocb.ulpStatus,
			rspiocb->iocb.un.ulpWord[4]);
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}
/**
 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by the lpfc_sli_validate_fcp_iocb function.
 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
 * parameters.
 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with the SCSI target specified by the tgt_id
 * parameter.
 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with the virtual port.
 * This function returns the number of iocbs it failed to abort.
 * This function is called with no locks held.
 **/
int
lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_sli_ring *pring_s4;
	IOCB_t *cmd = NULL;
	int errcnt = 0, ret_val = 0;
	int i;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       abort_cmd) != 0)
			continue;

		/*
		 * If the iocbq is already being aborted, don't take a second
		 * action, but do count it.
		 */
		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		abtsiocb = lpfc_sli_get_iocbq(phba);
		if (abtsiocb == NULL) {
			errcnt++;
			continue;
		}

		/* indicate the IO is being aborted by the driver. */
		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

		cmd = &iocbq->iocb;
		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
		else
			abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
		abtsiocb->iocb.ulpLe = 1;
		abtsiocb->iocb.ulpClass = cmd->ulpClass;
		abtsiocb->vport = vport;

		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
		abtsiocb->hba_wqidx = iocbq->hba_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
		if (iocbq->iocb_flag & LPFC_IO_FOF)
			abtsiocb->iocb_flag |= LPFC_IO_FOF;

		if (lpfc_is_link_up(phba))
			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		if (phba->sli_rev == LPFC_SLI_REV4) {
			pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
			if (pring_s4 == NULL)
				continue;
			ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
						      abtsiocb, 0);
		} else
			ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
						      abtsiocb, 0);
		if (ret_val == IOCB_ERROR) {
			lpfc_sli_release_iocbq(phba, abtsiocb);
			errcnt++;
			continue;
		}
	}

	return errcnt;
}
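/*
 * Example (illustrative sketch, not compiled into the driver): the
 * abort-then-drain pattern a LUN reset handler might build from the
 * routines above -- issue ABTS for everything pending on the LUN, then
 * poll the pending count. Names and the polling policy are hypothetical.
 */
#if 0
static int example_lun_reset_drain(struct lpfc_vport *vport,
				   struct lpfc_sli_ring *pring,
				   uint16_t tgt_id, uint64_t lun_id)
{
	int failed = lpfc_sli_abort_iocb(vport, pring, tgt_id, lun_id,
					 LPFC_CTX_LUN);

	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN))
		msleep(20);	/* wait for aborted commands to complete */
	return failed;
}
#endif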
/**
 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by the lpfc_sli_validate_fcp_iocb function.
 * When cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
 * parameters.
 * When cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with the SCSI target specified by the tgt_id
 * parameter.
 * When cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with the virtual port.
 * This function returns the number of iocbs it aborted.
 * This function is called with no locks held right after a taskmgmt
 * command is sent.
 **/
int
lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *abtsiocbq;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *iocbq;
	IOCB_t *icmd;
	int sum, i, ret_val;
	unsigned long iflags;
	struct lpfc_sli_ring *pring_s4;

	spin_lock_irq(&phba->hbalock);

	/* all I/Os are in process of being flushed */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}
	sum = 0;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       cmd) != 0)
			continue;

		/*
		 * If the iocbq is already being aborted, don't take a second
		 * action, but do count it.
		 */
		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		abtsiocbq = __lpfc_sli_get_iocbq(phba);
		if (abtsiocbq == NULL)
			continue;

		icmd = &iocbq->iocb;
		abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocbq->iocb.un.acxri.abortIoTag =
				iocbq->sli4_xritag;
		else
			abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
		abtsiocbq->iocb.ulpLe = 1;
		abtsiocbq->iocb.ulpClass = icmd->ulpClass;
		abtsiocbq->vport = vport;

		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
		abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
		if (iocbq->iocb_flag & LPFC_IO_FOF)
			abtsiocbq->iocb_flag |= LPFC_IO_FOF;

		lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		ndlp = lpfc_cmd->rdata->pnode;

		if (lpfc_is_link_up(phba) &&
		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
			abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;

		/*
		 * Indicate the IO is being aborted by the driver and set
		 * the caller's flag into the aborted IO.
		 */
		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

		if (phba->sli_rev == LPFC_SLI_REV4) {
			pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
			if (pring_s4 == NULL)
				continue;
			/* Note: both hbalock and ring_lock must be set here */
			spin_lock_irqsave(&pring_s4->ring_lock, iflags);
			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
							abtsiocbq, 0);
			spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
		} else {
			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
							abtsiocbq, 0);
		}

		if (ret_val == IOCB_ERROR)
			__lpfc_sli_release_iocbq(phba, abtsiocbq);
		else
			sum++;
	}
	spin_unlock_irq(&phba->hbalock);
	return sum;
}
/**
 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_sli_issue_iocb_wait function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from other threads which
 * clean up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;
	struct lpfc_scsi_buf *lpfc_cmd;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {

		/*
		 * A time out has occurred for the iocb. If a time out
		 * completion handler has been supplied, call it. Otherwise,
		 * just free the iocbq.
		 */

		spin_unlock_irqrestore(&phba->hbalock, iflags);
		cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
		cmdiocbq->wait_iocb_cmpl = NULL;
		if (cmdiocbq->iocb_cmpl)
			(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
		else
			lpfc_sli_release_iocbq(phba, cmdiocbq);
		return;
	}

	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	/* Set the exchange busy flag for task management commands */
	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
	    !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
					cur_iocbq);
		lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
	}

	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}
/**
 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @flag: Flag to test.
 *
 * This routine grabs the hbalock and then tests the iocb_flag to
 * see if the passed in flag is set.
 * Returns:
 * 1 if flag is set.
 * 0 if flag is not set.
 **/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
		  struct lpfc_iocbq *piocbq, uint32_t flag)
{
	unsigned long iflags;
	int ret;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = piocbq->iocb_flag & flag;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}
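/*
 * Example (illustrative sketch, not compiled into the driver): why the
 * helper above exists -- it gives wait_event_timeout() a condition that
 * reads iocb_flag under the hbalock, as the synchronous issue path below
 * does. The wrapper name is hypothetical.
 */
#if 0
static long example_wait_for_wake(struct lpfc_hba *phba,
				  struct lpfc_iocbq *piocb,
				  wait_queue_head_t *done_q, long timeout_req)
{
	/* Condition is evaluated under the hbalock via lpfc_chk_iocb_flg() */
	return wait_event_timeout(*done_q,
			lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
			timeout_req);
}
#endif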
/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. The iocb_cmpl field of the iocb (saved in
 * wait_iocb_cmpl) shall be used to handle iocbs which time out. If the
 * field is NULL, the function shall free the iocbq structure. If more
 * clean up is needed, the caller is expected to provide a completion
 * function that will provide the needed clean up. If the iocb command is
 * not completed within timeout seconds, the function will either
 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
 * completion function set in the iocb_cmpl field and then return
 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
 * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. For the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the iocb completions occur while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completion for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb, and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS on success.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_iocbq *iocb;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	bool iocb_completed = true;

	if (phba->sli_rev >= LPFC_SLI_REV4)
		pring = lpfc_sli4_calc_ring(phba, piocb);
	else
		pring = &phba->sli.sli3_ring[ring_number];
	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * is NULL or its an error.
	 */
	if (prspiocbq) {
		if (piocb->context2)
			return IOCB_ERROR;
		piocb->context2 = prspiocbq;
	}

	piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
	piocb->context_un.wait_queue = &done_q;
	piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
				     SLI_IOCB_RET_IOCB);
	if (retval == IOCB_SUCCESS) {
		timeout_req = msecs_to_jiffies(timeout * 1000);
		timeleft = wait_event_timeout(done_q,
				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
				timeout_req);
		spin_lock_irqsave(&phba->hbalock, iflags);
		if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {

			/*
			 * IOCB timed out. Inform the wake iocb wait
			 * completion function and set local status
			 */

			iocb_completed = false;
			piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		if (iocb_completed) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"0331 IOCB wake signaled\n");
			/* Note: we are not indicating if the IOCB has a success
			 * status or not - that's for the caller to check.
			 * IOCB_SUCCESS means just that the command was sent and
			 * completed. Not that it completed successfully.
			 */
		} else if (timeleft == 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0338 IOCB wait timeout error - no "
					"wake response Data x%x\n", timeout);
			retval = IOCB_TIMEDOUT;
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0330 IOCB wake NOT set, "
					"Data x%x x%lx\n",
					timeout, (timeleft / jiffies));
			retval = IOCB_TIMEDOUT;
		}
	} else if (retval == IOCB_BUSY) {
		if (phba->cfg_log_verbose & LOG_SLI) {
			list_for_each_entry(iocb, &pring->txq, list) {
				txq_cnt++;
			}
			list_for_each_entry(iocb, &pring->txcmplq, list) {
				txcmplq_cnt++;
			}
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
		}
		return retval;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0332 IOCB wait issue failed, Data x%x\n",
				retval);
		retval = IOCB_ERROR;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (prspiocbq)
		piocb->context2 = NULL;

	piocb->context_un.wait_queue = NULL;
	piocb->iocb_cmpl = NULL;
	return retval;
}
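/*
 * Example (illustrative sketch, not compiled into the driver): a caller of
 * the synchronous helper above -- issue an ELS-ring iocb and wait up to
 * 30 seconds for its response. The wrapper name and timeout are
 * hypothetical; 'piocb' and 'rsp' come from lpfc_sli_get_iocbq().
 */
#if 0
static int example_sync_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *piocb,
			     struct lpfc_iocbq *rsp)
{
	int rc;

	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb, rsp, 30);
	if (rc == IOCB_TIMEDOUT)
		return -ETIMEDOUT;	/* do not free the iocb resources */
	if (rc != IOCB_SUCCESS)
		return -EIO;
	/* rsp->iocb now holds the response; check rsp->iocb.ulpStatus */
	return 0;
}
#endif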
/**
 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the mailbox to firmware and waits for the
 * mailbox command to complete. If the mailbox command is not
 * completed within timeout seconds, it returns MBX_TIMEOUT.
 * The function waits for the mailbox completion using an
 * interruptible wait. If the thread is woken up due to a
 * signal, MBX_TIMEOUT error is returned to the caller. The caller
 * should not free the mailbox resources if this function returns
 * MBX_TIMEOUT.
 * This function will sleep while waiting for mailbox completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. For the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the mailbox completion occurs while
 * this function sleeps. So, this function cannot be called from
 * the worker thread which processes mailbox completion.
 * This function is called in the context of HBA management
 * applications.
 * This function returns MBX_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	MAILBOX_t *mb = NULL;
	int retval;
	unsigned long flag;

	/* The caller might set context1 for extended buffer */
	if (pmboxq->context1)
		mb = (MAILBOX_t *)pmboxq->context1;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = &done_q;

	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_event_interruptible_timeout(done_q,
				pmboxq->mbox_flag & LPFC_MBX_WAKE,
				msecs_to_jiffies(timeout * 1000));

		spin_lock_irqsave(&phba->hbalock, flag);
		/* restore the possible extended buffer for free resource */
		pmboxq->context1 = (uint8_t *)mb;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
			retval = MBX_SUCCESS;
		} else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	} else {
		/* restore the possible extended buffer for free resource */
		pmboxq->context1 = (uint8_t *)mb;
	}

	return retval;
}
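/*
 * Example (illustrative sketch, not compiled into the driver): a typical
 * caller of the routine above -- allocate a mailbox from the driver's
 * mempool, build a command, issue it synchronously, and free it only on
 * non-timeout outcomes. The wrapper name is hypothetical;
 * lpfc_read_config() is an existing mailbox builder.
 */
#if 0
static int example_sync_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_read_config(phba, pmboxq);
	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if (rc == MBX_TIMEOUT)
		return -ETIMEDOUT;	/* mailbox must not be freed here */

	/* ... consume pmboxq->u.mb fields ... */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
#endif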
/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 * @mbx_action: LPFC_MBX_WAIT or LPFC_MBX_NO_WAIT.
 *
 * This function is called to shutdown the driver's mailbox sub-system.
 * It first marks the mailbox sub-system as in a blocked state to prevent
 * asynchronous mailbox commands from being issued off the pending mailbox
 * command queue. If the mailbox command sub-system shutdown is due to
 * HBA error conditions such as EEH or ERATT, this routine shall invoke
 * the mailbox sub-system flush routine to forcefully bring down the
 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
 * as with offline or HBA function reset), this routine will wait for the
 * outstanding mailbox command to complete before invoking the mailbox
 * sub-system flush routine to gracefully bring down the mailbox sub-system.
 **/
void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long timeout;

	if (mbx_action == LPFC_MBX_NO_WAIT) {
		/* delay 100ms for port state */
		msleep(100);
		lpfc_sli_mbox_sys_flush(phba);
		return;
	}
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		if (phba->sli.mbox_active)
			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
		spin_unlock_irq(&phba->hbalock);

		while (phba->sli.mbox_active) {
			/* Check active mailbox complete status every 2ms */
			msleep(2);
			if (time_after(jiffies, timeout))
				/* Timeout, let the mailbox flush routine
				 * forcefully release the active mailbox
				 * command.
				 */
				break;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_sli_mbox_sys_flush(phba);
}
/**
 * lpfc_sli_eratt_read - read sli-3 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI3 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* Read chip Host Attention (HA) register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		goto unplug_err;

	if (ha_copy & HA_ERATT) {
		/* Read host status register to retrieve error event */
		if (lpfc_sli_read_hs(phba))
			goto unplug_err;

		/* Check if a deferred error condition is active */
		if ((HS_FFER1 & phba->work_hs) &&
		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
			phba->hba_flag |= DEFER_ERATT;
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr);
		}

		/* Set the driver HA work bitmap */
		phba->work_ha |= HA_ERATT;
		/* Indicate polling handles this ERATT */
		phba->hba_flag |= HBA_ERATT_HANDLED;
		return 1;
	}
	return 0;

unplug_err:
	/* Set the driver HS work bitmap */
	phba->work_hs |= UNPLUG_ERR;
	/* Set the driver HA work bitmap */
	phba->work_ha |= HA_ERATT;
	/* Indicate polling handles this ERATT */
	phba->hba_flag |= HBA_ERATT_HANDLED;
	return 1;
}
/**
 * lpfc_sli4_eratt_read - read sli-4 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI4 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
	uint32_t uerr_sta_hi, uerr_sta_lo;
	uint32_t if_type, portsmphr;
	struct lpfc_register portstat_reg;

	/*
	 * For now, use the SLI4 device internal unrecoverable error
	 * registers for error attention. This can be changed later.
	 */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
			&uerr_sta_lo) ||
			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
			&uerr_sta_hi)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1423 HBA Unrecoverable error: "
					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
					"ue_mask_lo_reg=0x%x, "
					"ue_mask_hi_reg=0x%x\n",
					uerr_sta_lo, uerr_sta_hi,
					phba->sli4_hba.ue_mask_lo,
					phba->sli4_hba.ue_mask_hi);
			phba->work_status[0] = uerr_sta_lo;
			phba->work_status[1] = uerr_sta_hi;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			&portstat_reg.word0) ||
			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
			phba->work_status[0] =
				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] =
				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2885 Port Status Event: "
					"port status reg 0x%x, "
					"port smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					portstat_reg.word0,
					portsmphr,
					phba->work_status[0],
					phba->work_status[1]);
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2886 HBA Error Attention on unsupported "
				"if type %d.", if_type);
		return 1;
	}

	return 0;
}
11688 * lpfc_sli_check_eratt - check error attention events
11689 * @phba: Pointer to HBA context.
11691 * This function is called from timer soft interrupt context to check HBA's
11692 * error attention register bit for error attention events.
11694 * This function returns 1 when there is Error Attention in the Host Attention
11695 * Register and returns 0 otherwise.
11698 lpfc_sli_check_eratt(struct lpfc_hba *phba)
11702 /* If somebody is waiting to handle an eratt, don't process it
11703 * here. The brdkill function will do this.
11705 if (phba->link_flag & LS_IGNORE_ERATT)
11708 /* Check if interrupt handler handles this ERATT */
11709 spin_lock_irq(&phba->hbalock);
11710 if (phba->hba_flag & HBA_ERATT_HANDLED) {
11711 /* Interrupt handler has handled ERATT */
11712 spin_unlock_irq(&phba->hbalock);
11717 * If there is deferred error attention, do not check for error attention.
11720 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11721 spin_unlock_irq(&phba->hbalock);
11725 /* If PCI channel is offline, don't process it */
11726 if (unlikely(pci_channel_offline(phba->pcidev))) {
11727 spin_unlock_irq(&phba->hbalock);
11731 switch (phba->sli_rev) {
11732 case LPFC_SLI_REV2:
11733 case LPFC_SLI_REV3:
11734 /* Read chip Host Attention (HA) register */
11735 ha_copy = lpfc_sli_eratt_read(phba);
11737 case LPFC_SLI_REV4:
11738 /* Read device Unrecoverable Error (UERR) registers */
11739 ha_copy = lpfc_sli4_eratt_read(phba);
11742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11743 "0299 Invalid SLI revision (%d)\n",
11748 spin_unlock_irq(&phba->hbalock);
11754 * lpfc_intr_state_check - Check device state for interrupt handling
11755 * @phba: Pointer to HBA context.
11757 * This inline routine checks whether a device or its PCI slot is in a state
11758 * in which the interrupt should be handled.
11760 * This function returns 0 if the device or the PCI slot is in a state in
11761 * which the interrupt should be handled, otherwise -EIO.
11764 lpfc_intr_state_check(struct lpfc_hba *phba)
11766 /* If the pci channel is offline, ignore all the interrupts */
11767 if (unlikely(pci_channel_offline(phba->pcidev)))
11770 /* Update device level interrupt statistics */
11771 phba->sli.slistat.sli_intr++;
11773 /* Ignore all interrupts during initialization. */
11774 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
11781 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
11782 * @irq: Interrupt number.
11783 * @dev_id: The device context pointer.
11785 * This function is directly called from the PCI layer as an interrupt
11786 * service routine when a device with the SLI-3 interface spec is enabled with
11787 * MSI-X multi-message interrupt mode and there are slow-path events in
11788 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
11789 * interrupt mode, this function is called as part of the device-level
11790 * interrupt handler. When the PCI slot is in error recovery or the HBA
11791 * is undergoing initialization, the interrupt handler will not process
11792 * the interrupt. The link attention and ELS ring attention events are
11793 * handled by the worker thread. The interrupt handler signals the worker
11794 * thread and returns for these events. This function is called without
11795 * any lock held. It gets the hbalock to access and update SLI data structures.
11798 * This function returns IRQ_HANDLED when interrupt is handled else it
11799 * returns IRQ_NONE.
11802 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
11804 struct lpfc_hba *phba;
11805 uint32_t ha_copy, hc_copy;
11806 uint32_t work_ha_copy;
11807 unsigned long status;
11808 unsigned long iflag;
11811 MAILBOX_t *mbox, *pmbox;
11812 struct lpfc_vport *vport;
11813 struct lpfc_nodelist *ndlp;
11814 struct lpfc_dmabuf *mp;
11819 * Get the driver's phba structure from the dev_id and
11820 * assume the HBA is not interrupting.
11822 phba = (struct lpfc_hba *)dev_id;
11824 if (unlikely(!phba))
11828 * Stuff needs to be attended to when this function is invoked as an
11829 * individual interrupt handler in MSI-X multi-message interrupt mode.
11831 if (phba->intr_type == MSIX) {
11832 /* Check device state for handling interrupt */
11833 if (lpfc_intr_state_check(phba))
11835 /* Need to read HA REG for slow-path events */
11836 spin_lock_irqsave(&phba->hbalock, iflag);
11837 if (lpfc_readl(phba->HAregaddr, &ha_copy))
11839 /* If somebody is waiting to handle an eratt don't process it
11840 * here. The brdkill function will do this.
11842 if (phba->link_flag & LS_IGNORE_ERATT)
11843 ha_copy &= ~HA_ERATT;
11844 /* Check the need for handling ERATT in interrupt handler */
11845 if (ha_copy & HA_ERATT) {
11846 if (phba->hba_flag & HBA_ERATT_HANDLED)
11847 /* ERATT polling has handled ERATT */
11848 ha_copy &= ~HA_ERATT;
11850 /* Indicate interrupt handler handles ERATT */
11851 phba->hba_flag |= HBA_ERATT_HANDLED;
11855 * If there is deferred error attention, do not check for any interrupt.
11858 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11859 spin_unlock_irqrestore(&phba->hbalock, iflag);
11863 /* Clear up only attention source related to slow-path */
11864 if (lpfc_readl(phba->HCregaddr, &hc_copy))
11867 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
11868 HC_LAINT_ENA | HC_ERINT_ENA),
11870 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
11872 writel(hc_copy, phba->HCregaddr);
11873 readl(phba->HAregaddr); /* flush */
11874 spin_unlock_irqrestore(&phba->hbalock, iflag);
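/*
 * The sequence above is deliberate: slow-path enables (mailbox,
 * ring 2, link and error attention) are masked off in HC first,
 * only the slow-path attention bits are acknowledged in HA, and
 * then the saved HC value is restored, leaving the fast-path
 * interrupt sources untouched throughout.
 */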
11876 ha_copy = phba->ha_copy;
11878 work_ha_copy = ha_copy & phba->work_ha_mask;
11880 if (work_ha_copy) {
11881 if (work_ha_copy & HA_LATT) {
11882 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
11884 * Turn off Link Attention interrupts
11885 * until CLEAR_LA done
11887 spin_lock_irqsave(&phba->hbalock, iflag);
11888 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
11889 if (lpfc_readl(phba->HCregaddr, &control))
11891 control &= ~HC_LAINT_ENA;
11892 writel(control, phba->HCregaddr);
11893 readl(phba->HCregaddr); /* flush */
11894 spin_unlock_irqrestore(&phba->hbalock, iflag);
11897 work_ha_copy &= ~HA_LATT;
11900 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
11902 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
11903 * the only slow ring.
11905 status = (work_ha_copy &
11906 (HA_RXMASK << (4*LPFC_ELS_RING)));
11907 status >>= (4*LPFC_ELS_RING);
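/*
 * The Host Attention register packs four attention bits per ring,
 * so a ring's nibble is isolated with (HA_RXMASK << (4 * ring))
 * and shifted back down; purely as an illustration:
 *     els_stat = (ha_copy >> (4 * LPFC_ELS_RING)) & HA_RXMASK;
 */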
11908 if (status & HA_RXMASK) {
11909 spin_lock_irqsave(&phba->hbalock, iflag);
11910 if (lpfc_readl(phba->HCregaddr, &control))
11913 lpfc_debugfs_slow_ring_trc(phba,
11914 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
11916 (uint32_t)phba->sli.slistat.sli_intr);
11918 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
11919 lpfc_debugfs_slow_ring_trc(phba,
11920 "ISR Disable ring:"
11921 "pwork:x%x hawork:x%x wait:x%x",
11922 phba->work_ha, work_ha_copy,
11923 (uint32_t)((unsigned long)
11924 &phba->work_waitq));
11927 ~(HC_R0INT_ENA << LPFC_ELS_RING);
11928 writel(control, phba->HCregaddr);
11929 readl(phba->HCregaddr); /* flush */
11932 lpfc_debugfs_slow_ring_trc(phba,
11933 "ISR slow ring: pwork:"
11934 "x%x hawork:x%x wait:x%x",
11935 phba->work_ha, work_ha_copy,
11936 (uint32_t)((unsigned long)
11937 &phba->work_waitq));
11939 spin_unlock_irqrestore(&phba->hbalock, iflag);
11942 spin_lock_irqsave(&phba->hbalock, iflag);
11943 if (work_ha_copy & HA_ERATT) {
11944 if (lpfc_sli_read_hs(phba))
11947 * Check if a deferred error condition is active
11950 if ((HS_FFER1 & phba->work_hs) &&
11951 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
11952 HS_FFER6 | HS_FFER7 | HS_FFER8) &
11954 phba->hba_flag |= DEFER_ERATT;
11955 /* Clear all interrupt enable conditions */
11956 writel(0, phba->HCregaddr);
11957 readl(phba->HCregaddr);
11961 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
11962 pmb = phba->sli.mbox_active;
11963 pmbox = &pmb->u.mb;
11965 vport = pmb->vport;
11967 /* First check out the status word */
11968 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
11969 if (pmbox->mbxOwner != OWN_HOST) {
11970 spin_unlock_irqrestore(&phba->hbalock, iflag);
11972 * Stray Mailbox Interrupt, mbxCommand <cmd>
11973 * mbxStatus <status>
11975 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11977 "(%d):0304 Stray Mailbox "
11978 "Interrupt mbxCommand x%x "
11980 (vport ? vport->vpi : 0),
11983 /* clear mailbox attention bit */
11984 work_ha_copy &= ~HA_MBATT;
11986 phba->sli.mbox_active = NULL;
11987 spin_unlock_irqrestore(&phba->hbalock, iflag);
11988 phba->last_completion_time = jiffies;
11989 del_timer(&phba->sli.mbox_tmo);
11990 if (pmb->mbox_cmpl) {
11991 lpfc_sli_pcimem_bcopy(mbox, pmbox,
11993 if (pmb->out_ext_byte_len &&
11995 lpfc_sli_pcimem_bcopy(
11998 pmb->out_ext_byte_len);
12000 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12001 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12003 lpfc_debugfs_disc_trc(vport,
12004 LPFC_DISC_TRC_MBOX_VPORT,
12005 "MBOX dflt rpi: : "
12006 "status:x%x rpi:x%x",
12007 (uint32_t)pmbox->mbxStatus,
12008 pmbox->un.varWords[0], 0);
12010 if (!pmbox->mbxStatus) {
12011 mp = (struct lpfc_dmabuf *)
12013 ndlp = (struct lpfc_nodelist *)
12016 /* Reg_LOGIN of dflt RPI was
12017 * successful. Now let's get
12018 * rid of the RPI using the
12019 * same mbox buffer.
12021 lpfc_unreg_login(phba,
12023 pmbox->un.varWords[0],
12026 lpfc_mbx_cmpl_dflt_rpi;
12027 pmb->context1 = mp;
12028 pmb->context2 = ndlp;
12029 pmb->vport = vport;
12030 rc = lpfc_sli_issue_mbox(phba,
12033 if (rc != MBX_BUSY)
12034 lpfc_printf_log(phba,
12036 LOG_MBOX | LOG_SLI,
12037 "0350 rc should have"
12038 "been MBX_BUSY\n");
12039 if (rc != MBX_NOT_FINISHED)
12040 goto send_current_mbox;
12044 &phba->pport->work_port_lock,
12046 phba->pport->work_port_events &=
12048 spin_unlock_irqrestore(
12049 &phba->pport->work_port_lock,
12051 lpfc_mbox_cmpl_put(phba, pmb);
12054 spin_unlock_irqrestore(&phba->hbalock, iflag);
12056 if ((work_ha_copy & HA_MBATT) &&
12057 (phba->sli.mbox_active == NULL)) {
12059 /* Process next mailbox command if there is one */
12061 rc = lpfc_sli_issue_mbox(phba, NULL,
12063 } while (rc == MBX_NOT_FINISHED);
12064 if (rc != MBX_SUCCESS)
12065 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12066 LOG_SLI, "0349 rc should be "
12070 spin_lock_irqsave(&phba->hbalock, iflag);
12071 phba->work_ha |= work_ha_copy;
12072 spin_unlock_irqrestore(&phba->hbalock, iflag);
12073 lpfc_worker_wake_up(phba);
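/*
 * Everything that could not be finished in hard-interrupt context
 * (link attention, ELS ring work, deferred mailbox handling) has
 * been accumulated into phba->work_ha above; waking the worker
 * thread hands those events off for process-context handling.
 */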
12075 return IRQ_HANDLED;
12077 spin_unlock_irqrestore(&phba->hbalock, iflag);
12078 return IRQ_HANDLED;
12080 } /* lpfc_sli_sp_intr_handler */
12083 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
12084 * @irq: Interrupt number.
12085 * @dev_id: The device context pointer.
12087 * This function is directly called from the PCI layer as an interrupt
12088 * service routine when a device with the SLI-3 interface spec is enabled with
12089 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12090 * ring event in the HBA. However, when the device is enabled with either
12091 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12092 * device-level interrupt handler. When the PCI slot is in error recovery
12093 * or the HBA is undergoing initialization, the interrupt handler will not
12094 * process the interrupt. The SCSI FCP fast-path ring events are handled in
12095 * the interrupt context. This function is called without any lock held.
12096 * It gets the hbalock to access and update SLI data structures.
12098 * This function returns IRQ_HANDLED when interrupt is handled else it
12099 * returns IRQ_NONE.
12102 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12104 struct lpfc_hba *phba;
12106 unsigned long status;
12107 unsigned long iflag;
12108 struct lpfc_sli_ring *pring;
12110 /* Get the driver's phba structure from the dev_id and
12111 * assume the HBA is not interrupting.
12113 phba = (struct lpfc_hba *) dev_id;
12115 if (unlikely(!phba))
12119 * Stuff needs to be attended to when this function is invoked as an
12120 * individual interrupt handler in MSI-X multi-message interrupt mode.
12122 if (phba->intr_type == MSIX) {
12123 /* Check device state for handling interrupt */
12124 if (lpfc_intr_state_check(phba))
12126 /* Need to read HA REG for FCP ring and other ring events */
12127 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12128 return IRQ_HANDLED;
12129 /* Clear up only attention source related to fast-path */
12130 spin_lock_irqsave(&phba->hbalock, iflag);
12132 * If there is deferred error attention, do not check for
12135 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12136 spin_unlock_irqrestore(&phba->hbalock, iflag);
12139 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12141 readl(phba->HAregaddr); /* flush */
12142 spin_unlock_irqrestore(&phba->hbalock, iflag);
12144 ha_copy = phba->ha_copy;
12147 * Process all events on FCP ring. Take the optimized path for FCP IO.
12149 ha_copy &= ~(phba->work_ha_mask);
12151 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12152 status >>= (4*LPFC_FCP_RING);
12153 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12154 if (status & HA_RXMASK)
12155 lpfc_sli_handle_fast_ring_event(phba, pring, status);
12157 if (phba->cfg_multi_ring_support == 2) {
12159 * Process all events on extra ring. Take the optimized path
12160 * for extra ring IO.
12162 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12163 status >>= (4*LPFC_EXTRA_RING);
12164 if (status & HA_RXMASK) {
12165 lpfc_sli_handle_fast_ring_event(phba,
12166 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
12170 return IRQ_HANDLED;
12171 } /* lpfc_sli_fp_intr_handler */
12174 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
12175 * @irq: Interrupt number.
12176 * @dev_id: The device context pointer.
12178 * This function is the HBA device-level interrupt handler for a device with
12179 * SLI-3 interface spec, called from the PCI layer when either MSI or
12180 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12181 * requires driver attention. This function invokes the slow-path interrupt
12182 * attention handling function and fast-path interrupt attention handling
12183 * function in turn to process the relevant HBA attention events. This
12184 * function is called without any lock held. It gets the hbalock to access
12185 * and update SLI data structures.
12187 * This function returns IRQ_HANDLED when interrupt is handled, else it
12188 * returns IRQ_NONE.
12191 lpfc_sli_intr_handler(int irq, void *dev_id)
12193 struct lpfc_hba *phba;
12194 irqreturn_t sp_irq_rc, fp_irq_rc;
12195 unsigned long status1, status2;
12199 * Get the driver's phba structure from the dev_id and
12200 * assume the HBA is not interrupting.
12202 phba = (struct lpfc_hba *) dev_id;
12204 if (unlikely(!phba))
12207 /* Check device state for handling interrupt */
12208 if (lpfc_intr_state_check(phba))
12211 spin_lock(&phba->hbalock);
12212 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12213 spin_unlock(&phba->hbalock);
12214 return IRQ_HANDLED;
12217 if (unlikely(!phba->ha_copy)) {
12218 spin_unlock(&phba->hbalock);
12220 } else if (phba->ha_copy & HA_ERATT) {
12221 if (phba->hba_flag & HBA_ERATT_HANDLED)
12222 /* ERATT polling has handled ERATT */
12223 phba->ha_copy &= ~HA_ERATT;
12225 /* Indicate interrupt handler handles ERATT */
12226 phba->hba_flag |= HBA_ERATT_HANDLED;
12230 * If there is deferred error attention, do not check for any interrupt.
12232 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12233 spin_unlock(&phba->hbalock);
12237 /* Clear attention sources except link and error attentions */
12238 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12239 spin_unlock(&phba->hbalock);
12240 return IRQ_HANDLED;
12242 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12243 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12245 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
12246 writel(hc_copy, phba->HCregaddr);
12247 readl(phba->HAregaddr); /* flush */
12248 spin_unlock(&phba->hbalock);
12251 * Invokes slow-path host attention interrupt handling as appropriate.
12254 /* status of events with mailbox, link and error attention */
12255 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12257 /* status of events with ELS ring */
12258 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
12259 status2 >>= (4*LPFC_ELS_RING);
12261 if (status1 || (status2 & HA_RXMASK))
12262 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
12264 sp_irq_rc = IRQ_NONE;
12267 * Invoke fast-path host attention interrupt handling as appropriate.
12270 /* status of events with FCP ring */
12271 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12272 status1 >>= (4*LPFC_FCP_RING);
12274 /* status of events with extra ring */
12275 if (phba->cfg_multi_ring_support == 2) {
12276 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12277 status2 >>= (4*LPFC_EXTRA_RING);
12281 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
12282 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
12284 fp_irq_rc = IRQ_NONE;
12286 /* Return device-level interrupt handling status */
12287 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
12288 } /* lpfc_sli_intr_handler */
12291 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
12292 * @phba: pointer to lpfc hba data structure.
12294 * This routine is invoked by the worker thread to process all the pending
12295 * SLI4 FCP abort XRI events.
12297 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
12299 struct lpfc_cq_event *cq_event;
12301 /* First, declare the fcp xri abort event has been handled */
12302 spin_lock_irq(&phba->hbalock);
12303 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
12304 spin_unlock_irq(&phba->hbalock);
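/*
 * The flag is deliberately cleared before the list is drained: any
 * abort event queued after this point re-sets FCP_XRI_ABORT_EVENT
 * and re-triggers the worker, so no event can be stranded on the
 * list.
 */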
12305 /* Now, handle all the fcp xri abort events */
12306 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
12307 /* Get the first event from the head of the event queue */
12308 spin_lock_irq(&phba->hbalock);
12309 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
12310 cq_event, struct lpfc_cq_event, list);
12311 spin_unlock_irq(&phba->hbalock);
12312 /* Notify aborted XRI for FCP work queue */
12313 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12314 /* Free the event processed back to the free pool */
12315 lpfc_sli4_cq_event_release(phba, cq_event);
12320 * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
12321 * @phba: pointer to lpfc hba data structure.
12323 * This routine is invoked by the worker thread to process all the pending
12324 * SLI4 NVME abort XRI events.
12326 void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
12328 struct lpfc_cq_event *cq_event;
12330 /* First, declare the nvme xri abort event has been handled */
12331 spin_lock_irq(&phba->hbalock);
12332 phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
12333 spin_unlock_irq(&phba->hbalock);
12334 /* Now, handle all the nvme xri abort events */
12335 while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
12336 /* Get the first event from the head of the event queue */
12337 spin_lock_irq(&phba->hbalock);
12338 list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
12339 cq_event, struct lpfc_cq_event, list);
12340 spin_unlock_irq(&phba->hbalock);
12341 /* Notify aborted XRI for NVME work queue */
12342 if (phba->nvmet_support) {
12343 lpfc_sli4_nvmet_xri_aborted(phba,
12344 &cq_event->cqe.wcqe_axri);
12346 lpfc_sli4_nvme_xri_aborted(phba,
12347 &cq_event->cqe.wcqe_axri);
12349 /* Free the event processed back to the free pool */
12350 lpfc_sli4_cq_event_release(phba, cq_event);
12355 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
12356 * @phba: pointer to lpfc hba data structure.
12358 * This routine is invoked by the worker thread to process all the pending
12359 * SLI4 els abort xri events.
12361 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
12363 struct lpfc_cq_event *cq_event;
12365 /* First, declare the els xri abort event has been handled */
12366 spin_lock_irq(&phba->hbalock);
12367 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
12368 spin_unlock_irq(&phba->hbalock);
12369 /* Now, handle all the els xri abort events */
12370 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12371 /* Get the first event from the head of the event queue */
12372 spin_lock_irq(&phba->hbalock);
12373 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12374 cq_event, struct lpfc_cq_event, list);
12375 spin_unlock_irq(&phba->hbalock);
12376 /* Notify aborted XRI for ELS work queue */
12377 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12378 /* Free the event processed back to the free pool */
12379 lpfc_sli4_cq_event_release(phba, cq_event);
12384 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
12385 * @phba: pointer to lpfc hba data structure
12386 * @pIocbIn: pointer to the rspiocbq
12387 * @pIocbOut: pointer to the cmdiocbq
12388 * @wcqe: pointer to the complete wcqe
12390 * This routine transfers the fields of a command iocbq to a response iocbq
12391 * by copying all the IOCB fields from command iocbq and transferring the
12392 * completion status information from the complete wcqe.
12395 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
12396 struct lpfc_iocbq *pIocbIn,
12397 struct lpfc_iocbq *pIocbOut,
12398 struct lpfc_wcqe_complete *wcqe)
12401 unsigned long iflags;
12402 uint32_t status, max_response;
12403 struct lpfc_dmabuf *dmabuf;
12404 struct ulp_bde64 *bpl, bde;
12405 size_t offset = offsetof(struct lpfc_iocbq, iocb);
12407 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
12408 sizeof(struct lpfc_iocbq) - offset);
12409 /* Map WCQE parameters into irspiocb parameters */
12410 status = bf_get(lpfc_wcqe_c_status, wcqe);
12411 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
12412 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
12413 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
12414 pIocbIn->iocb.un.fcpi.fcpi_parm =
12415 pIocbOut->iocb.un.fcpi.fcpi_parm -
12416 wcqe->total_data_placed;
12418 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12420 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12421 switch (pIocbOut->iocb.ulpCommand) {
12422 case CMD_ELS_REQUEST64_CR:
12423 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12424 bpl = (struct ulp_bde64 *)dmabuf->virt;
12425 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
12426 max_response = bde.tus.f.bdeSize;
12428 case CMD_GEN_REQUEST64_CR:
12430 if (!pIocbOut->context3)
12432 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
12433 sizeof(struct ulp_bde64);
12434 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12435 bpl = (struct ulp_bde64 *)dmabuf->virt;
12436 for (i = 0; i < numBdes; i++) {
12437 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
12438 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
12439 max_response += bde.tus.f.bdeSize;
12443 max_response = wcqe->total_data_placed;
12446 if (max_response < wcqe->total_data_placed)
12447 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
12449 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
12450 wcqe->total_data_placed;
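/*
 * Clamp the reported response length to the space described by the
 * command's BDEs; without the clamp, a completion that placed more
 * data than the caller's buffers describe would report an over-long
 * response.
 */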
12453 /* Convert BG errors for completion status */
12454 if (status == CQE_STATUS_DI_ERROR) {
12455 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
12457 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
12458 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
12460 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
12462 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
12463 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
12464 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12465 BGS_GUARD_ERR_MASK;
12466 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
12467 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12468 BGS_APPTAG_ERR_MASK;
12469 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
12470 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12471 BGS_REFTAG_ERR_MASK;
12473 /* Check to see if there was any good data before the error */
12474 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
12475 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12476 BGS_HI_WATER_MARK_PRESENT_MASK;
12477 pIocbIn->iocb.unsli3.sli3_bg.bghm =
12478 wcqe->total_data_placed;
12482 * Set ALL the error bits to indicate we don't know what
12483 * type of error it is.
12485 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
12486 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12487 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
12488 BGS_GUARD_ERR_MASK);
12491 /* Pick up HBA exchange busy condition */
12492 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
12493 spin_lock_irqsave(&phba->hbalock, iflags);
12494 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
12495 spin_unlock_irqrestore(&phba->hbalock, iflags);
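/*
 * The XB (exchange busy) bit indicates the HBA still owns the XRI
 * for this exchange; LPFC_EXCHANGE_BUSY tells the completion path
 * to defer reusing the exchange resources until the subsequent
 * XRI_ABORTED CQE (handled elsewhere in this file) arrives.
 */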
12500 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
12501 * @phba: Pointer to HBA context object.
12502 * @wcqe: Pointer to work-queue completion queue entry.
12504 * This routine handles an ELS work-queue completion event and constructs
12505 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
12506 * discovery engine to handle.
12508 * Return: Pointer to the receive IOCBQ, NULL otherwise.
12510 static struct lpfc_iocbq *
12511 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
12512 struct lpfc_iocbq *irspiocbq)
12514 struct lpfc_sli_ring *pring;
12515 struct lpfc_iocbq *cmdiocbq;
12516 struct lpfc_wcqe_complete *wcqe;
12517 unsigned long iflags;
12519 pring = lpfc_phba_elsring(phba);
12520 if (unlikely(!pring))
12523 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
12524 spin_lock_irqsave(&pring->ring_lock, iflags);
12525 pring->stats.iocb_event++;
12526 /* Look up the ELS command IOCB and create pseudo response IOCB */
12527 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
12528 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12529 if (unlikely(!cmdiocbq)) {
12530 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12531 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12532 "0386 ELS complete with no corresponding "
12533 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
12534 wcqe->word0, wcqe->total_data_placed,
12535 wcqe->parameter, wcqe->word3);
12536 lpfc_sli_release_iocbq(phba, irspiocbq);
12540 /* Put the iocb back on the txcmplq */
12541 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
12542 spin_unlock_irqrestore(&pring->ring_lock, iflags);
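/*
 * The tag lookup above removed the command from the txcmplq; it is
 * put back here so the slow-path completion processing, which
 * expects to find it on that list, can complete and free it later.
 */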
12544 /* Fake the irspiocbq and copy necessary response information */
12545 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
12551 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
12552 * @phba: Pointer to HBA context object.
12553 * @cqe: Pointer to mailbox completion queue entry.
12555 * This routine processes a mailbox completion queue entry with an asynchronous event.
12558 * Return: true if work posted to worker thread, otherwise false.
12561 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12563 struct lpfc_cq_event *cq_event;
12564 unsigned long iflags;
12566 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12567 "0392 Async Event: word0:x%x, word1:x%x, "
12568 "word2:x%x, word3:x%x\n", mcqe->word0,
12569 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
12571 /* Allocate a new internal CQ_EVENT entry */
12572 cq_event = lpfc_sli4_cq_event_alloc(phba);
12574 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12575 "0394 Failed to allocate CQ_EVENT entry\n");
12579 /* Move the CQE into an asynchronous event entry */
12580 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
12581 spin_lock_irqsave(&phba->hbalock, iflags);
12582 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
12583 /* Set the async event flag */
12584 phba->hba_flag |= ASYNC_EVENT;
12585 spin_unlock_irqrestore(&phba->hbalock, iflags);
12591 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
12592 * @phba: Pointer to HBA context object.
12593 * @cqe: Pointer to mailbox completion queue entry.
12595 * This routine processes a mailbox completion queue entry with a mailbox
12596 * completion event.
12598 * Return: true if work posted to worker thread, otherwise false.
12601 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12603 uint32_t mcqe_status;
12604 MAILBOX_t *mbox, *pmbox;
12605 struct lpfc_mqe *mqe;
12606 struct lpfc_vport *vport;
12607 struct lpfc_nodelist *ndlp;
12608 struct lpfc_dmabuf *mp;
12609 unsigned long iflags;
12611 bool workposted = false;
12614 /* If not a mailbox completion MCQE, bail out after checking mailbox consume */
12615 if (!bf_get(lpfc_trailer_completed, mcqe))
12616 goto out_no_mqe_complete;
12618 /* Get the reference to the active mbox command */
12619 spin_lock_irqsave(&phba->hbalock, iflags);
12620 pmb = phba->sli.mbox_active;
12621 if (unlikely(!pmb)) {
12622 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
12623 "1832 No pending MBOX command to handle\n");
12624 spin_unlock_irqrestore(&phba->hbalock, iflags);
12625 goto out_no_mqe_complete;
12627 spin_unlock_irqrestore(&phba->hbalock, iflags);
12629 pmbox = (MAILBOX_t *)&pmb->u.mqe;
12631 vport = pmb->vport;
12633 /* Reset heartbeat timer */
12634 phba->last_completion_time = jiffies;
12635 del_timer(&phba->sli.mbox_tmo);
12637 /* Move mbox data to caller's mailbox region, do endian swapping */
12638 if (pmb->mbox_cmpl && mbox)
12639 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
12642 * For mcqe errors, conditionally move a modified error code to
12643 * the mbox so that the error will not be missed.
12645 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
12646 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
12647 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
12648 bf_set(lpfc_mqe_status, mqe,
12649 (LPFC_MBX_ERROR_RANGE | mcqe_status));
12651 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12652 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12653 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
12654 "MBOX dflt rpi: status:x%x rpi:x%x",
12656 pmbox->un.varWords[0], 0);
12657 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
12658 mp = (struct lpfc_dmabuf *)(pmb->context1);
12659 ndlp = (struct lpfc_nodelist *)pmb->context2;
12660 /* Reg_LOGIN of dflt RPI was successful. Now let's get
12661 * rid of the RPI using the same mbox buffer.
12663 lpfc_unreg_login(phba, vport->vpi,
12664 pmbox->un.varWords[0], pmb);
12665 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
12666 pmb->context1 = mp;
12667 pmb->context2 = ndlp;
12668 pmb->vport = vport;
12669 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
12670 if (rc != MBX_BUSY)
12671 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12672 LOG_SLI, "0385 rc should "
12673 "have been MBX_BUSY\n");
12674 if (rc != MBX_NOT_FINISHED)
12675 goto send_current_mbox;
12678 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
12679 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12680 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
12682 /* There is mailbox completion work to do */
12683 spin_lock_irqsave(&phba->hbalock, iflags);
12684 __lpfc_mbox_cmpl_put(phba, pmb);
12685 phba->work_ha |= HA_MBATT;
12686 spin_unlock_irqrestore(&phba->hbalock, iflags);
12690 spin_lock_irqsave(&phba->hbalock, iflags);
12691 /* Release the mailbox command posting token */
12692 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
12693 /* Setting the active mailbox pointer must be kept in sync with the flag clear */
12694 phba->sli.mbox_active = NULL;
12695 if (bf_get(lpfc_trailer_consumed, mcqe))
12696 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
12697 spin_unlock_irqrestore(&phba->hbalock, iflags);
12698 /* Wake up worker thread to post the next pending mailbox command */
12699 lpfc_worker_wake_up(phba);
12702 out_no_mqe_complete:
12703 spin_lock_irqsave(&phba->hbalock, iflags);
12704 if (bf_get(lpfc_trailer_consumed, mcqe))
12705 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
12706 spin_unlock_irqrestore(&phba->hbalock, iflags);
12711 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
12712 * @phba: Pointer to HBA context object.
12713 * @cqe: Pointer to mailbox completion queue entry.
12715 * This routine processes a mailbox completion queue entry; it invokes the
12716 * proper mailbox completion handling or asynchronous event handling routine
12717 * according to the MCQE's async bit.
12719 * Return: true if work posted to worker thread, otherwise false.
12722 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
12724 struct lpfc_mcqe mcqe;
12727 /* Copy the mailbox MCQE and convert endian order as needed */
12728 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
12730 /* Invoke the proper event handling routine */
12731 if (!bf_get(lpfc_trailer_async, &mcqe))
12732 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
12734 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
12739 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
12740 * @phba: Pointer to HBA context object.
12741 * @cq: Pointer to associated CQ
12742 * @wcqe: Pointer to work-queue completion queue entry.
12744 * This routine handles an ELS work-queue completion event.
12746 * Return: true if work posted to worker thread, otherwise false.
12749 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12750 struct lpfc_wcqe_complete *wcqe)
12752 struct lpfc_iocbq *irspiocbq;
12753 unsigned long iflags;
12754 struct lpfc_sli_ring *pring = cq->pring;
12756 int txcmplq_cnt = 0;
12757 int fcp_txcmplq_cnt = 0;
12759 /* Get an irspiocbq for later ELS response processing use */
12760 irspiocbq = lpfc_sli_get_iocbq(phba);
12762 if (!list_empty(&pring->txq))
12764 if (!list_empty(&pring->txcmplq))
12766 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12767 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
12768 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
12769 txq_cnt, phba->iocb_cnt,
12775 /* Save off the slow-path queue event for work thread to process */
12776 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
12777 spin_lock_irqsave(&phba->hbalock, iflags);
12778 list_add_tail(&irspiocbq->cq_event.list,
12779 &phba->sli4_hba.sp_queue_event);
12780 phba->hba_flag |= HBA_SP_QUEUE_EVT;
12781 spin_unlock_irqrestore(&phba->hbalock, iflags);
12787 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
12788 * @phba: Pointer to HBA context object.
12789 * @wcqe: Pointer to work-queue completion queue entry.
12791 * This routine handles slow-path WQ entry consumed event by invoking the
12792 * proper WQ release routine to the slow-path WQ.
12795 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
12796 struct lpfc_wcqe_release *wcqe)
12798 /* sanity check on queue memory */
12799 if (unlikely(!phba->sli4_hba.els_wq))
12801 /* Check for the slow-path ELS work queue */
12802 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
12803 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
12804 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
12806 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12807 "2579 Slow-path wqe consume event carries "
12808 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
12809 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
12810 phba->sli4_hba.els_wq->queue_id);
12814 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
12815 * @phba: Pointer to HBA context object.
12816 * @cq: Pointer to a WQ completion queue.
12817 * @wcqe: Pointer to work-queue completion queue entry.
12819 * This routine handles an XRI abort event.
12821 * Return: true if work posted to worker thread, otherwise false.
12824 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
12825 struct lpfc_queue *cq,
12826 struct sli4_wcqe_xri_aborted *wcqe)
12828 bool workposted = false;
12829 struct lpfc_cq_event *cq_event;
12830 unsigned long iflags;
12832 /* Allocate a new internal CQ_EVENT entry */
12833 cq_event = lpfc_sli4_cq_event_alloc(phba);
12835 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12836 "0602 Failed to allocate CQ_EVENT entry\n");
12840 /* Move the CQE into the proper xri abort event list */
12841 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
12842 switch (cq->subtype) {
12844 spin_lock_irqsave(&phba->hbalock, iflags);
12845 list_add_tail(&cq_event->list,
12846 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
12847 /* Set the fcp xri abort event flag */
12848 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
12849 spin_unlock_irqrestore(&phba->hbalock, iflags);
12853 spin_lock_irqsave(&phba->hbalock, iflags);
12854 list_add_tail(&cq_event->list,
12855 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
12856 /* Set the els xri abort event flag */
12857 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
12858 spin_unlock_irqrestore(&phba->hbalock, iflags);
12862 spin_lock_irqsave(&phba->hbalock, iflags);
12863 list_add_tail(&cq_event->list,
12864 &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
12865 /* Set the nvme xri abort event flag */
12866 phba->hba_flag |= NVME_XRI_ABORT_EVENT;
12867 spin_unlock_irqrestore(&phba->hbalock, iflags);
12871 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12872 "0603 Invalid CQ subtype %d: "
12873 "%08x %08x %08x %08x\n",
12874 cq->subtype, wcqe->word0, wcqe->parameter,
12875 wcqe->word2, wcqe->word3);
12876 lpfc_sli4_cq_event_release(phba, cq_event);
12877 workposted = false;
12884 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
12885 * @phba: Pointer to HBA context object.
12886 * @rcqe: Pointer to receive-queue completion queue entry.
12888 * This routine processes a receive-queue completion queue entry.
12890 * Return: true if work posted to worker thread, otherwise false.
12893 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
12895 bool workposted = false;
12896 struct fc_frame_header *fc_hdr;
12897 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
12898 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
12899 struct lpfc_nvmet_tgtport *tgtp;
12900 struct hbq_dmabuf *dma_buf;
12901 uint32_t status, rq_id;
12902 unsigned long iflags;
12904 /* sanity check on queue memory */
12905 if (unlikely(!hrq) || unlikely(!drq))
12908 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
12909 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
12911 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
12912 if (rq_id != hrq->queue_id)
12915 status = bf_get(lpfc_rcqe_status, rcqe);
12917 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
12918 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12919 "2537 Receive Frame Truncated!!\n");
12920 case FC_STATUS_RQ_SUCCESS:
12921 lpfc_sli4_rq_release(hrq, drq);
12922 spin_lock_irqsave(&phba->hbalock, iflags);
12923 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
12925 hrq->RQ_no_buf_found++;
12926 spin_unlock_irqrestore(&phba->hbalock, iflags);
12930 hrq->RQ_buf_posted--;
12931 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
12933 /* If an NVME LS event (type 0x28), treat it as Fast path */
12934 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
12936 /* save off the frame for the worker thread to process */
12937 list_add_tail(&dma_buf->cq_event.list,
12938 &phba->sli4_hba.sp_queue_event);
12939 /* Frame received */
12940 phba->hba_flag |= HBA_SP_QUEUE_EVT;
12941 spin_unlock_irqrestore(&phba->hbalock, iflags);
12944 case FC_STATUS_INSUFF_BUF_FRM_DISC:
12945 if (phba->nvmet_support) {
12946 tgtp = phba->targetport->private;
12947 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
12948 "6402 RQE Error x%x, posted %d err_cnt "
12950 status, hrq->RQ_buf_posted,
12951 hrq->RQ_no_posted_buf,
12952 atomic_read(&tgtp->rcv_fcp_cmd_in),
12953 atomic_read(&tgtp->rcv_fcp_cmd_out),
12954 atomic_read(&tgtp->xmt_fcp_release));
12958 case FC_STATUS_INSUFF_BUF_NEED_BUF:
12959 hrq->RQ_no_posted_buf++;
12960 /* Post more buffers if possible */
12961 spin_lock_irqsave(&phba->hbalock, iflags);
12962 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
12963 spin_unlock_irqrestore(&phba->hbalock, iflags);
12972 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
12973 * @phba: Pointer to HBA context object.
12974 * @cq: Pointer to the completion queue.
12975 * @wcqe: Pointer to a completion queue entry.
12977 * This routine processes a slow-path work-queue or receive-queue completion queue entry.
12980 * Return: true if work posted to worker thread, otherwise false.
12983 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12984 struct lpfc_cqe *cqe)
12986 struct lpfc_cqe cqevt;
12987 bool workposted = false;
12989 /* Copy the work queue CQE and convert endian order if needed */
12990 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
12992 /* Check and process for different type of WCQE and dispatch */
12993 switch (bf_get(lpfc_cqe_code, &cqevt)) {
12994 case CQE_CODE_COMPL_WQE:
12995 /* Process the WQ/RQ complete event */
12996 phba->last_completion_time = jiffies;
12997 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
12998 (struct lpfc_wcqe_complete *)&cqevt);
13000 case CQE_CODE_RELEASE_WQE:
13001 /* Process the WQ release event */
13002 lpfc_sli4_sp_handle_rel_wcqe(phba,
13003 (struct lpfc_wcqe_release *)&cqevt);
13005 case CQE_CODE_XRI_ABORTED:
13006 /* Process the WQ XRI abort event */
13007 phba->last_completion_time = jiffies;
13008 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13009 (struct sli4_wcqe_xri_aborted *)&cqevt);
13011 case CQE_CODE_RECEIVE:
13012 case CQE_CODE_RECEIVE_V1:
13013 /* Process the RQ event */
13014 phba->last_completion_time = jiffies;
13015 workposted = lpfc_sli4_sp_handle_rcqe(phba,
13016 (struct lpfc_rcqe *)&cqevt);
13019 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13020 "0388 Not a valid WCQE code: x%x\n",
13021 bf_get(lpfc_cqe_code, &cqevt));
13028 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
13029 * @phba: Pointer to HBA context object.
13030 * @eqe: Pointer to slow-path event queue entry.
13032 * This routine processes an event queue entry from the slow-path event queue.
13033 * It will check the MajorCode and MinorCode to determine whether this is for a
13034 * completion event on a completion queue; if not, an error shall be logged
13035 * and the routine just returns. Otherwise, it will get to the corresponding completion
13036 * queue and process all the entries on that completion queue, rearm the
13037 * completion queue, and then return.
13041 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13042 struct lpfc_queue *speq)
13044 struct lpfc_queue *cq = NULL, *childq;
13045 struct lpfc_cqe *cqe;
13046 bool workposted = false;
13050 /* Get the reference to the corresponding CQ */
13051 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13053 list_for_each_entry(childq, &speq->child_list, list) {
13054 if (childq->queue_id == cqid) {
13059 if (unlikely(!cq)) {
13060 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13061 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13062 "0365 Slow-path CQ identifier "
13063 "(%d) does not exist\n", cqid);
13067 /* Save EQ associated with this CQ */
13068 cq->assoc_qp = speq;
13070 /* Process all the entries to the CQ */
13071 switch (cq->type) {
13073 while ((cqe = lpfc_sli4_cq_get(cq))) {
13074 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
13075 if (!(++ecount % cq->entry_repost))
13081 while ((cqe = lpfc_sli4_cq_get(cq))) {
13082 if ((cq->subtype == LPFC_FCP) ||
13083 (cq->subtype == LPFC_NVME))
13084 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
13087 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
13089 if (!(++ecount % cq->entry_repost))
13093 /* Track the max number of CQEs processed in 1 EQ */
13094 if (ecount > cq->CQ_max_cqe)
13095 cq->CQ_max_cqe = ecount;
13098 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13099 "0370 Invalid completion queue type (%d)\n",
13104 /* Catch the no cq entry condition, log an error */
13105 if (unlikely(ecount == 0))
13106 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13107 "0371 No entry from the CQ: identifier "
13108 "(x%x), type (%d)\n", cq->queue_id, cq->type);
13110 /* In any case, flush and re-arm the CQ */
13111 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
13113 /* wake up worker thread if there is work to be done */
13115 lpfc_worker_wake_up(phba);
13121 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
13122 * @phba: Pointer to HBA context object.
13123 * @cq: Pointer to associated CQ
13124 * @wcqe: Pointer to work-queue completion queue entry.
13126 * This routine processes a fast-path work queue completion entry from the fast-path
13127 * event queue for FCP command response completion.
13130 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13131 struct lpfc_wcqe_complete *wcqe)
13133 struct lpfc_sli_ring *pring = cq->pring;
13134 struct lpfc_iocbq *cmdiocbq;
13135 struct lpfc_iocbq irspiocbq;
13136 unsigned long iflags;
13138 /* Check for response status */
13139 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13140 /* If resource errors reported from HBA, reduce queue
13141 * depth of the SCSI device.
13143 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13144 IOSTAT_LOCAL_REJECT)) &&
13145 ((wcqe->parameter & IOERR_PARAM_MASK) ==
13146 IOERR_NO_RESOURCES))
13147 phba->lpfc_rampdown_queue_depth(phba);
13149 /* Log the error status */
13150 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13151 "0373 FCP complete error: status=x%x, "
13152 "hw_status=x%x, total_data_specified=%d, "
13153 "parameter=x%x, word3=x%x\n",
13154 bf_get(lpfc_wcqe_c_status, wcqe),
13155 bf_get(lpfc_wcqe_c_hw_status, wcqe),
13156 wcqe->total_data_placed, wcqe->parameter,
13160 /* Look up the FCP command IOCB and create pseudo response IOCB */
13161 spin_lock_irqsave(&pring->ring_lock, iflags);
13162 pring->stats.iocb_event++;
13163 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13164 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13165 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13166 if (unlikely(!cmdiocbq)) {
13167 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13168 "0374 FCP complete with no corresponding "
13169 "cmdiocb: iotag (%d)\n",
13170 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13175 cmdiocbq->isr_timestamp =
13176 cq->assoc_qp->isr_timestamp;
13178 if (cmdiocbq->iocb_cmpl == NULL) {
13179 if (cmdiocbq->wqe_cmpl) {
13180 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13181 spin_lock_irqsave(&phba->hbalock, iflags);
13182 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13183 spin_unlock_irqrestore(&phba->hbalock, iflags);
13186 /* Pass the cmd_iocb and the wcqe to the upper layer */
13187 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13190 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13191 "0375 FCP cmdiocb not callback function "
13193 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13197 /* Fake the irspiocb and copy necessary response information */
13198 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
13200 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13201 spin_lock_irqsave(&phba->hbalock, iflags);
13202 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13203 spin_unlock_irqrestore(&phba->hbalock, iflags);
13206 /* Pass the cmd_iocb and the rsp state to the upper layer */
13207 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
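/*
 * Two completion styles converge here: NVME-style commands supply
 * wqe_cmpl and receive the raw WCQE directly (handled above), while
 * SCSI-style commands supply iocb_cmpl and receive a faked SLI-3
 * response IOCB built by lpfc_sli4_iocb_param_transfer().
 */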
13211 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13212 * @phba: Pointer to HBA context object.
13213 * @cq: Pointer to completion queue.
13214 * @wcqe: Pointer to work-queue completion queue entry.
13216 * This routine handles a fast-path WQ entry consumed event by invoking the
13217 * proper WQ release routine on the matching fast-path WQ.
13220 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13221 struct lpfc_wcqe_release *wcqe)
13223 struct lpfc_queue *childwq;
13224 bool wqid_matched = false;
13227 /* Check for fast-path FCP work queue release */
13228 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
13229 list_for_each_entry(childwq, &cq->child_list, list) {
13230 if (childwq->queue_id == hba_wqid) {
13231 lpfc_sli4_wq_release(childwq,
13232 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13233 wqid_matched = true;
13237 /* Report warning log message if no match found */
13238 if (!wqid_matched)
13239 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13240 "2580 Fast-path wqe consume event carries "
13241 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
13245 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
13246 * @phba: Pointer to HBA context object.
13247 * @rcqe: Pointer to receive-queue completion queue entry.
13249 * This routine processes a receive-queue completion queue entry.
13251 * Return: true if work posted to worker thread, otherwise false.
13254 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13255 struct lpfc_rcqe *rcqe)
13257 bool workposted = false;
13258 struct lpfc_queue *hrq;
13259 struct lpfc_queue *drq;
13260 struct rqb_dmabuf *dma_buf;
13261 struct fc_frame_header *fc_hdr;
13262 struct lpfc_nvmet_tgtport *tgtp;
13263 uint32_t status, rq_id;
13264 unsigned long iflags;
13265 uint32_t fctl, idx;
13267 if ((phba->nvmet_support == 0) ||
13268 (phba->sli4_hba.nvmet_cqset == NULL))
13271 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
13272 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
13273 drq = phba->sli4_hba.nvmet_mrq_data[idx];
13275 /* sanity check on queue memory */
13276 if (unlikely(!hrq) || unlikely(!drq))
13279 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13280 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13282 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13284 if ((phba->nvmet_support == 0) ||
13285 (rq_id != hrq->queue_id))
13288 status = bf_get(lpfc_rcqe_status, rcqe);
13290 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13291 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13292 "6126 Receive Frame Truncated!!\n");
13294 case FC_STATUS_RQ_SUCCESS:
13295 lpfc_sli4_rq_release(hrq, drq);
13296 spin_lock_irqsave(&phba->hbalock, iflags);
13297 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
13299 hrq->RQ_no_buf_found++;
13300 spin_unlock_irqrestore(&phba->hbalock, iflags);
13303 spin_unlock_irqrestore(&phba->hbalock, iflags);
13305 hrq->RQ_buf_posted--;
13306 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13308 /* Just some basic sanity checks on FCP Command frame */
13309 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
13310 fc_hdr->fh_f_ctl[1] << 8 |
13311 fc_hdr->fh_f_ctl[2]);
13313 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
13314 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
13315 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
13318 if (fc_hdr->fh_type == FC_TYPE_FCP) {
13319 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
13320 lpfc_nvmet_unsol_fcp_event(
13321 phba, idx, dma_buf,
13322 cq->assoc_qp->isr_timestamp);
13326 lpfc_in_buf_free(phba, &dma_buf->dbuf);
13328 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13329 if (phba->nvmet_support) {
13330 tgtp = phba->targetport->private;
13331 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13332 "6401 RQE Error x%x, posted %d err_cnt "
13334 status, hrq->RQ_buf_posted,
13335 hrq->RQ_no_posted_buf,
13336 atomic_read(&tgtp->rcv_fcp_cmd_in),
13337 atomic_read(&tgtp->rcv_fcp_cmd_out),
13338 atomic_read(&tgtp->xmt_fcp_release));
13342 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13343 hrq->RQ_no_posted_buf++;
13344 /* Post more buffers if possible */
13352 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
13353 * @cq: Pointer to the completion queue.
13354 * @cqe: Pointer to fast-path completion queue entry.
13356 * This routine processes a fast-path work queue completion entry from the fast-path
13357 * event queue for FCP command response completion.
13360 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13361 struct lpfc_cqe *cqe)
13363 struct lpfc_wcqe_release wcqe;
13364 bool workposted = false;
13366 /* Copy the work queue CQE and convert endian order if needed */
13367 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
13369 /* Check and process for different type of WCQE and dispatch */
13370 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
13371 case CQE_CODE_COMPL_WQE:
13372 case CQE_CODE_NVME_ERSP:
13374 /* Process the WQ complete event */
13375 phba->last_completion_time = jiffies;
13376 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
13377 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
13378 (struct lpfc_wcqe_complete *)&wcqe);
13379 if (cq->subtype == LPFC_NVME_LS)
13380 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
13381 (struct lpfc_wcqe_complete *)&wcqe);
13383 case CQE_CODE_RELEASE_WQE:
13384 cq->CQ_release_wqe++;
13385 /* Process the WQ release event */
13386 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
13387 (struct lpfc_wcqe_release *)&wcqe);
13389 case CQE_CODE_XRI_ABORTED:
13390 cq->CQ_xri_aborted++;
13391 /* Process the WQ XRI abort event */
13392 phba->last_completion_time = jiffies;
13393 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13394 (struct sli4_wcqe_xri_aborted *)&wcqe);
13396 case CQE_CODE_RECEIVE_V1:
13397 case CQE_CODE_RECEIVE:
13398 phba->last_completion_time = jiffies;
13399 if (cq->subtype == LPFC_NVMET) {
13400 workposted = lpfc_sli4_nvmet_handle_rcqe(
13401 phba, cq, (struct lpfc_rcqe *)&wcqe);
13405 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13406 "0144 Not a valid CQE code: x%x\n",
13407 bf_get(lpfc_wcqe_c_code, &wcqe));
13414 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
13415 * @phba: Pointer to HBA context object.
13416 * @eqe: Pointer to fast-path event queue entry.
13418 * This routine processes an event queue entry from the fast-path event queue.
13419 * It will check the MajorCode and MinorCode to determine whether this is for a
13420 * completion event on a completion queue; if not, an error shall be logged
13421 * and the routine just returns. Otherwise, it will get to the corresponding completion
13422 * queue and process all the entries on the completion queue, rearm the
13423 * completion queue, and then return.
13426 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13429 struct lpfc_queue *cq = NULL;
13430 struct lpfc_cqe *cqe;
13431 bool workposted = false;
13435 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
13436 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13437 "0366 Not a valid completion "
13438 "event: majorcode=x%x, minorcode=x%x\n",
13439 bf_get_le32(lpfc_eqe_major_code, eqe),
13440 bf_get_le32(lpfc_eqe_minor_code, eqe));
13444 /* Get the reference to the corresponding CQ */
13445 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13447 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
13448 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
13449 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
13450 /* Process NVMET unsol rcv */
13451 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
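/*
 * This range check relies on the NVMET CQ set having been created
 * with consecutive queue ids, so the offset from the first id is
 * also the index into the set.
 */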
13456 if (phba->sli4_hba.nvme_cq_map &&
13457 (cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
13458 /* Process NVME / NVMET command completion */
13459 cq = phba->sli4_hba.nvme_cq[qidx];
13463 if (phba->sli4_hba.fcp_cq_map &&
13464 (cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
13465 /* Process FCP command completion */
13466 cq = phba->sli4_hba.fcp_cq[qidx];
13470 if (phba->sli4_hba.nvmels_cq &&
13471 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
13472 /* Process NVME unsol rcv */
13473 cq = phba->sli4_hba.nvmels_cq;
13476 /* Otherwise this is a Slow path event */
13478 ecount = lpfc_sli4_sp_handle_eqe(phba, eqe,
13479 phba->sli4_hba.hba_eq[qidx]);
13484 if (unlikely(cqid != cq->queue_id)) {
13485 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13486 "0368 Miss-matched fast-path completion "
13487 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
13488 cqid, cq->queue_id);
13492 /* Save EQ associated with this CQ */
13493 cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
13495 /* Process all the entries to the CQ */
13496 while ((cqe = lpfc_sli4_cq_get(cq))) {
13497 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
13498 if (!(++ecount % cq->entry_repost))
13502 /* Track the max number of CQEs processed in 1 EQ */
13503 if (ecount > cq->CQ_max_cqe)
13504 cq->CQ_max_cqe = ecount;
13505 cq->assoc_qp->EQ_cqe_cnt += ecount;
13507 /* Catch the no cq entry condition */
13508 if (unlikely(ecount == 0))
13509 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13510 "0369 No entry from fast-path completion "
13511 "queue fcpcqid=%d\n", cq->queue_id);
13513 /* In any case, flush and re-arm the CQ */
13514 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
13516 /* wake up worker thread if there is work to be done */
13518 lpfc_worker_wake_up(phba);
13524 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
13526 struct lpfc_eqe *eqe;
13528 /* walk all the EQ entries and drop on the floor */
13529 while ((eqe = lpfc_sli4_eq_get(eq)))
13532 /* Clear and re-arm the EQ */
13533 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
13538 * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
13540 * @phba: Pointer to HBA context object.
13541 * @eqe: Pointer to fast-path event queue entry.
13543 * This routine processes an event queue entry from the Flash Optimized
13544 * Fabric event queue. It checks the MajorCode and MinorCode to determine
13545 * whether this is a completion event on a completion queue; if not, an
13546 * error is logged and the routine returns. Otherwise, it locates the
13547 * corresponding completion queue, processes all the entries on that
13548 * completion queue, rearms it, and then returns.
13551 lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
13553 struct lpfc_queue *cq;
13554 struct lpfc_cqe *cqe;
13555 bool workposted = false;
13559 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
13560 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13561 "9147 Not a valid completion "
13562 "event: majorcode=x%x, minorcode=x%x\n",
13563 bf_get_le32(lpfc_eqe_major_code, eqe),
13564 bf_get_le32(lpfc_eqe_minor_code, eqe));
13568 /* Get the reference to the corresponding CQ */
13569 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13571 /* Next check for OAS */
13572 cq = phba->sli4_hba.oas_cq;
13573 if (unlikely(!cq)) {
13574 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13575 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13576 "9148 OAS completion queue "
13577 "does not exist\n");
13581 if (unlikely(cqid != cq->queue_id)) {
13582 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13583 "9149 Miss-matched fast-path compl "
13584 "queue id: eqcqid=%d, fcpcqid=%d\n",
13585 cqid, cq->queue_id);
13589 /* Save EQ associated with this CQ */
13590 cq->assoc_qp = phba->sli4_hba.fof_eq;
13592 /* Process all the entries to the OAS CQ */
13593 while ((cqe = lpfc_sli4_cq_get(cq))) {
13594 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
13595 if (!(++ecount % cq->entry_repost))
13599 /* Track the max number of CQEs processed in 1 EQ */
13600 if (ecount > cq->CQ_max_cqe)
13601 cq->CQ_max_cqe = ecount;
13602 cq->assoc_qp->EQ_cqe_cnt += ecount;
13604 /* Catch the no cq entry condition */
13605 if (unlikely(ecount == 0))
13606 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13607 "9153 No entry from fast-path completion "
13608 "queue fcpcqid=%d\n", cq->queue_id);
13610 /* In any case, flush and re-arm the CQ */
13611 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
13613 /* wake up worker thread if there is work to be done */
13615 lpfc_worker_wake_up(phba);
13619 * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
13620 * @irq: Interrupt number.
13621 * @dev_id: The device context pointer.
13623 * This function is directly called from the PCI layer as an interrupt
13624 * service routine when a device with the SLI-4 interface spec is enabled
13625 * with MSI-X multi-message interrupt mode and there is a Flash Optimized
13626 * Fabric IOCB ring event in the HBA. However, when the device is enabled
13627 * with either MSI or Pin-IRQ interrupt mode, this function is called as
13628 * part of the device-level interrupt handler. When the PCI slot is in
13629 * error recovery or the HBA is undergoing initialization, the interrupt
13630 * handler does not process the interrupt. Flash Optimized Fabric ring
13631 * events are handled in interrupt context. This function is called without
13632 * any lock held. It takes the hbalock to access and update SLI data
13633 * structures. Note that the EQ-to-CQ mapping is one-to-one, such that the
13634 * EQ index is equal to the CQ index.
13636 * This function returns IRQ_HANDLED when the interrupt is handled, else it
13637 * returns IRQ_NONE.
13640 lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
13642 struct lpfc_hba *phba;
13643 struct lpfc_hba_eq_hdl *hba_eq_hdl;
13644 struct lpfc_queue *eq;
13645 struct lpfc_eqe *eqe;
13646 unsigned long iflag;
13649 /* Get the driver's phba structure from the dev_id */
13650 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
13651 phba = hba_eq_hdl->phba;
13653 if (unlikely(!phba))
13656 /* Get to the EQ struct associated with this vector */
13657 eq = phba->sli4_hba.fof_eq;
13661 /* Check device state for handling interrupt */
13662 if (unlikely(lpfc_intr_state_check(phba))) {
13663 /* Check again for link_state with lock held */
13664 spin_lock_irqsave(&phba->hbalock, iflag);
13665 if (phba->link_state < LPFC_LINK_DOWN)
13666 /* Flush, clear interrupt, and rearm the EQ */
13667 lpfc_sli4_eq_flush(phba, eq);
13668 spin_unlock_irqrestore(&phba->hbalock, iflag);
13673 * Process all the events on the FCP fast-path EQ
13675 while ((eqe = lpfc_sli4_eq_get(eq))) {
13676 lpfc_sli4_fof_handle_eqe(phba, eqe);
13677 if (!(++ecount % eq->entry_repost))
13679 eq->EQ_processed++;
13682 /* Track the max number of EQEs processed in 1 intr */
13683 if (ecount > eq->EQ_max_eqe)
13684 eq->EQ_max_eqe = ecount;
13687 if (unlikely(ecount == 0)) {
13690 if (phba->intr_type == MSIX)
13691 /* With MSI-X the interrupt is not shared, so an empty EQ is unexpected */
13692 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13693 "9145 MSI-X interrupt with no EQE\n");
13695 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13696 "9146 ISR interrupt with no EQE\n");
13697 /* Without MSI-X the interrupt may be shared, so an empty EQ is expected */
13701 /* Always clear and re-arm the fast-path EQ */
13702 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
13703 return IRQ_HANDLED;
13707 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
13708 * @irq: Interrupt number.
13709 * @dev_id: The device context pointer.
13711 * This function is directly called from the PCI layer as an interrupt
13712 * service routine when a device with the SLI-4 interface spec is enabled
13713 * with MSI-X multi-message interrupt mode and there is a fast-path FCP
13714 * IOCB ring event in the HBA. However, when the device is enabled with
13715 * either MSI or Pin-IRQ interrupt mode, this function is called as part
13716 * of the device-level interrupt handler. When the PCI slot is in error
13717 * recovery or the HBA is undergoing initialization, the interrupt handler
13718 * does not process the interrupt. SCSI FCP fast-path ring events are
13719 * handled in interrupt context. This function is called without any lock
13720 * held. It takes the hbalock to access and update SLI data structures.
13721 * Note that the FCP EQ to FCP CQ mapping is one-to-one, such that the FCP
13722 * EQ index is equal to the FCP CQ index.
13724 * The link attention and ELS ring attention events are handled
13725 * by the worker thread. The interrupt handler signals the worker thread
13726 * and returns for these events. This function is called without any lock
13727 * held. It gets the hbalock to access and update SLI data structures.
13729 * This function returns IRQ_HANDLED when the interrupt is handled, else it
13730 * returns IRQ_NONE.
13733 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
13735 struct lpfc_hba *phba;
13736 struct lpfc_hba_eq_hdl *hba_eq_hdl;
13737 struct lpfc_queue *fpeq;
13738 struct lpfc_eqe *eqe;
13739 unsigned long iflag;
13744 /* Get the driver's phba structure from the dev_id */
13745 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
13746 phba = hba_eq_hdl->phba;
13747 hba_eqidx = hba_eq_hdl->idx;
13749 if (unlikely(!phba))
13751 if (unlikely(!phba->sli4_hba.hba_eq))
13754 /* Get to the EQ struct associated with this vector */
13755 fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
13756 if (unlikely(!fpeq))
13759 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13760 if (phba->ktime_on)
13761 fpeq->isr_timestamp = ktime_get_ns();
13764 if (lpfc_fcp_look_ahead) {
13765 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
13766 lpfc_sli4_eq_clr_intr(fpeq);
13768 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
13773 /* Check device state for handling interrupt */
13774 if (unlikely(lpfc_intr_state_check(phba))) {
13775 /* Check again for link_state with lock held */
13776 spin_lock_irqsave(&phba->hbalock, iflag);
13777 if (phba->link_state < LPFC_LINK_DOWN)
13778 /* Flush, clear interrupt, and rearm the EQ */
13779 lpfc_sli4_eq_flush(phba, fpeq);
13780 spin_unlock_irqrestore(&phba->hbalock, iflag);
13781 if (lpfc_fcp_look_ahead)
13782 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
13787 * Process all the events on the FCP fast-path EQ
13789 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
13793 ccount += lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
13794 if (!(++ecount % fpeq->entry_repost) ||
13795 ccount > LPFC_MAX_ISR_CQE)
13797 fpeq->EQ_processed++;
13800 /* Track the max number of EQEs processed in 1 intr */
13801 if (ecount > fpeq->EQ_max_eqe)
13802 fpeq->EQ_max_eqe = ecount;
13804 /* Always clear and re-arm the fast-path EQ */
13805 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
13807 if (unlikely(ecount == 0)) {
13808 fpeq->EQ_no_entry++;
13810 if (lpfc_fcp_look_ahead) {
13811 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
13815 if (phba->intr_type == MSIX)
13816 /* With MSI-X the interrupt is not shared, so an empty EQ is unexpected */
13817 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13818 "0358 MSI-X interrupt with no EQE\n");
13820 /* Without MSI-X the interrupt may be shared, so an empty EQ is expected */
13824 if (lpfc_fcp_look_ahead)
13825 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
13827 return IRQ_HANDLED;
13828 } /* lpfc_sli4_hba_intr_handler */
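/*
 * Sketch of the lpfc_fcp_look_ahead gating used above (illustrative
 * restatement of the code, not an addition to it): the per-EQ atomic
 * acts as an ownership gate between contexts that may race on the same
 * EQ. Only the path that drops the counter to zero masks the EQ
 * interrupt, and every exit path restores the count.
 *
 *	if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
 *		lpfc_sli4_eq_clr_intr(fpeq);		// we own the EQ
 *	else
 *		atomic_inc(&hba_eq_hdl->hba_eq_in_use);	// contended, back off
 *	...
 *	atomic_inc(&hba_eq_hdl->hba_eq_in_use);		// release on exit
 */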
13831 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
13832 * @irq: Interrupt number.
13833 * @dev_id: The device context pointer.
13835 * This function is the device-level interrupt handler for a device with the
13836 * SLI-4 interface spec, called from the PCI layer when either MSI or Pin-IRQ
13837 * interrupt mode is enabled and there is an event in the HBA which requires
13838 * driver attention. This function invokes the slow-path interrupt attention
13839 * handling function and fast-path interrupt attention handling function in
13840 * turn to process the relevant HBA attention events. This function is called
13841 * without any lock held. It takes the hbalock to access and update SLI data structures.
13844 * This function returns IRQ_HANDLED when the interrupt is handled, else it
13845 * returns IRQ_NONE.
13848 lpfc_sli4_intr_handler(int irq, void *dev_id)
13850 struct lpfc_hba *phba;
13851 irqreturn_t hba_irq_rc;
13852 bool hba_handled = false;
13855 /* Get the driver's phba structure from the dev_id */
13856 phba = (struct lpfc_hba *)dev_id;
13858 if (unlikely(!phba))
13862 * Invoke fast-path host attention interrupt handling as appropriate.
13864 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
13865 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
13866 &phba->sli4_hba.hba_eq_hdl[qidx]);
13867 if (hba_irq_rc == IRQ_HANDLED)
13868 hba_handled |= true;
13871 if (phba->cfg_fof) {
13872 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
13873 &phba->sli4_hba.hba_eq_hdl[qidx]);
13874 if (hba_irq_rc == IRQ_HANDLED)
13875 hba_handled |= true;
13878 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
13879 } /* lpfc_sli4_intr_handler */
13882 * lpfc_sli4_queue_free - free a queue structure and associated memory
13883 * @queue: The queue structure to free.
13885 * This function frees a queue structure and the DMAable memory used for
13886 * the host resident queue. This function must be called after destroying the
13887 * queue on the HBA.
13890 lpfc_sli4_queue_free(struct lpfc_queue *queue)
13892 struct lpfc_dmabuf *dmabuf;
13897 while (!list_empty(&queue->page_list)) {
13898 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
13900 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
13901 dmabuf->virt, dmabuf->phys);
13905 lpfc_free_rq_buffer(queue->phba, queue);
13906 kfree(queue->rqbp);
13909 if (!list_empty(&queue->wq_list))
13910 list_del(&queue->wq_list);
13917 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
13918 * @phba: The HBA that this queue is being created on.
13919 * @entry_size: The size of each queue entry for this queue.
13920 * @entry_count: The number of entries that this queue will handle.
13922 * This function allocates a queue structure and the DMAable memory used for
13923 * the host resident queue. This function must be called before creating the
13924 * queue on the HBA.
13926 struct lpfc_queue *
13927 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
13928 uint32_t entry_count)
13930 struct lpfc_queue *queue;
13931 struct lpfc_dmabuf *dmabuf;
13932 int x, total_qe_count;
13934 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13936 if (!phba->sli4_hba.pc_sli4_params.supported)
13937 hw_page_size = SLI4_PAGE_SIZE;
13939 queue = kzalloc(sizeof(struct lpfc_queue) +
13940 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
13943 queue->page_count = (ALIGN(entry_size * entry_count,
13944 hw_page_size))/hw_page_size;
13946 /* If needed, adjust page count to match the max the adapter supports */
13947 if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
13948 (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
13949 queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
13951 INIT_LIST_HEAD(&queue->list);
13952 INIT_LIST_HEAD(&queue->wq_list);
13953 INIT_LIST_HEAD(&queue->page_list);
13954 INIT_LIST_HEAD(&queue->child_list);
13955 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
13956 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
13959 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
13960 hw_page_size, &dmabuf->phys,
13962 if (!dmabuf->virt) {
13966 dmabuf->buffer_tag = x;
13967 list_add_tail(&dmabuf->list, &queue->page_list);
13968 /* initialize queue's entry array */
13969 dma_pointer = dmabuf->virt;
13970 for (; total_qe_count < entry_count &&
13971 dma_pointer < (hw_page_size + dmabuf->virt);
13972 total_qe_count++, dma_pointer += entry_size) {
13973 queue->qe[total_qe_count].address = dma_pointer;
13976 queue->entry_size = entry_size;
13977 queue->entry_count = entry_count;
13978 queue->phba = phba;
13980 /* entry_repost will be set during q creation */
13984 lpfc_sli4_queue_free(queue);
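/*
 * Usage sketch for the alloc/create/free pairing (the 32-byte EQE size
 * and 1024-entry count are illustrative assumptions, not driver
 * requirements):
 *
 *	struct lpfc_queue *eq;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, 32, 1024);
 *	if (!eq)
 *		return -ENOMEM;
 *	if (lpfc_eq_create(phba, eq, phba->cfg_fcp_imax)) {
 *		lpfc_sli4_queue_free(eq);	// must not leak the DMA pages
 *		return -ENXIO;
 *	}
 */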
13989 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
13990 * @phba: HBA structure that indicates port to create a queue on.
13991 * @pci_barset: PCI BAR set flag.
13993 * This function performs an iomap of the specified PCI BAR address to a host
13994 * memory address, if not already done, and returns it. The returned host
13995 * memory address can be NULL.
13997 static void __iomem *
13998 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14003 switch (pci_barset) {
14004 case WQ_PCI_BAR_0_AND_1:
14005 return phba->pci_bar0_memmap_p;
14006 case WQ_PCI_BAR_2_AND_3:
14007 return phba->pci_bar2_memmap_p;
14008 case WQ_PCI_BAR_4_AND_5:
14009 return phba->pci_bar4_memmap_p;
14017 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs
14018 * @phba: HBA structure that indicates port to create a queue on.
14019 * @startq: The starting FCP EQ to modify
14021 * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
14022 * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be
14023 * updated in one mailbox command.
14025 * The @phba struct is used to send mailbox command to HBA. The @startq
14026 * is used to get the starting FCP EQ to change.
14027 * This function is synchronous and will wait for the mailbox
14028 * command to finish before continuing.
14030 * On success this function will return a zero. If unable to allocate enough
14031 * memory this function will return -ENOMEM. If the queue create mailbox command
14032 * fails this function will return -ENXIO.
14035 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14036 uint32_t numq, uint32_t imax)
14038 struct lpfc_mbx_modify_eq_delay *eq_delay;
14039 LPFC_MBOXQ_t *mbox;
14040 struct lpfc_queue *eq;
14041 int cnt, rc, length, status = 0;
14042 uint32_t shdr_status, shdr_add_status;
14043 uint32_t result, val;
14045 union lpfc_sli4_cfg_shdr *shdr;
14048 if (startq >= phba->io_channel_irqs)
14051 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14054 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
14055 sizeof(struct lpfc_sli4_cfg_mhdr));
14056 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14057 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
14058 length, LPFC_SLI4_MBX_EMBED);
14059 eq_delay = &mbox->u.mqe.un.eq_delay;
14061 /* Calculate delay multiplier from maximum interrupts per second */
14062 result = imax / phba->io_channel_irqs;
14063 if (result > LPFC_DMULT_CONST || result == 0)
14066 dmult = LPFC_DMULT_CONST/result - 1;
14067 if (dmult > LPFC_DMULT_MAX)
14068 dmult = LPFC_DMULT_MAX;
14071 for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
14072 eq = phba->sli4_hba.hba_eq[qidx];
14076 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
14077 eq_delay->u.request.eq[cnt].phase = 0;
14078 eq_delay->u.request.eq[cnt].delay_multi = dmult;
14081 /* q_mode is only used for auto_imax */
14082 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14083 /* Use EQ Delay Register method for q_mode */
14085 /* Convert for EQ Delay register */
14086 val = phba->cfg_fcp_imax;
14088 /* First, interrupts per sec per EQ */
14089 val = phba->cfg_fcp_imax /
14090 phba->io_channel_irqs;
14092 /* us delay between each interrupt */
14093 val = LPFC_SEC_TO_USEC / val;
14103 eq_delay->u.request.num_eq = cnt;
14105 mbox->vport = phba->pport;
14106 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14107 mbox->context1 = NULL;
14108 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14109 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
14110 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14111 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14112 if (shdr_status || shdr_add_status || rc) {
14113 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14114 "2512 MODIFY_EQ_DELAY mailbox failed with "
14115 "status x%x add_status x%x, mbx status x%x\n",
14116 shdr_status, shdr_add_status, rc);
14119 mempool_free(mbox, phba->mbox_mem_pool);
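/*
 * Worked example of the moderation math above (the inputs are assumed):
 * with imax = 80000 interrupts/sec spread across 4 IO channel IRQs,
 * each EQ targets 20000 interrupts/sec.
 *
 *	// EQ Delay Register path:
 *	val = 80000 / 4;		// 20000 interrupts/sec per EQ
 *	val = LPFC_SEC_TO_USEC / val;	// 1000000 / 20000 = 50 us delay
 *
 *	// Mailbox path derives a multiplier instead of a microsecond value:
 *	result = 80000 / 4;
 *	dmult = LPFC_DMULT_CONST / result - 1;	// clamped to LPFC_DMULT_MAX
 */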
14124 * lpfc_eq_create - Create an Event Queue on the HBA
14125 * @phba: HBA structure that indicates port to create a queue on.
14126 * @eq: The queue structure to use to create the event queue.
14127 * @imax: The maximum interrupt per second limit.
14129 * This function creates an event queue, as detailed in @eq, on a port,
14130 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
14132 * The @phba struct is used to send mailbox command to HBA. The @eq struct
14133 * is used to get the entry count and entry size that are necessary to
14134 * determine the number of pages to allocate and use for this queue. This
14135 * function will send the EQ_CREATE mailbox command to the HBA to set up the
14136 * event queue. This function is synchronous and will wait for the mailbox
14137 * command to finish before continuing.
14139 * On success this function will return a zero. If unable to allocate enough
14140 * memory this function will return -ENOMEM. If the queue create mailbox command
14141 * fails this function will return -ENXIO.
14144 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
14146 struct lpfc_mbx_eq_create *eq_create;
14147 LPFC_MBOXQ_t *mbox;
14148 int rc, length, status = 0;
14149 struct lpfc_dmabuf *dmabuf;
14150 uint32_t shdr_status, shdr_add_status;
14151 union lpfc_sli4_cfg_shdr *shdr;
14153 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14155 /* sanity check on queue memory */
14158 if (!phba->sli4_hba.pc_sli4_params.supported)
14159 hw_page_size = SLI4_PAGE_SIZE;
14161 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14164 length = (sizeof(struct lpfc_mbx_eq_create) -
14165 sizeof(struct lpfc_sli4_cfg_mhdr));
14166 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14167 LPFC_MBOX_OPCODE_EQ_CREATE,
14168 length, LPFC_SLI4_MBX_EMBED);
14169 eq_create = &mbox->u.mqe.un.eq_create;
14170 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
14172 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
14174 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
14175 /* don't setup delay multiplier using EQ_CREATE */
14177 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
14179 switch (eq->entry_count) {
14181 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14182 "0360 Unsupported EQ count. (%d)\n",
14184 if (eq->entry_count < 256)
14186 /* otherwise default to smallest count (fall through) */
14188 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14192 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14196 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14200 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14204 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14208 list_for_each_entry(dmabuf, &eq->page_list, list) {
14209 memset(dmabuf->virt, 0, hw_page_size);
14210 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14211 putPaddrLow(dmabuf->phys);
14212 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14213 putPaddrHigh(dmabuf->phys);
14215 mbox->vport = phba->pport;
14216 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14217 mbox->context1 = NULL;
14218 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14219 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
14220 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14221 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14222 if (shdr_status || shdr_add_status || rc) {
14223 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14224 "2500 EQ_CREATE mailbox failed with "
14225 "status x%x add_status x%x, mbx status x%x\n",
14226 shdr_status, shdr_add_status, rc);
14229 eq->type = LPFC_EQ;
14230 eq->subtype = LPFC_NONE;
14231 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
14232 if (eq->queue_id == 0xFFFF)
14234 eq->host_index = 0;
14236 eq->entry_repost = LPFC_EQ_REPOST;
14238 mempool_free(mbox, phba->mbox_mem_pool);
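/*
 * The entry_count switch above only accepts the EQE counts that the
 * EQ_CREATE context field can encode, and falls back to the smallest
 * size otherwise. An equivalent check, with the set of counts inferred
 * from the cases above (the helper name is hypothetical):
 *
 *	static inline bool eq_count_valid(uint32_t n)
 *	{
 *		return n == 256 || n == 512 || n == 1024 ||
 *		       n == 2048 || n == 4096;
 *	}
 */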
14243 * lpfc_cq_create - Create a Completion Queue on the HBA
14244 * @phba: HBA structure that indicates port to create a queue on.
14245 * @cq: The queue structure to use to create the completion queue.
14246 * @eq: The event queue to bind this completion queue to.
14248 * This function creates a completion queue, as detailed in @cq, on a port,
14249 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
14251 * The @phba struct is used to send mailbox command to HBA. The @cq struct
14252 * is used to get the entry count and entry size that are necessary to
14253 * determine the number of pages to allocate and use for this queue. The @eq
14254 * is used to indicate which event queue to bind this completion queue to. This
14255 * function will send the CQ_CREATE mailbox command to the HBA to set up the
14256 * completion queue. This function is synchronous and will wait for the mailbox
14257 * command to finish before continuing.
14259 * On success this function will return a zero. If unable to allocate enough
14260 * memory this function will return -ENOMEM. If the queue create mailbox command
14261 * fails this function will return -ENXIO.
14264 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14265 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
14267 struct lpfc_mbx_cq_create *cq_create;
14268 struct lpfc_dmabuf *dmabuf;
14269 LPFC_MBOXQ_t *mbox;
14270 int rc, length, status = 0;
14271 uint32_t shdr_status, shdr_add_status;
14272 union lpfc_sli4_cfg_shdr *shdr;
14273 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14275 /* sanity check on queue memory */
14278 if (!phba->sli4_hba.pc_sli4_params.supported)
14279 hw_page_size = SLI4_PAGE_SIZE;
14281 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14284 length = (sizeof(struct lpfc_mbx_cq_create) -
14285 sizeof(struct lpfc_sli4_cfg_mhdr));
14286 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14287 LPFC_MBOX_OPCODE_CQ_CREATE,
14288 length, LPFC_SLI4_MBX_EMBED);
14289 cq_create = &mbox->u.mqe.un.cq_create;
14290 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
14291 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
14293 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
14294 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
14295 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14296 phba->sli4_hba.pc_sli4_params.cqv);
14297 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
14298 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
14299 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
14300 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
14303 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
14306 switch (cq->entry_count) {
14308 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14309 "0361 Unsupported CQ count: "
14310 "entry cnt %d sz %d pg cnt %d\n",
14311 cq->entry_count, cq->entry_size,
14313 if (cq->entry_count < 256) {
14317 /* otherwise default to smallest count (fall through) */
14319 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14323 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14327 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14331 list_for_each_entry(dmabuf, &cq->page_list, list) {
14332 memset(dmabuf->virt, 0, hw_page_size);
14333 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14334 putPaddrLow(dmabuf->phys);
14335 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14336 putPaddrHigh(dmabuf->phys);
14338 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14340 /* The IOCTL status is embedded in the mailbox subheader. */
14341 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14342 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14343 if (shdr_status || shdr_add_status || rc) {
14344 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14345 "2501 CQ_CREATE mailbox failed with "
14346 "status x%x add_status x%x, mbx status x%x\n",
14347 shdr_status, shdr_add_status, rc);
14351 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14352 if (cq->queue_id == 0xFFFF) {
14356 /* link the cq onto the parent eq child list */
14357 list_add_tail(&cq->list, &eq->child_list);
14358 /* Set up completion queue's type and subtype */
14360 cq->subtype = subtype;
14361 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14362 cq->assoc_qid = eq->queue_id;
14363 cq->host_index = 0;
14365 cq->entry_repost = LPFC_CQ_REPOST;
14368 mempool_free(mbox, phba->mbox_mem_pool);
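/*
 * Usage sketch (the CQE size, entry count and LPFC_FCP subtype are
 * illustrative assumptions; "eq" is an already-created event queue):
 *
 *	struct lpfc_queue *cq;
 *
 *	cq = lpfc_sli4_queue_alloc(phba, 16, 1024);	// 16-byte CQEs
 *	if (!cq || lpfc_cq_create(phba, cq, eq, LPFC_CQ, LPFC_FCP))
 *		...	// -ENOMEM or -ENXIO, as described above
 */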
14373 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
14374 * @phba: HBA structure that indicates port to create a queue on.
14375 * @cqp: The queue structure array to use to create the completion queues.
14376 * @eqp: The event queue array to bind these completion queues to.
14378 * This function creates a set of completion queues to support MRQ,
14379 * as detailed in @cqp, on a port,
14380 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
14382 * The @phba struct is used to send mailbox commands to the HBA. The @cqp
14383 * array is used to get the entry count and entry size that are necessary
14384 * to determine the number of pages to allocate and use for these queues.
14385 * The @eqp array indicates which event queue to bind each completion queue
14386 * to. This function will send the CREATE_CQ_SET mailbox command to the HBA
14387 * to set up the completion queues. This function is synchronous and will
14388 * wait for the mailbox command to finish before continuing.
14390 * On success this function will return a zero. If unable to allocate enough
14391 * memory this function will return -ENOMEM. If the queue create mailbox command
14392 * fails this function will return -ENXIO.
14395 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
14396 struct lpfc_queue **eqp, uint32_t type, uint32_t subtype)
14398 struct lpfc_queue *cq;
14399 struct lpfc_queue *eq;
14400 struct lpfc_mbx_cq_create_set *cq_set;
14401 struct lpfc_dmabuf *dmabuf;
14402 LPFC_MBOXQ_t *mbox;
14403 int rc, length, alloclen, status = 0;
14404 int cnt, idx, numcq, page_idx = 0;
14405 uint32_t shdr_status, shdr_add_status;
14406 union lpfc_sli4_cfg_shdr *shdr;
14407 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14409 /* sanity check on queue memory */
14410 numcq = phba->cfg_nvmet_mrq;
14411 if (!cqp || !eqp || !numcq)
14413 if (!phba->sli4_hba.pc_sli4_params.supported)
14414 hw_page_size = SLI4_PAGE_SIZE;
14416 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14420 length = sizeof(struct lpfc_mbx_cq_create_set);
14421 length += ((numcq * cqp[0]->page_count) *
14422 sizeof(struct dma_address));
14423 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14424 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
14425 LPFC_SLI4_MBX_NEMBED);
14426 if (alloclen < length) {
14427 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14428 "3098 Allocated DMA memory size (%d) is "
14429 "less than the requested DMA memory size "
14430 "(%d)\n", alloclen, length);
14434 cq_set = mbox->sge_array->addr[0];
14435 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
14436 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
14438 for (idx = 0; idx < numcq; idx++) {
14448 bf_set(lpfc_mbx_cq_create_set_page_size,
14449 &cq_set->u.request,
14450 (hw_page_size / SLI4_PAGE_SIZE));
14451 bf_set(lpfc_mbx_cq_create_set_num_pages,
14452 &cq_set->u.request, cq->page_count);
14453 bf_set(lpfc_mbx_cq_create_set_evt,
14454 &cq_set->u.request, 1);
14455 bf_set(lpfc_mbx_cq_create_set_valid,
14456 &cq_set->u.request, 1);
14457 bf_set(lpfc_mbx_cq_create_set_cqe_size,
14458 &cq_set->u.request, 0);
14459 bf_set(lpfc_mbx_cq_create_set_num_cq,
14460 &cq_set->u.request, numcq);
14461 switch (cq->entry_count) {
14463 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14464 "3118 Bad CQ count. (%d)\n",
14466 if (cq->entry_count < 256) {
14470 /* otherwise default to smallest (fall through) */
14472 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14473 &cq_set->u.request, LPFC_CQ_CNT_256);
14476 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14477 &cq_set->u.request, LPFC_CQ_CNT_512);
14480 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14481 &cq_set->u.request, LPFC_CQ_CNT_1024);
14484 bf_set(lpfc_mbx_cq_create_set_eq_id0,
14485 &cq_set->u.request, eq->queue_id);
14488 bf_set(lpfc_mbx_cq_create_set_eq_id1,
14489 &cq_set->u.request, eq->queue_id);
14492 bf_set(lpfc_mbx_cq_create_set_eq_id2,
14493 &cq_set->u.request, eq->queue_id);
14496 bf_set(lpfc_mbx_cq_create_set_eq_id3,
14497 &cq_set->u.request, eq->queue_id);
14500 bf_set(lpfc_mbx_cq_create_set_eq_id4,
14501 &cq_set->u.request, eq->queue_id);
14504 bf_set(lpfc_mbx_cq_create_set_eq_id5,
14505 &cq_set->u.request, eq->queue_id);
14508 bf_set(lpfc_mbx_cq_create_set_eq_id6,
14509 &cq_set->u.request, eq->queue_id);
14512 bf_set(lpfc_mbx_cq_create_set_eq_id7,
14513 &cq_set->u.request, eq->queue_id);
14516 bf_set(lpfc_mbx_cq_create_set_eq_id8,
14517 &cq_set->u.request, eq->queue_id);
14520 bf_set(lpfc_mbx_cq_create_set_eq_id9,
14521 &cq_set->u.request, eq->queue_id);
14524 bf_set(lpfc_mbx_cq_create_set_eq_id10,
14525 &cq_set->u.request, eq->queue_id);
14528 bf_set(lpfc_mbx_cq_create_set_eq_id11,
14529 &cq_set->u.request, eq->queue_id);
14532 bf_set(lpfc_mbx_cq_create_set_eq_id12,
14533 &cq_set->u.request, eq->queue_id);
14536 bf_set(lpfc_mbx_cq_create_set_eq_id13,
14537 &cq_set->u.request, eq->queue_id);
14540 bf_set(lpfc_mbx_cq_create_set_eq_id14,
14541 &cq_set->u.request, eq->queue_id);
14544 bf_set(lpfc_mbx_cq_create_set_eq_id15,
14545 &cq_set->u.request, eq->queue_id);
14549 /* link the cq onto the parent eq child list */
14550 list_add_tail(&cq->list, &eq->child_list);
14551 /* Set up completion queue's type and subtype */
14553 cq->subtype = subtype;
14554 cq->assoc_qid = eq->queue_id;
14555 cq->host_index = 0;
14557 cq->entry_repost = LPFC_CQ_REPOST;
14560 list_for_each_entry(dmabuf, &cq->page_list, list) {
14561 memset(dmabuf->virt, 0, hw_page_size);
14562 cnt = page_idx + dmabuf->buffer_tag;
14563 cq_set->u.request.page[cnt].addr_lo =
14564 putPaddrLow(dmabuf->phys);
14565 cq_set->u.request.page[cnt].addr_hi =
14566 putPaddrHigh(dmabuf->phys);
14572 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14574 /* The IOCTL status is embedded in the mailbox subheader. */
14575 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14576 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14577 if (shdr_status || shdr_add_status || rc) {
14578 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14579 "3119 CQ_CREATE_SET mailbox failed with "
14580 "status x%x add_status x%x, mbx status x%x\n",
14581 shdr_status, shdr_add_status, rc);
14585 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
14586 if (rc == 0xFFFF) {
14591 for (idx = 0; idx < numcq; idx++) {
14593 cq->queue_id = rc + idx;
14597 lpfc_sli4_mbox_cmd_free(phba, mbox);
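/*
 * Note on the ID assignment above: CREATE_CQ_SET returns a single base
 * queue ID in its response, and the member CQs are numbered
 * consecutively from it:
 *
 *	base = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
 *	for (idx = 0; idx < numcq; idx++)
 *		cqp[idx]->queue_id = base + idx;
 */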
14602 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
14603 * @phba: HBA structure that indicates port to create a queue on.
14604 * @mq: The queue structure to use to create the mailbox queue.
14605 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
14606 * @cq: The completion queue to associate with this mailbox queue.
14608 * This function provides fallback (fb) functionality when
14609 * mq_create_ext fails on older FW generations. Its purpose is identical
14610 * to mq_create_ext otherwise.
14612 * This routine cannot fail as all attributes were previously accessed and
14613 * initialized in mq_create_ext.
14616 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
14617 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
14619 struct lpfc_mbx_mq_create *mq_create;
14620 struct lpfc_dmabuf *dmabuf;
14623 length = (sizeof(struct lpfc_mbx_mq_create) -
14624 sizeof(struct lpfc_sli4_cfg_mhdr));
14625 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14626 LPFC_MBOX_OPCODE_MQ_CREATE,
14627 length, LPFC_SLI4_MBX_EMBED);
14628 mq_create = &mbox->u.mqe.un.mq_create;
14629 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
14631 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
14633 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
14634 switch (mq->entry_count) {
14636 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14637 LPFC_MQ_RING_SIZE_16);
14640 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14641 LPFC_MQ_RING_SIZE_32);
14644 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14645 LPFC_MQ_RING_SIZE_64);
14648 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14649 LPFC_MQ_RING_SIZE_128);
14652 list_for_each_entry(dmabuf, &mq->page_list, list) {
14653 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14654 putPaddrLow(dmabuf->phys);
14655 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14656 putPaddrHigh(dmabuf->phys);
14661 * lpfc_mq_create - Create a mailbox Queue on the HBA
14662 * @phba: HBA structure that indicates port to create a queue on.
14663 * @mq: The queue structure to use to create the mailbox queue.
14664 * @cq: The completion queue to associate with this mailbox queue.
14665 * @subtype: The queue's subtype.
14667 * This function creates a mailbox queue, as detailed in @mq, on a port,
14668 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
14670 * The @phba struct is used to send mailbox command to HBA. The @mq struct
14671 * is used to get the entry count and entry size that are necessary to
14672 * determine the number of pages to allocate and use for this queue. This
14673 * function will send the MQ_CREATE mailbox command to the HBA to set up the
14674 * mailbox queue. This function is synchronous and will wait for the mailbox
14675 * command to finish before continuing.
14677 * On success this function will return a zero. If unable to allocate enough
14678 * memory this function will return -ENOMEM. If the queue create mailbox command
14679 * fails this function will return -ENXIO.
14682 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
14683 struct lpfc_queue *cq, uint32_t subtype)
14685 struct lpfc_mbx_mq_create *mq_create;
14686 struct lpfc_mbx_mq_create_ext *mq_create_ext;
14687 struct lpfc_dmabuf *dmabuf;
14688 LPFC_MBOXQ_t *mbox;
14689 int rc, length, status = 0;
14690 uint32_t shdr_status, shdr_add_status;
14691 union lpfc_sli4_cfg_shdr *shdr;
14692 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14694 /* sanity check on queue memory */
14697 if (!phba->sli4_hba.pc_sli4_params.supported)
14698 hw_page_size = SLI4_PAGE_SIZE;
14700 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14703 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
14704 sizeof(struct lpfc_sli4_cfg_mhdr));
14705 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14706 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
14707 length, LPFC_SLI4_MBX_EMBED);
14709 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
14710 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
14711 bf_set(lpfc_mbx_mq_create_ext_num_pages,
14712 &mq_create_ext->u.request, mq->page_count);
14713 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
14714 &mq_create_ext->u.request, 1);
14715 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
14716 &mq_create_ext->u.request, 1);
14717 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
14718 &mq_create_ext->u.request, 1);
14719 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
14720 &mq_create_ext->u.request, 1);
14721 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
14722 &mq_create_ext->u.request, 1);
14723 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
14724 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14725 phba->sli4_hba.pc_sli4_params.mqv);
14726 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
14727 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
14730 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
14732 switch (mq->entry_count) {
14734 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14735 "0362 Unsupported MQ count. (%d)\n",
14737 if (mq->entry_count < 16) {
14741 /* otherwise default to smallest count (fall through) */
14743 bf_set(lpfc_mq_context_ring_size,
14744 &mq_create_ext->u.request.context,
14745 LPFC_MQ_RING_SIZE_16);
14748 bf_set(lpfc_mq_context_ring_size,
14749 &mq_create_ext->u.request.context,
14750 LPFC_MQ_RING_SIZE_32);
14753 bf_set(lpfc_mq_context_ring_size,
14754 &mq_create_ext->u.request.context,
14755 LPFC_MQ_RING_SIZE_64);
14758 bf_set(lpfc_mq_context_ring_size,
14759 &mq_create_ext->u.request.context,
14760 LPFC_MQ_RING_SIZE_128);
14763 list_for_each_entry(dmabuf, &mq->page_list, list) {
14764 memset(dmabuf->virt, 0, hw_page_size);
14765 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
14766 putPaddrLow(dmabuf->phys);
14767 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
14768 putPaddrHigh(dmabuf->phys);
14770 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14771 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
14772 &mq_create_ext->u.response);
14773 if (rc != MBX_SUCCESS) {
14774 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14775 "2795 MQ_CREATE_EXT failed with "
14776 "status x%x. Failback to MQ_CREATE.\n",
14778 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
14779 mq_create = &mbox->u.mqe.un.mq_create;
14780 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14781 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
14782 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
14783 &mq_create->u.response);
14786 /* The IOCTL status is embedded in the mailbox subheader. */
14787 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14788 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14789 if (shdr_status || shdr_add_status || rc) {
14790 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14791 "2502 MQ_CREATE mailbox failed with "
14792 "status x%x add_status x%x, mbx status x%x\n",
14793 shdr_status, shdr_add_status, rc);
14797 if (mq->queue_id == 0xFFFF) {
14801 mq->type = LPFC_MQ;
14802 mq->assoc_qid = cq->queue_id;
14803 mq->subtype = subtype;
14804 mq->host_index = 0;
14806 mq->entry_repost = LPFC_MQ_REPOST;
14808 /* link the mq onto the parent cq child list */
14809 list_add_tail(&mq->list, &cq->child_list);
14811 mempool_free(mbox, phba->mbox_mem_pool);
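/*
 * The create flow above, in brief (a sketch of the logic, not literal
 * code): the extended command is tried first, then the plain command
 * for older firmware.
 *
 *	rc = lpfc_sli_issue_mbox(...);		// MQ_CREATE_EXT first
 *	if (rc != MBX_SUCCESS) {
 *		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
 *		rc = lpfc_sli_issue_mbox(...);	// plain MQ_CREATE fallback
 *	}
 *	// either way, check shdr_status/shdr_add_status and the queue_id
 */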
14816 * lpfc_wq_create - Create a Work Queue on the HBA
14817 * @phba: HBA structure that indicates port to create a queue on.
14818 * @wq: The queue structure to use to create the work queue.
14819 * @cq: The completion queue to bind this work queue to.
14820 * @subtype: The subtype of the work queue indicating its functionality.
14822 * This function creates a work queue, as detailed in @wq, on a port, described
14823 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
14825 * The @phba struct is used to send mailbox command to HBA. The @wq struct
14826 * is used to get the entry count and entry size that are necessary to
14827 * determine the number of pages to allocate and use for this queue. The @cq
14828 * is used to indicate which completion queue to bind this work queue to. This
14829 * function will send the WQ_CREATE mailbox command to the HBA to set up the
14830 * work queue. This function is synchronous and will wait for the mailbox
14831 * command to finish before continuing.
14833 * On success this function will return a zero. If unable to allocate enough
14834 * memory this function will return -ENOMEM. If the queue create mailbox command
14835 * fails this function will return -ENXIO.
14838 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
14839 struct lpfc_queue *cq, uint32_t subtype)
14841 struct lpfc_mbx_wq_create *wq_create;
14842 struct lpfc_dmabuf *dmabuf;
14843 LPFC_MBOXQ_t *mbox;
14844 int rc, length, status = 0;
14845 uint32_t shdr_status, shdr_add_status;
14846 union lpfc_sli4_cfg_shdr *shdr;
14847 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14848 struct dma_address *page;
14849 void __iomem *bar_memmap_p;
14850 uint32_t db_offset;
14851 uint16_t pci_barset;
14853 /* sanity check on queue memory */
14856 if (!phba->sli4_hba.pc_sli4_params.supported)
14857 hw_page_size = SLI4_PAGE_SIZE;
14859 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14862 length = (sizeof(struct lpfc_mbx_wq_create) -
14863 sizeof(struct lpfc_sli4_cfg_mhdr));
14864 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14865 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
14866 length, LPFC_SLI4_MBX_EMBED);
14867 wq_create = &mbox->u.mqe.un.wq_create;
14868 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
14869 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
14871 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
14874 /* wqv is the earliest version supported, NOT the latest */
14875 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14876 phba->sli4_hba.pc_sli4_params.wqv);
14878 switch (phba->sli4_hba.pc_sli4_params.wqv) {
14879 case LPFC_Q_CREATE_VERSION_0:
14880 switch (wq->entry_size) {
14883 /* Nothing to do, version 0 ONLY supports 64 byte */
14884 page = wq_create->u.request.page;
14887 if (!(phba->sli4_hba.pc_sli4_params.wqsize &
14888 LPFC_WQ_SZ128_SUPPORT)) {
14892 /* If we get here the HBA MUST also support V1 and
14895 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14896 LPFC_Q_CREATE_VERSION_1);
14898 bf_set(lpfc_mbx_wq_create_wqe_count,
14899 &wq_create->u.request_1, wq->entry_count);
14900 bf_set(lpfc_mbx_wq_create_wqe_size,
14901 &wq_create->u.request_1,
14902 LPFC_WQ_WQE_SIZE_128);
14903 bf_set(lpfc_mbx_wq_create_page_size,
14904 &wq_create->u.request_1,
14905 LPFC_WQ_PAGE_SIZE_4096);
14906 page = wq_create->u.request_1.page;
14910 case LPFC_Q_CREATE_VERSION_1:
14911 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
14913 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14914 LPFC_Q_CREATE_VERSION_1);
14916 switch (wq->entry_size) {
14919 bf_set(lpfc_mbx_wq_create_wqe_size,
14920 &wq_create->u.request_1,
14921 LPFC_WQ_WQE_SIZE_64);
14924 if (!(phba->sli4_hba.pc_sli4_params.wqsize &
14925 LPFC_WQ_SZ128_SUPPORT)) {
14929 bf_set(lpfc_mbx_wq_create_wqe_size,
14930 &wq_create->u.request_1,
14931 LPFC_WQ_WQE_SIZE_128);
14934 bf_set(lpfc_mbx_wq_create_page_size,
14935 &wq_create->u.request_1,
14936 LPFC_WQ_PAGE_SIZE_4096);
14937 page = wq_create->u.request_1.page;
14944 list_for_each_entry(dmabuf, &wq->page_list, list) {
14945 memset(dmabuf->virt, 0, hw_page_size);
14946 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
14947 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
14950 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
14951 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
14953 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14954 /* The IOCTL status is embedded in the mailbox subheader. */
14955 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14956 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14957 if (shdr_status || shdr_add_status || rc) {
14958 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14959 "2503 WQ_CREATE mailbox failed with "
14960 "status x%x add_status x%x, mbx status x%x\n",
14961 shdr_status, shdr_add_status, rc);
14965 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
14966 if (wq->queue_id == 0xFFFF) {
14970 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
14971 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
14972 &wq_create->u.response);
14973 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
14974 (wq->db_format != LPFC_DB_RING_FORMAT)) {
14975 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14976 "3265 WQ[%d] doorbell format not "
14977 "supported: x%x\n", wq->queue_id,
14982 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
14983 &wq_create->u.response);
14984 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
14985 if (!bar_memmap_p) {
14986 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14987 "3263 WQ[%d] failed to memmap pci "
14988 "barset:x%x\n", wq->queue_id,
14993 db_offset = wq_create->u.response.doorbell_offset;
14994 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
14995 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
14996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14997 "3252 WQ[%d] doorbell offset not "
14998 "supported: x%x\n", wq->queue_id,
15003 wq->db_regaddr = bar_memmap_p + db_offset;
15004 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15005 "3264 WQ[%d]: barset:x%x, offset:x%x, "
15006 "format:x%x\n", wq->queue_id, pci_barset,
15007 db_offset, wq->db_format);
15009 wq->db_format = LPFC_DB_LIST_FORMAT;
15010 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15012 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15013 if (wq->pring == NULL) {
15017 wq->type = LPFC_WQ;
15018 wq->assoc_qid = cq->queue_id;
15019 wq->subtype = subtype;
15020 wq->host_index = 0;
15022 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
15024 /* link the wq onto the parent cq child list */
15025 list_add_tail(&wq->list, &cq->child_list);
15027 mempool_free(mbox, phba->mbox_mem_pool);
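/*
 * Doorbell selection above, summarized (restating the code, with names
 * taken from it): in dual-chute (DUA) mode the doorbell format, PCI BAR
 * set and offset all come from the WQ_CREATE response and are validated
 * before use; otherwise the default list-format doorbell is used.
 *
 *	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
 *		wq->db_regaddr = bar_memmap_p + db_offset;
 *	} else {
 *		wq->db_format = LPFC_DB_LIST_FORMAT;
 *		wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
 *	}
 */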
15032 * lpfc_rq_create - Create a Receive Queue on the HBA
15033 * @phba: HBA structure that indicates port to create a queue on.
15034 * @hrq: The queue structure to use to create the header receive queue.
15035 * @drq: The queue structure to use to create the data receive queue.
15036 * @cq: The completion queue to bind this work queue to.
15038 * This function creates a receive buffer queue pair, as detailed in @hrq and
15039 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
15042 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
15043 * structs are used to get the entry count that is necessary to determine the
15044 * number of pages to use for this queue. The @cq is used to indicate which
15045 * completion queue to bind received buffers that are posted to these queues to.
15046 * This function will send the RQ_CREATE mailbox command to the HBA to set up
15047 * the receive queue pair. This function is synchronous and will wait for the
15048 * mailbox command to finish before continuing.
15050 * On success this function will return a zero. If unable to allocate enough
15051 * memory this function will return -ENOMEM. If the queue create mailbox command
15052 * fails this function will return -ENXIO.
15055 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15056 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15058 struct lpfc_mbx_rq_create *rq_create;
15059 struct lpfc_dmabuf *dmabuf;
15060 LPFC_MBOXQ_t *mbox;
15061 int rc, length, status = 0;
15062 uint32_t shdr_status, shdr_add_status;
15063 union lpfc_sli4_cfg_shdr *shdr;
15064 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15065 void __iomem *bar_memmap_p;
15066 uint32_t db_offset;
15067 uint16_t pci_barset;
15069 /* sanity check on queue memory */
15070 if (!hrq || !drq || !cq)
15072 if (!phba->sli4_hba.pc_sli4_params.supported)
15073 hw_page_size = SLI4_PAGE_SIZE;
15075 if (hrq->entry_count != drq->entry_count)
15077 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15080 length = (sizeof(struct lpfc_mbx_rq_create) -
15081 sizeof(struct lpfc_sli4_cfg_mhdr));
15082 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15083 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15084 length, LPFC_SLI4_MBX_EMBED);
15085 rq_create = &mbox->u.mqe.un.rq_create;
15086 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15087 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15088 phba->sli4_hba.pc_sli4_params.rqv);
15089 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15090 bf_set(lpfc_rq_context_rqe_count_1,
15091 &rq_create->u.request.context,
15093 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
15094 bf_set(lpfc_rq_context_rqe_size,
15095 &rq_create->u.request.context,
15097 bf_set(lpfc_rq_context_page_size,
15098 &rq_create->u.request.context,
15099 LPFC_RQ_PAGE_SIZE_4096);
15101 switch (hrq->entry_count) {
15103 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15104 "2535 Unsupported RQ count. (%d)\n",
15106 if (hrq->entry_count < 512) {
15110 /* otherwise default to smallest count (fall through) */
15112 bf_set(lpfc_rq_context_rqe_count,
15113 &rq_create->u.request.context,
15114 LPFC_RQ_RING_SIZE_512);
15117 bf_set(lpfc_rq_context_rqe_count,
15118 &rq_create->u.request.context,
15119 LPFC_RQ_RING_SIZE_1024);
15122 bf_set(lpfc_rq_context_rqe_count,
15123 &rq_create->u.request.context,
15124 LPFC_RQ_RING_SIZE_2048);
15127 bf_set(lpfc_rq_context_rqe_count,
15128 &rq_create->u.request.context,
15129 LPFC_RQ_RING_SIZE_4096);
15132 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
15133 LPFC_HDR_BUF_SIZE);
15135 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15137 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15139 list_for_each_entry(dmabuf, &hrq->page_list, list) {
15140 memset(dmabuf->virt, 0, hw_page_size);
15141 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15142 putPaddrLow(dmabuf->phys);
15143 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15144 putPaddrHigh(dmabuf->phys);
15146 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15147 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15149 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15150 /* The IOCTL status is embedded in the mailbox subheader. */
15151 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15152 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15153 if (shdr_status || shdr_add_status || rc) {
15154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15155 "2504 RQ_CREATE mailbox failed with "
15156 "status x%x add_status x%x, mbx status x%x\n",
15157 shdr_status, shdr_add_status, rc);
15161 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15162 if (hrq->queue_id == 0xFFFF) {
15167 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15168 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
15169 &rq_create->u.response);
15170 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
15171 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
15172 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15173 "3262 RQ [%d] doorbell format not "
15174 "supported: x%x\n", hrq->queue_id,
15180 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
15181 &rq_create->u.response);
15182 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
15183 if (!bar_memmap_p) {
15184 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15185 "3269 RQ[%d] failed to memmap pci "
15186 "barset:x%x\n", hrq->queue_id,
15192 db_offset = rq_create->u.response.doorbell_offset;
15193 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
15194 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
15195 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15196 "3270 RQ[%d] doorbell offset not "
15197 "supported: x%x\n", hrq->queue_id,
15202 hrq->db_regaddr = bar_memmap_p + db_offset;
15203 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15204 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
15205 "format:x%x\n", hrq->queue_id, pci_barset,
15206 db_offset, hrq->db_format);
15208 hrq->db_format = LPFC_DB_RING_FORMAT;
15209 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15211 hrq->type = LPFC_HRQ;
15212 hrq->assoc_qid = cq->queue_id;
15213 hrq->subtype = subtype;
15214 hrq->host_index = 0;
15215 hrq->hba_index = 0;
15216 hrq->entry_repost = LPFC_RQ_REPOST;
15218 /* now create the data queue */
15219 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15220 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15221 length, LPFC_SLI4_MBX_EMBED);
15222 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15223 phba->sli4_hba.pc_sli4_params.rqv);
15224 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15225 bf_set(lpfc_rq_context_rqe_count_1,
15226 &rq_create->u.request.context, hrq->entry_count);
15227 if (subtype == LPFC_NVMET)
15228 rq_create->u.request.context.buffer_size =
15229 LPFC_NVMET_DATA_BUF_SIZE;
15231 rq_create->u.request.context.buffer_size =
15232 LPFC_DATA_BUF_SIZE;
15233 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
15235 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
15236 (PAGE_SIZE/SLI4_PAGE_SIZE));
15238 switch (drq->entry_count) {
15240 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15241 "2536 Unsupported RQ count. (%d)\n",
15243 if (drq->entry_count < 512) {
15247 /* otherwise default to smallest count (fall through) */
15249 bf_set(lpfc_rq_context_rqe_count,
15250 &rq_create->u.request.context,
15251 LPFC_RQ_RING_SIZE_512);
15254 bf_set(lpfc_rq_context_rqe_count,
15255 &rq_create->u.request.context,
15256 LPFC_RQ_RING_SIZE_1024);
15259 bf_set(lpfc_rq_context_rqe_count,
15260 &rq_create->u.request.context,
15261 LPFC_RQ_RING_SIZE_2048);
15264 bf_set(lpfc_rq_context_rqe_count,
15265 &rq_create->u.request.context,
15266 LPFC_RQ_RING_SIZE_4096);
15269 if (subtype == LPFC_NVMET)
15270 bf_set(lpfc_rq_context_buf_size,
15271 &rq_create->u.request.context,
15272 LPFC_NVMET_DATA_BUF_SIZE);
15274 bf_set(lpfc_rq_context_buf_size,
15275 &rq_create->u.request.context,
15276 LPFC_DATA_BUF_SIZE);
15278 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15280 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15282 list_for_each_entry(dmabuf, &drq->page_list, list) {
15283 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15284 putPaddrLow(dmabuf->phys);
15285 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15286 putPaddrHigh(dmabuf->phys);
15288 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15289 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15290 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15291 /* The IOCTL status is embedded in the mailbox subheader. */
15292 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15293 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15294 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15295 if (shdr_status || shdr_add_status || rc) {
15299 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15300 if (drq->queue_id == 0xFFFF) {
15304 drq->type = LPFC_DRQ;
15305 drq->assoc_qid = cq->queue_id;
15306 drq->subtype = subtype;
15307 drq->host_index = 0;
15308 drq->hba_index = 0;
15309 drq->entry_repost = LPFC_RQ_REPOST;
15311 /* link the header and data RQs onto the parent cq child list */
15312 list_add_tail(&hrq->list, &cq->child_list);
15313 list_add_tail(&drq->list, &cq->child_list);
15316 mempool_free(mbox, phba->mbox_mem_pool);
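/*
 * Usage sketch (the LPFC_USOL subtype is an assumption for
 * illustration). Note that the routine bails out early unless the
 * header and data RQs have equal entry counts:
 *
 *	rc = lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
 *	if (rc)
 *		...	// -ENOMEM or -ENXIO, as described above
 */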
/**
 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrqp: The queue structure array to use to create the header receive queues.
 * @drqp: The queue structure array to use to create the data receive queues.
 * @cqp: The completion queue array to bind these receive queues to.
 * @subtype: The queue's subtype (MRQs are created with LPFC_NVMET).
 *
 * This function creates a set of receive buffer queue pairs, as detailed in
 * @hrqp and @drqp, on a port, described by @phba, by sending an RQ_CREATE
 * mailbox command to the HBA.
 *
 * The @phba struct is used to send the mailbox command to the HBA. The @hrqp
 * and @drqp structs are used to get the entry counts that determine the
 * number of pages to use for each queue. The @cqp array indicates which
 * completion queue each receive queue pair is bound to. This function is
 * synchronous: it polls the RQ_CREATE mailbox command and waits for it to
 * finish before returning.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
		struct lpfc_queue **drqp, struct lpfc_queue **cqp,
		uint32_t subtype)
{
	struct lpfc_queue *hrq, *drq, *cq;
	struct lpfc_mbx_rq_create_v2 *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numrq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	numrq = phba->cfg_nvmet_mrq;
	/* sanity check on array memory */
	if (!hrqp || !drqp || !cqp || !numrq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	length = sizeof(struct lpfc_mbx_rq_create_v2);
	length += ((2 * numrq * hrqp[0]->page_count) *
		   sizeof(struct dma_address));

	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
				    LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3099 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}

	rq_create = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;

	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
	cnt = 0;

	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		drq = drqp[idx];
		cq  = cqp[idx];

		/* sanity check on queue memory */
		if (!hrq || !drq || !cq) {
			status = -ENODEV;
			goto out;
		}

		if (hrq->entry_count != drq->entry_count) {
			status = -EINVAL;
			goto out;
		}

		if (idx == 0) {
			bf_set(lpfc_mbx_rq_create_num_pages,
			       &rq_create->u.request,
			       hrq->page_count);
			bf_set(lpfc_mbx_rq_create_rq_cnt,
			       &rq_create->u.request, (numrq * 2));
			bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
			       1);
			bf_set(lpfc_rq_context_base_cq,
			       &rq_create->u.request.context,
			       cq->queue_id);
			bf_set(lpfc_rq_context_data_size,
			       &rq_create->u.request.context,
			       LPFC_NVMET_DATA_BUF_SIZE);
			bf_set(lpfc_rq_context_hdr_size,
			       &rq_create->u.request.context,
			       LPFC_HDR_BUF_SIZE);
			bf_set(lpfc_rq_context_rqe_count_1,
			       &rq_create->u.request.context,
			       hrq->entry_count);
			bf_set(lpfc_rq_context_rqe_size,
			       &rq_create->u.request.context,
			       LPFC_RQE_SIZE_8);
			bf_set(lpfc_rq_context_page_size,
			       &rq_create->u.request.context,
			       (PAGE_SIZE/SLI4_PAGE_SIZE));
		}
		rc = 0;
		list_for_each_entry(dmabuf, &hrq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		rc = 0;
		list_for_each_entry(dmabuf, &drq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		hrq->type = LPFC_HRQ;
		hrq->assoc_qid = cq->queue_id;
		hrq->subtype = subtype;
		hrq->host_index = 0;
		hrq->hba_index = 0;
		hrq->entry_repost = LPFC_RQ_REPOST;

		drq->db_format = LPFC_DB_RING_FORMAT;
		drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		drq->type = LPFC_DRQ;
		drq->assoc_qid = cq->queue_id;
		drq->subtype = subtype;
		drq->host_index = 0;
		drq->hba_index = 0;
		drq->entry_repost = LPFC_RQ_REPOST;

		list_add_tail(&hrq->list, &cq->child_list);
		list_add_tail(&drq->list, &cq->child_list);
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3120 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	/* Initialize all RQs with associated queue id */
	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		hrq->queue_id = rc + (2 * idx);
		drq = drqp[idx];
		drq->queue_id = rc + (2 * idx) + 1;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}
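/*
 * Example of the queue-id assignment above (illustrative values): if the
 * mailbox response returns base id 100 and numrq is 2, the port has
 * allocated ids 100-103, header RQ first in each pair:
 *
 *	hrqp[0]->queue_id = 100;  drqp[0]->queue_id = 101;
 *	hrqp[1]->queue_id = 102;  drqp[1]->queue_id = 103;
 */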
/**
 * lpfc_eq_destroy - Destroy an event Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @eq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;
	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
	       eq->queue_id);
	mbox->vport = eq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2505 EQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}

	/* Remove eq from any list */
	list_del_init(&eq->list);
	mempool_free(mbox, eq->phba->mbox_mem_pool);
	return status;
}
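/*
 * The four *_destroy routines that follow share this shape: an embedded
 * mailbox command carrying only the queue id. Callers are expected to
 * destroy queues leaf-first (work and receive queues before their
 * completion queue, completion queues before their event queue), since a
 * queue being destroyed may still be linked on a parent's child_list.
 */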
/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @cq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!cq)
		return -ENODEV;
	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
	       cq->queue_id);
	mbox->vport = cq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. (The
	 * cfg_shdr sits at the same offset in every queue member of the
	 * mqe union, so reading it through wq_create is harmless.)
	 */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.wq_create.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2506 CQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove cq from any list */
	list_del_init(&cq->list);
	mempool_free(mbox, cq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @mq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!mq)
		return -ENODEV;
	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
	       mq->queue_id);
	mbox->vport = mq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2507 MQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove mq from any list */
	list_del_init(&mq->list);
	mempool_free(mbox, mq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @wq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!wq)
		return -ENODEV;
	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
	       wq->queue_id);
	mbox->vport = wq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2508 WQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove wq from any list */
	list_del_init(&wq->list);
	kfree(wq->pring);
	wq->pring = NULL;
	mempool_free(mbox, wq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @hrq: The queue structure associated with the header receive queue.
 * @drq: The queue structure associated with the data receive queue.
 *
 * This function destroys the header/data receive queue pair, as detailed in
 * @hrq and @drq, by sending a mailbox command, specific to the type of queue,
 * to the HBA.
 *
 * The @hrq and @drq structs are used to get the queue IDs of the queues to
 * destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!hrq || !drq)
		return -ENODEV;
	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}
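/*
 * Note on the MBX_TIMEOUT guard above: when a polled mailbox command
 * times out, the mailbox memory may still be owned by the port, so it is
 * deliberately not returned to mbox_mem_pool rather than risk the port
 * writing into freed memory. The same guard appears in the SGL posting
 * routines below.
 */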
/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: pointer to lpfc hba data structure.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * @pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * @pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64 byte aligned.
 * If two SGL pages are mapped, the first must hold 256 entries; the
 * second can hold between 1 and 256 entries.
 *
 * Return codes:
 *	0 - Success
 *	-ENXIO, -ENOMEM - Failure
 **/
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
		   dma_addr_t pdma_phys_addr0,
		   dma_addr_t pdma_phys_addr1,
		   uint16_t xritag)
{
	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;

	if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0364 Invalid param:\n");
		return -EINVAL;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
			 sizeof(struct lpfc_mbx_post_sgl_pages) -
			 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
				&mbox->u.mqe.un.post_sgl_pages;
	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo	=
				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo	=
				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
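/*
 * Sketch of the two call patterns (illustrative values): a single-page
 * SGL posts with the second address zeroed, while an IO that needs two
 * SGL pages passes both:
 *
 *	lpfc_sli4_post_sgl(phba, pg0_phys, 0, xritag);
 *	lpfc_sli4_post_sgl(phba, pg0_phys, pg1_phys, xritag);
 *
 * Per the header comment, pg0 must be full (256 entries) before pg1 is
 * used.
 */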
/**
 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available logical xri from
 * the driver's xri bitmask. Because the index is logical, the search starts
 * at bit 0 on every call.
 *
 * Returns
 *	the allocated logical xri if successful
 *	NO_XRI if no xris are available.
 **/
uint16_t
lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
{
	uint16_t xri;

	/*
	 * Fetch the next logical xri.  Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irq(&phba->hbalock);
	xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
				 phba->sli4_hba.max_cfg_param.max_xri, 0);
	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
		spin_unlock_irq(&phba->hbalock);
		return NO_XRI;
	} else {
		set_bit(xri, phba->sli4_hba.xri_bmask);
		phba->sli4_hba.max_cfg_param.xri_used++;
	}
	spin_unlock_irq(&phba->hbalock);
	return xri;
}
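/*
 * Typical pairing (illustrative): exchange setup allocates a logical xri
 * and error or teardown paths return it to the bitmask:
 *
 *	xri = lpfc_sli4_alloc_xri(phba);
 *	if (xri == NO_XRI)
 *		return -ENOMEM;
 *	...
 *	lpfc_sli4_free_xri(phba, xri);
 */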
/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: the xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver. The caller must hold
 * the hbalock.
 **/
static void
__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
		phba->sli4_hba.max_cfg_param.xri_used--;
	}
}

/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: the xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. If there is no unused xritag
 * it will return NO_XRI (0xffff), which is never a valid xritag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xri_index;

	xri_index = lpfc_sli4_alloc_xri(phba);
	if (xri_index == NO_XRI)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2004 Failed to allocate XRI. Last XRITAG is %d"
				" Max XRI is %d, Used XRI is %d\n",
				xri_index,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.xri_used);
	return xri_index;
}
/**
 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
 **/
static int
lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
			struct list_head *post_sgl_list,
			int post_cnt)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2559 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
				    reqlen, LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* Set up the SGL pages in the non-embedded DMA pages */
	viraddr = mbox->sge_array->addr[0];
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(0));

		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}

	/* Complete initialization and perform endian conversion. */
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
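/*
 * The SLI4_PAGE_SIZE check above bounds how many sgls fit in one
 * non-embedded mailbox: each sgl_page_pairs entry is 16 bytes, so with a
 * 4KB page roughly (4096 - sizeof(cfg_shdr) - 4) / 16, i.e. on the order
 * of 250 ELS sgl entries, can be registered per POST_SGL_PAGES command.
 */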
/**
 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
 * @phba: pointer to lpfc hba data structure.
 * @sblist: pointer to scsi buffer list.
 * @count: number of scsi buffers on the list.
 *
 * This routine is invoked to post a block of @count scsi sgl pages from a
 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
 * No Lock is held.
 **/
static int
lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
			      struct list_head *sblist,
			      int count)
{
	struct lpfc_scsi_buf *psb;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = count * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0217 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0283 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
				    reqlen, LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2561 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(psb, sblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = psb->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2564 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
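/*
 * The pdma_phys_bpl1 logic above mirrors lpfc_sli4_post_sgl(): only when
 * the per-IO DMA buffer is larger than one SGL_PAGE_SIZE page does the
 * pair carry a second physical address; otherwise pg1 is posted as zero
 * and the port uses a single SGL page for the buffer.
 */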
/**
 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
 * @phba: pointer to lpfc_hba struct that the frame was received on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function checks the fields in the @fc_hdr to see if the FC frame is a
 * valid type of frame that the LPFC driver will handle. This function will
 * return a zero if the frame is a valid frame or a non-zero value when the
 * frame does not pass the check.
 **/
static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr;
	uint32_t *header = (uint32_t *) fc_hdr;

#define FC_RCTL_MDS_DIAGS	0xF4

	switch (fc_hdr->fh_r_ctl) {
	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
	case FC_RCTL_DD_CMD_STATUS:	/* command status */
	case FC_RCTL_ELS_REQ:	/* extended link services request */
	case FC_RCTL_ELS_REP:	/* extended link services reply */
	case FC_RCTL_ELS4_REQ:	/* FC-4 ELS request */
	case FC_RCTL_ELS4_REP:	/* FC-4 ELS reply */
	case FC_RCTL_BA_NOP:	/* basic link service NOP */
	case FC_RCTL_BA_ABTS:	/* basic link service abort */
	case FC_RCTL_BA_RMC:	/* remove connection */
	case FC_RCTL_BA_ACC:	/* basic accept */
	case FC_RCTL_BA_RJT:	/* basic reject */
	case FC_RCTL_BA_PRMT:
	case FC_RCTL_ACK_1:	/* acknowledge_1 */
	case FC_RCTL_ACK_0:	/* acknowledge_0 */
	case FC_RCTL_P_RJT:	/* port reject */
	case FC_RCTL_F_RJT:	/* fabric reject */
	case FC_RCTL_P_BSY:	/* port busy */
	case FC_RCTL_F_BSY:	/* fabric busy to data frame */
	case FC_RCTL_F_BSYL:	/* fabric busy to link control frame */
	case FC_RCTL_LCR:	/* link credit reset */
	case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
	case FC_RCTL_END:	/* end */
		break;
	case FC_RCTL_VFTH:	/* Virtual Fabric tagging Header */
		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
		return lpfc_fc_frame_check(phba, fc_hdr);
	default:
		goto drop;
	}

#define FC_TYPE_VENDOR_UNIQUE	0xFF

	switch (fc_hdr->fh_type) {
	case FC_TYPE_BLS:
	case FC_TYPE_ELS:
	case FC_TYPE_FCP:
	case FC_TYPE_CT:
	case FC_TYPE_NVME:
	case FC_TYPE_VENDOR_UNIQUE:
		break;
	case FC_TYPE_IP:
	case FC_TYPE_ILS:
	default:
		goto drop;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2538 Received frame rctl:x%x, type:x%x, "
			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type,
			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
			be32_to_cpu(header[6]));
	return 0;
drop:
	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
			"2539 Dropped frame rctl:x%x type:x%x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type);
	return 1;
}
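/*
 * Example of a frame that passes the checks above (illustrative): an
 * unsolicited PLOGI arrives with fh_r_ctl = FC_RCTL_ELS_REQ (0x22) and
 * fh_type = FC_TYPE_ELS (0x01); both switches fall through to the
 * "Received frame" trace and the function returns 0. A frame with an
 * unrecognized R_CTL lands at drop: and is rejected via return 1.
 */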
/**
 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function processes the FC header to retrieve the VFI from the VF
 * header, if one exists. This function will return the VFI if one exists
 * or 0 if no VF Tagging Header exists.
 **/
static uint32_t
lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;

	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
		return 0;
	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
}
/**
 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
 * @phba: Pointer to the HBA structure to search for the vport on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 * @fcfi: The FC Fabric ID that the frame came from
 * @did: The Destination ID extracted from the frame
 *
 * This function searches the @phba for a vport that matches the content of the
 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
 * returns the matching vport pointer or NULL if unable to match the frame to
 * a vport.
 **/
static struct lpfc_vport *
lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
		       uint16_t fcfi, uint32_t did)
{
	struct lpfc_vport **vports;
	struct lpfc_vport *vport = NULL;
	int i;

	if (did == Fabric_DID)
		return phba->pport;
	if ((phba->pport->fc_flag & FC_PT2PT) &&
	    !(phba->link_state == LPFC_HBA_READY))
		return phba->pport;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			if (phba->fcf.fcfi == fcfi &&
			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
			    vports[i]->fc_myDID == did) {
				vport = vports[i];
				break;
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
	return vport;
}
/**
 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
 * @vport: The vport to work on.
 *
 * This function updates the receive sequence time stamp for this vport. The
 * receive sequence time stamp indicates the time that the last frame of the
 * sequence that has been idle for the longest amount of time was received.
 * The driver uses this time stamp to indicate if any received sequences have
 * timed out.
 **/
static void
lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *dmabuf = NULL;

	/* get the oldest sequence on the rcv list */
	h_buf = list_get_first(&vport->rcv_buffer_list,
			       struct lpfc_dmabuf, list);
	if (!h_buf)
		return;
	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
}
/**
 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function cleans up all outstanding received sequences. This is called
 * by the driver when a link event or user action invalidates all the received
 * sequences.
 **/
void
lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;

	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
}
/**
 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function determines whether any received sequences have timed out by
 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
 * indicates that there is at least one timed out sequence this routine will
 * go through the received sequences one at a time from most inactive to most
 * active to determine which ones need to be cleaned up. Once it has determined
 * that a sequence needs to be cleaned up it will simply free up the resources
 * without sending an abort.
 **/
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;
	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}
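/*
 * Timing note: fc_edtov is the fabric's Error Detect Timeout Value in
 * milliseconds, so a sequence whose newest frame is older than E_D_TOV is
 * considered dead. For example, with a common 2000ms E_D_TOV, a sequence
 * left untouched for two seconds is freed on the next timer pass.
 */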
/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport the frame was received on.
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
 *
 * This function searches through the existing incomplete sequences that have
 * been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
 * This function returns a pointer to the first dmabuf in the sequence list that
 * the frame was linked to.
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;
	uint8_t	found = 0;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	/* Use the hdr_buf to find the sequence that this frame belongs to */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		temp_hdr = dmabuf->hbuf.virt;
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame */
	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
	while (!found) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			found = 1;
			break;
		}

		if (&d_buf->list == &seq_dmabuf->dbuf.list)
			break;
		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
	}

	if (found)
		return seq_dmabuf;
	return NULL;
}
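/*
 * Worked example for the insertion logic above (illustrative): if the
 * pending sequence already holds frames with SEQ_CNT 0, 1 and 3 and frame
 * 2 arrives, the backward walk from the tail stops at frame 1 and links
 * frame 2 after it, keeping the dbuf list sorted by SEQ_CNT so that
 * lpfc_seq_complete() can detect holes with a simple linear scan.
 */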
/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described
 * by the information from the basic abort @dmabuf. It checks whether such
 * a partially assembled sequence is held by the driver. If so, it frees all
 * the frames from the partially assembled sequence.
 *
 * Return
 * true  -- if there is a matching partially assembled sequence present and
 *          all the frames are freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 *          nothing got aborted in the lower layer driver
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the hdr_buf to find the sequence that matches this frame */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}
/**
 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the sequence already assembled and passed to
 * the upper level protocol, described by the information from the basic
 * abort @dmabuf. It checks whether such a pending context exists at the
 * upper level protocol. If so, it shall clean up the pending context.
 *
 * Return
 * true  -- if there is a matching pending context of the sequence and it is
 *          cleaned up;
 * false -- if there is no matching pending context of the sequence present
 *          at the upper level protocol.
 **/
static bool
lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	int handled;

	/* Accepting abort at ulp with SLI4 only */
	if (phba->sli_rev < LPFC_SLI_REV4)
		return false;

	/* Register all caring upper level protocols to attend abort */
	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
	if (handled)
		return true;

	return false;
}
/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * This function handles the sequence abort response iocb command complete
 * event. It properly releases the memory allocated to the sequence abort
 * accept iocb.
 **/
static void
lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmd_iocbq,
			     struct lpfc_iocbq *rsp_iocbq)
{
	struct lpfc_nodelist *ndlp;

	if (cmd_iocbq) {
		ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
		lpfc_nlp_put(ndlp);
		lpfc_sli_release_iocbq(phba, cmd_iocbq);
	}

	/* Failure means BLS ABORT RSP did not get delivered to remote node*/
	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"3154 BLS ABORT RSP failed, data: x%x/x%x\n",
			rsp_iocbq->iocb.ulpStatus,
			rsp_iocbq->iocb.un.ulpWord[4]);
}
/**
 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
 * @phba: Pointer to HBA context object.
 * @xri: xri id in transaction.
 *
 * This function validates that the xri maps to the known range of XRIs
 * allocated and used by the driver.
 **/
static uint16_t
lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
		      uint16_t xri)
{
	uint16_t i;

	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
		if (xri == phba->sli4_hba.xri_ids[i])
			return i;
	}
	return NO_XRI;
}
/**
 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
 * @vport: Pointer to the vport on which the abort was received.
 * @fc_hdr: pointer to a FC frame header.
 * @aborted: whether the matching sequence (or ULP context) was aborted.
 *
 * This function sends a basic response (BA_ACC or BA_RJT) to a previous
 * unsolicited sequence abort event after the sequence handling has been
 * aborted.
 **/
static void
lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
			struct fc_frame_header *fc_hdr, bool aborted)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *ctiocb = NULL;
	struct lpfc_nodelist *ndlp;
	uint16_t oxid, rxid, xri, lxri;
	uint32_t sid, fctl;
	IOCB_t *icmd;
	int rc;

	if (!lpfc_is_link_up(phba))
		return;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr->fh_rx_id);

	ndlp = lpfc_findnode_did(vport, sid);
	if (!ndlp) {
		ndlp = lpfc_nlp_init(vport, sid);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "1268 Failed to allocate ndlp for "
					 "oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
		/* Put ndlp onto pport node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "3275 Failed to active ndlp found "
					 "for oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
	}

	/* Allocate buffer for rsp iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb)
		return;

	/* Extract the F_CTL field from FC_HDR */
	fctl = sli4_fctl_from_fc_hdr(fc_hdr);

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.bdeSize = 0;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;

	/* Fill in the rest of iocb fields */
	icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
	icmd->ulpBdeCount = 0;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	ctiocb->context1 = lpfc_nlp_get(ndlp);

	ctiocb->iocb_cmpl = NULL;
	ctiocb->vport = phba->pport;
	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
	ctiocb->sli4_lxritag = NO_XRI;
	ctiocb->sli4_xritag = NO_XRI;

	if (fctl & FC_FC_EX_CTX)
		/* Exchange responder sent the abort so we
		 * own the oxid.
		 */
		xri = oxid;
	else
		xri = rxid;
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri != NO_XRI)
		lpfc_set_rrq_active(phba, ndlp, lxri,
				    (xri == oxid) ? rxid : oxid, 0);
	/* For BA_ABTS from exchange responder, if the logical xri with
	 * the oxid maps to the FCP XRI range, the port no longer has
	 * that exchange context, send a BLS_RJT. Override the IOCB for
	 * a BA_RJT.
	 */
	if ((fctl & FC_FC_EX_CTX) &&
	    (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}

	/* If BA_ABTS failed to abort a partially assembled receive sequence,
	 * the driver no longer has that exchange, send a BLS_RJT. Override
	 * the IOCB for a BA_RJT.
	 */
	if (aborted == false) {
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS sent by responder to CT exchange, construction
		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
		 * field and RX_ID from ABTS for RX_ID field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
	} else {
		/* ABTS sent by initiator to CT exchange, construction
		 * of BA_ACC will need to allocate a new XRI as for the
		 * XRI_TAG field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
	}
	bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
	bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);

	/* Xmit CT abts response on exchange <xid> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
			 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2925 Failed to issue CT ABTS RSP x%x on "
				 "xri x%x, Data x%x\n",
				 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
				 phba->link_state);
		lpfc_nlp_put(ndlp);
		ctiocb->context1 = NULL;
		lpfc_sli_release_iocbq(phba, ctiocb);
	}
}
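/*
 * Summary of the response selection above: the default reply is a BA_ACC;
 * it is overridden to a BA_RJT (reason "unable", explanation "invalid
 * OX_ID/RX_ID") either when a responder-side ABTS names an xri the port
 * no longer owns, or when no partially assembled sequence or ULP context
 * could actually be aborted (aborted == false).
 */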
/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it shall abort
 * the partially assembled frames for the sequence. Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to show
 * that the unsolicited sequence has been aborted. After that, it will issue
 * a basic accept (or reject) for the abort.
 **/
static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool aborted;

	/* Make a copy of fc_hdr before the dmabuf being released */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS by responder to exchange, no cleanup needed */
		aborted = true;
	} else {
		/* ABTS by initiator to exchange, need to do cleanup */
		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
		if (aborted == false)
			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
	}
	lpfc_in_buf_free(phba, &dmabuf->dbuf);

	if (phba->nvmet_support) {
		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
		return;
	}

	/* Respond with BA_ACC or BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}
/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function looks for three things: 1) that the first frame has
 * a sequence count of zero; 2) that there is a frame with the last-frame-of-
 * sequence bit set; and 3) that there are no holes in the sequence count. The
 * function returns 1 when the sequence is complete, otherwise it returns 0.
 **/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *hdr;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *seq_dmabuf;
	uint32_t fctl;
	int seq_count = 0;

	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* make sure first frame of sequence has a sequence count of zero */
	if (hdr->fh_seq_cnt != seq_count)
		return 0;
	fctl = (hdr->fh_f_ctl[0] << 16 |
		hdr->fh_f_ctl[1] << 8 |
		hdr->fh_f_ctl[2]);
	/* If last frame of sequence we can return success. */
	if (fctl & FC_FC_END_SEQ)
		return 1;
	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* If there is a hole in the sequence count then fail. */
		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
			return 0;
		fctl = (hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2]);
		/* If last frame of sequence we can return success. */
		if (fctl & FC_FC_END_SEQ)
			return 1;
	}
	return 0;
}
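/*
 * Example (illustrative): a three-frame sequence queued as SEQ_CNT
 * 0 -> 1 -> 2 with FC_FC_END_SEQ set in frame 2's F_CTL returns 1 from
 * the loop above; the same list missing frame 1 fails the
 * "++seq_count != fh_seq_cnt" check and returns 0, leaving reassembly to
 * wait for the hole to fill or for E_D_TOV cleanup.
 */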
16914 * lpfc_prep_seq - Prep sequence for ULP processing
16915 * @vport: Pointer to the vport on which this sequence was received
16916 * @dmabuf: pointer to a dmabuf that describes the FC sequence
16918 * This function takes a sequence, described by a list of frames, and creates
16919 * a list of iocbq structures to describe the sequence. This iocbq list will be
16920 * used to issue to the generic unsolicited sequence handler. This routine
16921 * returns a pointer to the first iocbq in the list. If the function is unable
16922 * to allocate an iocbq then it throw out the received frames that were not
16923 * able to be described and return a pointer to the first iocbq. If unable to
16924 * allocate any iocbqs (including the first) this function will return NULL.
16926 static struct lpfc_iocbq *
16927 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
16929 struct hbq_dmabuf *hbq_buf;
16930 struct lpfc_dmabuf *d_buf, *n_buf;
16931 struct lpfc_iocbq *first_iocbq, *iocbq;
16932 struct fc_frame_header *fc_hdr;
16934 uint32_t len, tot_len;
16935 struct ulp_bde64 *pbde;
16937 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
16938 /* remove from receive buffer list */
16939 list_del_init(&seq_dmabuf->hbuf.list);
16940 lpfc_update_rcv_time_stamp(vport);
16941 /* get the Remote Port's SID */
16942 sid = sli4_sid_from_fc_hdr(fc_hdr);
16944 /* Get an iocbq struct to fill in. */
16945 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
16947 /* Initialize the first IOCB. */
16948 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
16949 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
16950 first_iocbq->vport = vport;
16952 /* Check FC Header to see what TYPE of frame we are rcv'ing */
16953 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
16954 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
16955 first_iocbq->iocb.un.rcvels.parmRo =
16956 sli4_did_from_fc_hdr(fc_hdr);
16957 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
16959 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
16960 first_iocbq->iocb.ulpContext = NO_XRI;
16961 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
16962 be16_to_cpu(fc_hdr->fh_ox_id);
16963 /* iocbq is prepped for internal consumption. Physical vpi. */
16964 first_iocbq->iocb.unsli3.rcvsli3.vpi =
16965 vport->phba->vpi_ids[vport->vpi];
16966 /* put the first buffer into the first IOCBq */
16967 tot_len = bf_get(lpfc_rcqe_length,
16968 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
16970 first_iocbq->context2 = &seq_dmabuf->dbuf;
16971 first_iocbq->context3 = NULL;
16972 first_iocbq->iocb.ulpBdeCount = 1;
16973 if (tot_len > LPFC_DATA_BUF_SIZE)
16974 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
16975 LPFC_DATA_BUF_SIZE;
16977 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
16979 first_iocbq->iocb.un.rcvels.remoteID = sid;
16981 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
16983 iocbq = first_iocbq;
16985 * Each IOCBq can have two Buffers assigned, so go through the list
16986 * of buffers for this sequence and save two buffers in each IOCBq
16988 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
16990 lpfc_in_buf_free(vport->phba, d_buf);
16993 if (!iocbq->context3) {
16994 iocbq->context3 = d_buf;
16995 iocbq->iocb.ulpBdeCount++;
16996 /* We need to get the size out of the right CQE */
16997 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16998 len = bf_get(lpfc_rcqe_length,
16999 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17000 pbde = (struct ulp_bde64 *)
17001 &iocbq->iocb.unsli3.sli3Words[4];
17002 if (len > LPFC_DATA_BUF_SIZE)
17003 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
17005 pbde->tus.f.bdeSize = len;
17007 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
17010 iocbq = lpfc_sli_get_iocbq(vport->phba);
17013 first_iocbq->iocb.ulpStatus =
17014 IOSTAT_FCP_RSP_ERROR;
17015 first_iocbq->iocb.un.ulpWord[4] =
17016 IOERR_NO_RESOURCES;
17018 lpfc_in_buf_free(vport->phba, d_buf);
17021 /* We need to get the size out of the right CQE */
17022 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17023 len = bf_get(lpfc_rcqe_length,
17024 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17025 iocbq->context2 = d_buf;
17026 iocbq->context3 = NULL;
17027 iocbq->iocb.ulpBdeCount = 1;
17028 if (len > LPFC_DATA_BUF_SIZE)
17029 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17030 LPFC_DATA_BUF_SIZE;
17032 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
17035 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17037 iocbq->iocb.un.rcvels.remoteID = sid;
17038 list_add_tail(&iocbq->list, &first_iocbq->list);
17041 /* Free the sequence's header buffer */
17043 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
17045 return first_iocbq;
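/*
 * Editor's illustrative sketch (not driver code): the BDE sizing above
 * clamps each receive buffer to LPFC_DATA_BUF_SIZE while acc_len
 * accumulates the full CQE-reported length of every buffer. The
 * standalone model below uses hypothetical sizes to show the arithmetic.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DATA_BUF_SIZE 2048u	/* stand-in for LPFC_DATA_BUF_SIZE */

/* Clamp one buffer's BDE size; accumulate the true sequence length. */
static uint32_t add_buf(uint32_t cqe_len, uint32_t *acc_len)
{
	*acc_len += cqe_len;
	return cqe_len > DATA_BUF_SIZE ? DATA_BUF_SIZE : cqe_len;
}

int main(void)
{
	uint32_t lens[3] = { 2048, 2048, 812 };	/* hypothetical CQE lengths */
	uint32_t acc = 0;
	int i;

	for (i = 0; i < 3; i++)
		printf("bdeSize=%u\n", add_buf(lens[i], &acc));
	printf("acc_len=%u\n", acc);	/* 4908 */
	return 0;
}
#endif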
17049 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
17050 struct hbq_dmabuf *seq_dmabuf)
17052 struct fc_frame_header *fc_hdr;
17053 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
17054 struct lpfc_hba *phba = vport->phba;
17056 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17057 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
17059 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17060 "2707 Ring %d handler: Failed to allocate "
17061 "iocb Rctl x%x Type x%x received\n",
17063 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17066 if (!lpfc_complete_unsol_iocb(phba,
17067 phba->sli4_hba.els_wq->pring,
17068 iocbq, fc_hdr->fh_r_ctl,
17070 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17071 "2540 Ring %d handler: unexpected Rctl "
17072 "x%x Type x%x received\n",
17074 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17076 /* Free iocb created in lpfc_prep_seq */
17077 list_for_each_entry_safe(curr_iocb, next_iocb,
17078 &iocbq->list, list) {
17079 list_del_init(&curr_iocb->list);
17080 lpfc_sli_release_iocbq(phba, curr_iocb);
17082 lpfc_sli_release_iocbq(phba, iocbq);
17086 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17087 struct lpfc_iocbq *rspiocb)
17089 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17091 if (pcmd && pcmd->virt)
17092 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17094 lpfc_sli_release_iocbq(phba, cmdiocb);
17098 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17099 struct hbq_dmabuf *dmabuf)
17101 struct fc_frame_header *fc_hdr;
17102 struct lpfc_hba *phba = vport->phba;
17103 struct lpfc_iocbq *iocbq = NULL;
17104 union lpfc_wqe *wqe;
17105 struct lpfc_dmabuf *pcmd = NULL;
17106 uint32_t frame_len;
17109 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17110 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17112 /* Send the received frame back */
17113 iocbq = lpfc_sli_get_iocbq(phba);
17117 /* Allocate buffer for command payload */
17118 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
17120 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
17122 if (!pcmd || !pcmd->virt)
17125 INIT_LIST_HEAD(&pcmd->list);
17127 /* copy in the payload */
17128 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
17130 /* fill in BDE's for command */
17131 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
17132 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
17133 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
17134 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
17136 iocbq->context2 = pcmd;
17137 iocbq->vport = vport;
17138 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
17139 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
17142 * Setup rest of the iocb as though it were a WQE
17143 * Build the SEND_FRAME WQE
17145 wqe = (union lpfc_wqe *)&iocbq->iocb;
17147 wqe->send_frame.frame_len = frame_len;
17148 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
17149 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
17150 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
17151 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
17152 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
17153 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
17155 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
17156 iocbq->iocb.ulpLe = 1;
17157 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
17158 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
17159 if (rc == IOCB_ERROR)
17162 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17166 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17167 "2023 Unable to process MDS loopback frame\n");
17168 if (pcmd && pcmd->virt)
17169 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17172 lpfc_sli_release_iocbq(phba, iocbq);
17173 lpfc_in_buf_free(phba, &dmabuf->dbuf);
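/*
 * Editor's illustrative sketch (not driver code): the SEND_FRAME setup
 * above loads the 24-byte FC header into six 32-bit WQE words with
 * be32_to_cpu(). A portable model of that big-endian word load, using
 * hypothetical header bytes:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Assemble one 32-bit word from big-endian bytes (be32_to_cpu model). */
static uint32_t be32_load(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
	uint8_t fc_hdr[24] = { 0xf4, 0x00, 0x00, 0x01 };	/* word 0 only */
	int w;

	for (w = 0; w < 6; w++)
		printf("fc_hdr_wd%d = 0x%08x\n", w, be32_load(fc_hdr + 4 * w));
	return 0;
}
#endif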
17177 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
17178 * @phba: Pointer to HBA context object.
17180 * This function is called with no lock held. This function processes all
17181 * the received buffers and gives them to the upper layers when a received
17182 * buffer indicates that it is the final frame in the sequence. The interrupt
17183 * service routine processes received buffers in interrupt context. The
17184 * worker thread calls lpfc_sli4_handle_received_buffer, which will call the
17185 * appropriate receive function when the final frame in a sequence is received.
17188 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
17189 struct hbq_dmabuf *dmabuf)
17191 struct hbq_dmabuf *seq_dmabuf;
17192 struct fc_frame_header *fc_hdr;
17193 struct lpfc_vport *vport;
17197 /* Process each received buffer */
17198 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17200 /* check to see if this is a valid type of frame */
17201 if (lpfc_fc_frame_check(phba, fc_hdr)) {
17202 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17206 if ((bf_get(lpfc_cqe_code,
17207 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
17208 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
17209 &dmabuf->cq_event.cqe.rcqe_cmpl);
17211 fcfi = bf_get(lpfc_rcqe_fcf_id,
17212 &dmabuf->cq_event.cqe.rcqe_cmpl);
17214 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
17215 vport = phba->pport;
17216 /* Handle MDS Loopback frames */
17217 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
17221 /* d_id this frame is directed to */
17222 did = sli4_did_from_fc_hdr(fc_hdr);
17224 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
17226 /* throw out the frame */
17227 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17231 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
17232 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
17233 (did != Fabric_DID)) {
17235 * Throw out the frame if we are not pt2pt.
17236 * The pt2pt protocol allows for discovery frames
17237 * to be received without a registered VPI.
17239 if (!(vport->fc_flag & FC_PT2PT) ||
17240 (phba->link_state == LPFC_HBA_READY)) {
17241 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17246 /* Handle the basic abort sequence (BA_ABTS) event */
17247 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
17248 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
17252 /* Link this frame */
17253 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
17255 /* unable to add frame to vport - throw it out */
17256 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17259 /* If not last frame in sequence continue processing frames. */
17260 if (!lpfc_seq_complete(seq_dmabuf))
17263 /* Send the complete sequence to the upper layer protocol */
17264 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
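/*
 * Editor's illustrative sketch (not driver code): a condensed model of
 * the dispatch order above. The r_ctl/type values mirror the checks in
 * the routine (0xF4/0xFF marks MDS loopback; 0x81 is BA_ABTS per FC-FS);
 * the boolean inputs stand in for lpfc_fc_frame_check() and the vport
 * lookup.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum action { DROP, MDS_LOOPBACK, UNSOL_ABORT, LINK_FRAME };

static enum action classify(uint8_t r_ctl, uint8_t type, bool hdr_ok,
			    bool vport_ok)
{
	if (!hdr_ok)
		return DROP;		/* bad frame, free the buffer */
	if (r_ctl == 0xF4 && type == 0xFF)
		return MDS_LOOPBACK;	/* diagnostic frame, echo it back */
	if (!vport_ok)
		return DROP;		/* no matching vport */
	if (r_ctl == 0x81)		/* FC_RCTL_BA_ABTS */
		return UNSOL_ABORT;
	return LINK_FRAME;		/* link into the pending sequence */
}

int main(void)
{
	printf("%d\n", classify(0xF4, 0xFF, true, true));	/* 1 */
	printf("%d\n", classify(0x81, 0x00, true, true));	/* 2 */
	return 0;
}
#endif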
17268 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
17269 * @phba: pointer to lpfc hba data structure.
17271 * This routine is invoked to post rpi header templates to the
17272 * HBA consistent with the SLI-4 interface spec. This routine
17273 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
17274 * SLI4_PAGE_SIZE / 64 rpi context headers.
17276 * This routine does not require any locks. Its usage is expected
17277 * to be driver load or reset recovery when the driver is
17282 * -EIO - The mailbox failed to complete successfully.
17283 * When this error occurs, the driver is not guaranteed
17284 * to have any rpi regions posted to the device and
17285 * must either attempt to repost the regions or take a
17289 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
17291 struct lpfc_rpi_hdr *rpi_page;
17295 /* SLI4 ports that support extents do not require RPI headers. */
17296 if (!phba->sli4_hba.rpi_hdrs_in_use)
17298 if (phba->sli4_hba.extents_in_use)
17301 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
17303 * Assign the rpi headers a physical rpi only if the driver
17304 * has not initialized those resources. A port reset only
17305 * needs the headers posted.
17307 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
17309 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
17311 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
17312 if (rc != MBX_SUCCESS) {
17313 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17314 "2008 Error %d posting all rpi "
17322 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
17323 LPFC_RPI_RSRC_RDY);
17328 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
17329 * @phba: pointer to lpfc hba data structure.
17330 * @rpi_page: pointer to the rpi memory region.
17332 * This routine is invoked to post a single rpi header to the
17333 * HBA consistent with the SLI-4 interface spec. This memory region
17334 * maps up to 64 rpi context regions.
17338 * -ENOMEM - No available memory
17339 * -EIO - The mailbox failed to complete successfully.
17342 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
17344 LPFC_MBOXQ_t *mboxq;
17345 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
17347 uint32_t shdr_status, shdr_add_status;
17348 union lpfc_sli4_cfg_shdr *shdr;
17350 /* SLI4 ports that support extents do not require RPI headers. */
17351 if (!phba->sli4_hba.rpi_hdrs_in_use)
17353 if (phba->sli4_hba.extents_in_use)
17356 /* The port is notified of the header region via a mailbox command. */
17357 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17359 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17360 "2001 Unable to allocate memory for issuing "
17361 "SLI_CONFIG_SPECIAL mailbox command\n");
17365 /* Post all rpi memory regions to the port. */
17366 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
17367 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
17368 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
17369 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
17370 sizeof(struct lpfc_sli4_cfg_mhdr),
17371 LPFC_SLI4_MBX_EMBED);
17374 /* Post the physical rpi to the port for this rpi header. */
17375 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
17376 rpi_page->start_rpi);
17377 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
17378 hdr_tmpl, rpi_page->page_count);
17380 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
17381 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
17382 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
17383 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
17384 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17385 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17386 if (rc != MBX_TIMEOUT)
17387 mempool_free(mboxq, phba->mbox_mem_pool);
17388 if (shdr_status || shdr_add_status || rc) {
17389 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17390 "2514 POST_RPI_HDR mailbox failed with "
17391 "status x%x add_status x%x, mbx status x%x\n",
17392 shdr_status, shdr_add_status, rc);
17396 * The next_rpi stores the next logical modulo-64 rpi value used
17397 * to post physical rpis in subsequent rpi postings.
17399 spin_lock_irq(&phba->hbalock);
17400 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
17401 spin_unlock_irq(&phba->hbalock);
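/*
 * Editor's illustrative sketch (not driver code): bf_get() above is a
 * mask-and-shift over the config sub-header response word. The field
 * positions below are hypothetical, not the real SLI4 layout; the point
 * is the extraction pattern used to check shdr_status/shdr_add_status.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define STATUS_SHIFT		0
#define STATUS_MASK		0xffu
#define ADD_STATUS_SHIFT	8
#define ADD_STATUS_MASK		0xffu

static uint32_t get_field(uint32_t word, unsigned int shift, uint32_t mask)
{
	return (word >> shift) & mask;
}

int main(void)
{
	uint32_t response = 0x00001700;	/* hypothetical response word */

	printf("status=0x%x add_status=0x%x\n",
	       get_field(response, STATUS_SHIFT, STATUS_MASK),
	       get_field(response, ADD_STATUS_SHIFT, ADD_STATUS_MASK));
	return 0;
}
#endif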
17407 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
17408 * @phba: pointer to lpfc hba data structure.
17410 * This routine is invoked to allocate an rpi from the driver-managed
17411 * range and mark it in the rpi bitmask. If the pool of remaining rpis
17412 * drops below the low water mark, another rpi header page is created
17413 * and posted to the port.
17416 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
17417 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
17420 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
17423 uint16_t max_rpi, rpi_limit;
17424 uint16_t rpi_remaining, lrpi = 0;
17425 struct lpfc_rpi_hdr *rpi_hdr;
17426 unsigned long iflag;
17429 * Fetch the next logical rpi. Because this index is logical,
17430 * the driver starts at 0 each time.
17432 spin_lock_irqsave(&phba->hbalock, iflag);
17433 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
17434 rpi_limit = phba->sli4_hba.next_rpi;
17436 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
17437 if (rpi >= rpi_limit)
17438 rpi = LPFC_RPI_ALLOC_ERROR;
17440 set_bit(rpi, phba->sli4_hba.rpi_bmask);
17441 phba->sli4_hba.max_cfg_param.rpi_used++;
17442 phba->sli4_hba.rpi_count++;
17444 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
17445 "0001 rpi:%x max:%x lim:%x\n",
17446 (int) rpi, max_rpi, rpi_limit);
17449 * Don't try to allocate more rpi header regions if the device limit
17450 * has been exhausted.
17452 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
17453 (phba->sli4_hba.rpi_count >= max_rpi)) {
17454 spin_unlock_irqrestore(&phba->hbalock, iflag);
17459 * RPI header postings are not required for SLI4 ports capable of
17462 if (!phba->sli4_hba.rpi_hdrs_in_use) {
17463 spin_unlock_irqrestore(&phba->hbalock, iflag);
17468 * If the driver is running low on rpi resources, allocate another
17469 * page now. Note that the next_rpi value is used because
17470 * it represents how many rpis are actually in use whereas max_rpi notes
17471 * the maximum number supported by the device.
17473 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
17474 spin_unlock_irqrestore(&phba->hbalock, iflag);
17475 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
17476 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
17478 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17479 "2002 Error Could not grow rpi "
17482 lrpi = rpi_hdr->start_rpi;
17483 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
17484 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
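/*
 * Editor's illustrative sketch (not driver code): a userspace model of
 * the rpi bitmap allocation and low-water-mark check above. Sizes and
 * the low-water value are hypothetical stand-ins; the kernel uses
 * find_next_zero_bit()/set_bit() over phba->sli4_hba.rpi_bmask.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define MAX_RPI		256u
#define LOW_WATER	32u		/* LPFC_RPI_LOW_WATER_MARK stand-in */
#define RPI_ERROR	0xFFFFu		/* LPFC_RPI_ALLOC_ERROR stand-in */

static uint64_t bmask[MAX_RPI / 64];

/* Find, set, and return the first clear bit below @limit. */
static unsigned int alloc_rpi(unsigned int limit, unsigned int *count)
{
	unsigned int rpi;

	for (rpi = 0; rpi < limit; rpi++) {
		if (!(bmask[rpi / 64] & (1ULL << (rpi % 64)))) {
			bmask[rpi / 64] |= 1ULL << (rpi % 64);
			(*count)++;
			return rpi;
		}
	}
	return RPI_ERROR;
}

int main(void)
{
	unsigned int count = 0, next_rpi = 64;	/* one 64-rpi page posted */
	unsigned int i, rpi = 0;

	for (i = 0; i < 40; i++)
		rpi = alloc_rpi(next_rpi, &count);
	printf("last rpi=%u remaining=%u grow=%s\n", rpi, next_rpi - count,
	       next_rpi - count < LOW_WATER ? "yes" : "no");
	return 0;
}
#endif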
17492 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
17493 * @phba: pointer to lpfc hba data structure.
17495 * This routine is invoked to release an rpi to the pool of
17496 * available rpis maintained by the driver.
17499 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
17502 * if the rpi value indicates a prior unreg has already
17503 * been done, skip the unreg.
17505 if (rpi == LPFC_RPI_ALLOC_ERROR)
17508 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
17509 phba->sli4_hba.rpi_count--;
17510 phba->sli4_hba.max_cfg_param.rpi_used--;
17515 * lpfc_sli4_free_rpi - Release an rpi for reuse.
17516 * @phba: pointer to lpfc hba data structure.
17518 * This routine is invoked to release an rpi to the pool of
17519 * available rpis maintained by the driver.
17522 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
17524 spin_lock_irq(&phba->hbalock);
17525 __lpfc_sli4_free_rpi(phba, rpi);
17526 spin_unlock_irq(&phba->hbalock);
17530 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
17531 * @phba: pointer to lpfc hba data structure.
17533 * This routine is invoked to remove the memory region that
17534 * provided rpi via a bitmask.
17537 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
17539 kfree(phba->sli4_hba.rpi_bmask);
17540 kfree(phba->sli4_hba.rpi_ids);
17541 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
17545 * lpfc_sli4_resume_rpi - Resume an rpi with the port
17546 * @ndlp: pointer to the node whose rpi is to be resumed.
17548 * This routine is invoked to issue a RESUME_RPI mailbox command to the
17549 * port for the given node, with an optional completion handler and arg.
17552 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
17553 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
17555 LPFC_MBOXQ_t *mboxq;
17556 struct lpfc_hba *phba = ndlp->phba;
17559 /* The port is notified of the header region via a mailbox command. */
17560 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17564 /* Post all rpi memory regions to the port. */
17565 lpfc_resume_rpi(mboxq, ndlp);
17567 mboxq->mbox_cmpl = cmpl;
17568 mboxq->context1 = arg;
17569 mboxq->context2 = ndlp;
17571 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17572 mboxq->vport = ndlp->vport;
17573 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17574 if (rc == MBX_NOT_FINISHED) {
17575 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17576 "2010 Resume RPI Mailbox failed "
17577 "status %d, mbxStatus x%x\n", rc,
17578 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
17579 mempool_free(mboxq, phba->mbox_mem_pool);
17586 * lpfc_sli4_init_vpi - Initialize a vpi with the port
17587 * @vport: Pointer to the vport for which the vpi is being initialized
17589 * This routine is invoked to activate a vpi with the port.
17593 * -Evalue otherwise
17596 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
17598 LPFC_MBOXQ_t *mboxq;
17600 int retval = MBX_SUCCESS;
17602 struct lpfc_hba *phba = vport->phba;
17603 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17606 lpfc_init_vpi(phba, mboxq, vport->vpi);
17607 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
17608 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
17609 if (rc != MBX_SUCCESS) {
17610 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
17611 "2022 INIT VPI Mailbox failed "
17612 "status %d, mbxStatus x%x\n", rc,
17613 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
17616 if (rc != MBX_TIMEOUT)
17617 mempool_free(mboxq, vport->phba->mbox_mem_pool);
17623 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
17624 * @phba: pointer to lpfc hba data structure.
17625 * @mboxq: Pointer to mailbox object.
17627 * This routine is the completion handler for the manual ADD_FCF_RECORD
17628 * mailbox command. It checks the mailbox subheader status, logs any
17629 * failure, and frees the nonembedded mailbox resources.
17632 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
17635 union lpfc_sli4_cfg_shdr *shdr;
17636 uint32_t shdr_status, shdr_add_status;
17638 virt_addr = mboxq->sge_array->addr[0];
17639 /* The IOCTL status is embedded in the mailbox subheader. */
17640 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
17641 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17642 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17644 if ((shdr_status || shdr_add_status) &&
17645 (shdr_status != STATUS_FCF_IN_USE))
17646 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17647 "2558 ADD_FCF_RECORD mailbox failed with "
17648 "status x%x add_status x%x\n",
17649 shdr_status, shdr_add_status);
17651 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17655 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
17656 * @phba: pointer to lpfc hba data structure.
17657 * @fcf_record: pointer to the initialized fcf record to add.
17659 * This routine is invoked to manually add a single FCF record. The caller
17660 * must pass a completely initialized FCF_Record. This routine takes
17661 * care of the nonembedded mailbox operations.
17664 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
17667 LPFC_MBOXQ_t *mboxq;
17670 struct lpfc_mbx_sge sge;
17671 uint32_t alloc_len, req_len;
17674 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17676 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17677 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
17681 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
17684 /* Allocate DMA memory and set up the non-embedded mailbox command */
17685 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
17686 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
17687 req_len, LPFC_SLI4_MBX_NEMBED);
17688 if (alloc_len < req_len) {
17689 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17690 "2523 Allocated DMA memory size (x%x) is "
17691 "less than the requested DMA memory "
17692 "size (x%x)\n", alloc_len, req_len);
17693 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17698 * Get the first SGE entry from the non-embedded DMA memory. This
17699 * routine only uses a single SGE.
17701 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
17702 virt_addr = mboxq->sge_array->addr[0];
17704 * Configure the FCF record for FCFI 0. This is the driver's
17705 * hardcoded default and gets used in nonFIP mode.
17707 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
17708 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
17709 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
17712 * Copy the fcf_index and the FCF Record Data. The data starts after
17713 * the FCoE header plus word10. The data copy needs to be endian
17716 bytep += sizeof(uint32_t);
17717 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
17718 mboxq->vport = phba->pport;
17719 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
17720 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17721 if (rc == MBX_NOT_FINISHED) {
17722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17723 "2515 ADD_FCF_RECORD mailbox failed with "
17724 "status 0x%x\n", rc);
17725 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17734 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
17735 * @phba: pointer to lpfc hba data structure.
17736 * @fcf_record: pointer to the fcf record to write the default data.
17737 * @fcf_index: FCF table entry index.
17739 * This routine is invoked to build the driver's default FCF record. The
17740 * values used are hardcoded. This routine handles memory initialization.
17744 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
17745 struct fcf_record *fcf_record,
17746 uint16_t fcf_index)
17748 memset(fcf_record, 0, sizeof(struct fcf_record));
17749 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
17750 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
17751 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
17752 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
17753 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
17754 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
17755 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
17756 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
17757 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
17758 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
17759 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
17760 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
17761 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
17762 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
17763 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
17764 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
17765 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
17766 /* Set the VLAN bit map */
17767 if (phba->valid_vlan) {
17768 fcf_record->vlan_bitmap[phba->vlan_id / 8]
17769 = 1 << (phba->vlan_id % 8);
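/*
 * Editor's illustrative sketch (not driver code): the VLAN bitmap above
 * stores one bit per VLAN id, eight ids per byte. A standalone model
 * with a hypothetical id:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t vlan_bitmap[512] = { 0 };	/* 4096 ids / 8 per byte */
	uint16_t vlan_id = 101;			/* hypothetical VLAN id */

	vlan_bitmap[vlan_id / 8] = 1 << (vlan_id % 8);	/* byte 12, bit 5 */
	printf("byte %u = 0x%02x\n", vlan_id / 8, vlan_bitmap[vlan_id / 8]);
	return 0;
}
#endif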
17774 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
17775 * @phba: pointer to lpfc hba data structure.
17776 * @fcf_index: FCF table entry offset.
17778 * This routine is invoked to scan the entire FCF table by reading FCF
17779 * record and processing it one at a time starting from the @fcf_index
17780 * for initial FCF discovery or fast FCF failover rediscovery.
17782 * Return 0 if the mailbox command is submitted successfully, non-zero
17786 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
17789 LPFC_MBOXQ_t *mboxq;
17791 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
17792 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
17793 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17795 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17796 "2000 Failed to allocate mbox for "
17799 goto fail_fcf_scan;
17801 /* Construct the read FCF record mailbox command */
17802 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
17805 goto fail_fcf_scan;
17807 /* Issue the mailbox command asynchronously */
17808 mboxq->vport = phba->pport;
17809 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
17811 spin_lock_irq(&phba->hbalock);
17812 phba->hba_flag |= FCF_TS_INPROG;
17813 spin_unlock_irq(&phba->hbalock);
17815 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17816 if (rc == MBX_NOT_FINISHED)
17819 /* Reset eligible FCF count for new scan */
17820 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
17821 phba->fcf.eligible_fcf_cnt = 0;
17827 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17828 /* FCF scan failed, clear FCF_TS_INPROG flag */
17829 spin_lock_irq(&phba->hbalock);
17830 phba->hba_flag &= ~FCF_TS_INPROG;
17831 spin_unlock_irq(&phba->hbalock);
17837 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
17838 * @phba: pointer to lpfc hba data structure.
17839 * @fcf_index: FCF table entry offset.
17841 * This routine is invoked to read an FCF record indicated by @fcf_index
17842 * and to use it for FLOGI roundrobin FCF failover.
17844 * Return 0 if the mailbox command is submitted successfully, non-zero
17848 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
17851 LPFC_MBOXQ_t *mboxq;
17853 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17855 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
17856 "2763 Failed to allocate mbox for "
17859 goto fail_fcf_read;
17861 /* Construct the read FCF record mailbox command */
17862 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
17865 goto fail_fcf_read;
17867 /* Issue the mailbox command asynchronously */
17868 mboxq->vport = phba->pport;
17869 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
17870 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17871 if (rc == MBX_NOT_FINISHED)
17877 if (error && mboxq)
17878 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17883 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
17884 * @phba: pointer to lpfc hba data structure.
17885 * @fcf_index: FCF table entry offset.
17887 * This routine is invoked to read an FCF record indicated by @fcf_index to
17888 * determine whether it's eligible for FLOGI roundrobin failover list.
17890 * Return 0 if the mailbox command is submitted successfully, non-zero
17894 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
17897 LPFC_MBOXQ_t *mboxq;
17899 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17901 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
17902 "2758 Failed to allocate mbox for "
17905 goto fail_fcf_read;
17907 /* Construct the read FCF record mailbox command */
17908 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
17911 goto fail_fcf_read;
17913 /* Issue the mailbox command asynchronously */
17914 mboxq->vport = phba->pport;
17915 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
17916 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17917 if (rc == MBX_NOT_FINISHED)
17923 if (error && mboxq)
17924 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17929 * lpfc_check_next_fcf_pri_level
17930 * @phba: pointer to the lpfc_hba struct for this port.
17931 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
17932 * routine when the rr_bmask is empty. The FCF indices are put into the
17933 * rr_bmask based on their priority level, starting from the highest
17934 * priority to the lowest. The most likely FCF candidate will be in the
17935 * highest priority group. When this routine is called it searches the
17936 * fcf_pri list for the next lowest priority group and repopulates the
17937 * rr_bmask with only those entries.
17939 * 1 = success, 0 = failure
17942 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
17944 uint16_t next_fcf_pri;
17945 uint16_t last_index;
17946 struct lpfc_fcf_pri *fcf_pri;
17950 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
17951 LPFC_SLI4_FCF_TBL_INDX_MAX);
17952 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
17953 "3060 Last IDX %d\n", last_index);
17955 /* Verify the priority list has 2 or more entries */
17956 spin_lock_irq(&phba->hbalock);
17957 if (list_empty(&phba->fcf.fcf_pri_list) ||
17958 list_is_singular(&phba->fcf.fcf_pri_list)) {
17959 spin_unlock_irq(&phba->hbalock);
17960 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
17961 "3061 Last IDX %d\n", last_index);
17962 return 0; /* Empty rr list */
17964 spin_unlock_irq(&phba->hbalock);
17968 * Clear the rr_bmask and set all of the bits that are at this
17971 memset(phba->fcf.fcf_rr_bmask, 0,
17972 sizeof(*phba->fcf.fcf_rr_bmask));
17973 spin_lock_irq(&phba->hbalock);
17974 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
17975 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
17978 * the first priority that has not failed FLOGI
17979 * will be the highest.
17982 next_fcf_pri = fcf_pri->fcf_rec.priority;
17983 spin_unlock_irq(&phba->hbalock);
17984 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
17985 rc = lpfc_sli4_fcf_rr_index_set(phba,
17986 fcf_pri->fcf_rec.fcf_index);
17990 spin_lock_irq(&phba->hbalock);
17993 * if next_fcf_pri was not set above and the list is not empty then
17994 * we have failed flogis on all of them. So reset flogi failed
17995 * and start at the beginning.
17997 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
17998 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
17999 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
18001 * the first priority that has not failed FLOGI
18002 * will be the highest.
18005 next_fcf_pri = fcf_pri->fcf_rec.priority;
18006 spin_unlock_irq(&phba->hbalock);
18007 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18008 rc = lpfc_sli4_fcf_rr_index_set(phba,
18009 fcf_pri->fcf_rec.fcf_index);
18013 spin_lock_irq(&phba->hbalock);
18017 spin_unlock_irq(&phba->hbalock);
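/*
 * Editor's illustrative sketch (not driver code): the routine above
 * selects the lowest priority value (highest precedence) among entries
 * that have not failed FLOGI and repopulates rr_bmask with every index
 * at that level. The driver walks a sorted fcf_pri list; this
 * hypothetical model reaches the same result with a min() scan.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fcf_pri {		/* pared-down lpfc_fcf_pri stand-in */
	uint16_t fcf_index;
	uint8_t priority;
	bool flogi_failed;
};

int main(void)
{
	struct fcf_pri list[3] = {
		{ 0, 2, true }, { 1, 2, false }, { 2, 5, false },
	};
	uint8_t next_pri = 0;
	int i;

	for (i = 0; i < 3; i++)		/* pick the best surviving level */
		if (!list[i].flogi_failed &&
		    (!next_pri || list[i].priority < next_pri))
			next_pri = list[i].priority;
	for (i = 0; i < 3; i++)		/* repopulate that level only */
		if (!list[i].flogi_failed && list[i].priority == next_pri)
			printf("set fcf index %u\n", list[i].fcf_index);
	return 0;
}
#endif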
18022 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
18023 * @phba: pointer to lpfc hba data structure.
18025 * This routine is to get the next eligible FCF record index in a round
18026 * robin fashion. If the next eligible FCF record index equals to the
18027 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
18028 * shall be returned, otherwise, the next eligible FCF record's index
18029 * shall be returned.
18032 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
18034 uint16_t next_fcf_index;
18037 /* Search start from next bit of currently registered FCF index */
18038 next_fcf_index = phba->fcf.current_rec.fcf_indx;
18041 /* Determine the next fcf index to check */
18042 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
18043 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18044 LPFC_SLI4_FCF_TBL_INDX_MAX,
18047 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
18048 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18050 * If we have wrapped then we need to clear the bits that
18051 * have been tested so that we can detect when we should
18052 * change the priority level.
18054 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18055 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
18059 /* Check roundrobin failover list empty condition */
18060 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
18061 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
18063 * If next fcf index is not found check if there are lower
18064 * Priority level fcf's in the fcf_priority list.
18065 * Set up the rr_bmask with all of the available fcf bits
18066 * at that level and continue the selection process.
18068 if (lpfc_check_next_fcf_pri_level(phba))
18069 goto initial_priority;
18070 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18071 "2844 No roundrobin failover FCF available\n");
18073 return LPFC_FCOE_FCF_NEXT_NONE;
18076 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
18077 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
18078 LPFC_FCF_FLOGI_FAILED) {
18079 if (list_is_singular(&phba->fcf.fcf_pri_list))
18080 return LPFC_FCOE_FCF_NEXT_NONE;
18082 goto next_priority;
18085 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18086 "2845 Get next roundrobin failover FCF (x%x)\n",
18089 return next_fcf_index;
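/*
 * Editor's illustrative sketch (not driver code): the roundrobin search
 * above is a find_next_bit() starting one past the current index, with
 * a single wraparound; coming back to the current index (or finding no
 * bit at all) means no other eligible FCF exists. A 64-entry model:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define TBL_MAX		64u	/* LPFC_SLI4_FCF_TBL_INDX_MAX stand-in */
#define NEXT_NONE	0xFFFFu	/* LPFC_FCOE_FCF_NEXT_NONE stand-in */

static uint64_t rr_bmask;	/* one word models the eligibility bitmap */

/* Search forward from the bit after @current, wrapping once. */
static unsigned int next_index(unsigned int current)
{
	unsigned int i, idx;

	for (i = 1; i < TBL_MAX; i++) {
		idx = (current + i) % TBL_MAX;
		if (rr_bmask & (1ULL << idx))
			return idx;
	}
	return NEXT_NONE;	/* only @current (or nothing) is eligible */
}

int main(void)
{
	rr_bmask = (1ULL << 3) | (1ULL << 40);
	printf("next after 40 = %u\n", next_index(40));	/* wraps to 3 */
	printf("next after 3 = %u\n", next_index(3));	/* 40 */
	return 0;
}
#endif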
18093 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
18094 * @phba: pointer to lpfc hba data structure.
18096 * This routine sets the FCF record index in to the eligible bmask for
18097 * roundrobin failover search. It checks to make sure that the index
18098 * does not go beyond the range of the driver allocated bmask dimension
18099 * before setting the bit.
18101 * Returns 0 if the index bit is successfully set; otherwise, it returns
18105 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
18107 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18108 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18109 "2610 FCF (x%x) reached driver's book "
18110 "keeping dimension:x%x\n",
18111 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18114 /* Set the eligible FCF record index bmask */
18115 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18117 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18118 "2790 Set FCF (x%x) to roundrobin FCF failover "
18119 "bmask\n", fcf_index);
18125 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
18126 * @phba: pointer to lpfc hba data structure.
18128 * This routine clears the FCF record index from the eligible bmask for
18129 * roundrobin failover search. It checks to make sure that the index
18130 * does not go beyond the range of the driver allocated bmask dimension
18131 * before clearing the bit.
18134 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
18136 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
18137 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18138 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18139 "2762 FCF (x%x) reached driver's book "
18140 "keeping dimension:x%x\n",
18141 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18144 /* Clear the eligible FCF record index bmask */
18145 spin_lock_irq(&phba->hbalock);
18146 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
18148 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
18149 list_del_init(&fcf_pri->list);
18153 spin_unlock_irq(&phba->hbalock);
18154 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18156 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18157 "2791 Clear FCF (x%x) from roundrobin failover "
18158 "bmask\n", fcf_index);
18162 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
18163 * @phba: pointer to lpfc hba data structure.
18165 * This routine is the completion routine for the rediscover FCF table mailbox
18166 * command. On failure it falls back to retry or link-down handling;
18167 * on success it starts the FCF rediscovery wait timer.
18170 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
18172 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18173 uint32_t shdr_status, shdr_add_status;
18175 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18177 shdr_status = bf_get(lpfc_mbox_hdr_status,
18178 &redisc_fcf->header.cfg_shdr.response);
18179 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
18180 &redisc_fcf->header.cfg_shdr.response);
18181 if (shdr_status || shdr_add_status) {
18182 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18183 "2746 Requesting for FCF rediscovery failed "
18184 "status x%x add_status x%x\n",
18185 shdr_status, shdr_add_status);
18186 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
18187 spin_lock_irq(&phba->hbalock);
18188 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
18189 spin_unlock_irq(&phba->hbalock);
18191 * CVL event triggered FCF rediscover request failed,
18192 * last resort to re-try current registered FCF entry.
18194 lpfc_retry_pport_discovery(phba);
18196 spin_lock_irq(&phba->hbalock);
18197 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
18198 spin_unlock_irq(&phba->hbalock);
18200 * DEAD FCF event triggered FCF rediscover request
18201 * failed, last resort to fail over as a link down
18202 * to FCF registration.
18204 lpfc_sli4_fcf_dead_failthrough(phba);
18207 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18208 "2775 Start FCF rediscover quiescent timer\n");
18210 * Start FCF rediscovery wait timer for pending FCF
18211 * before rescanning the FCF record table.
18213 lpfc_fcf_redisc_wait_start_timer(phba);
18216 mempool_free(mbox, phba->mbox_mem_pool);
18220 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
18221 * @phba: pointer to lpfc hba data structure.
18223 * This routine is invoked to request rediscovery of the entire FCF table
18227 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
18229 LPFC_MBOXQ_t *mbox;
18230 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18233 /* Cancel retry delay timers to all vports before FCF rediscover */
18234 lpfc_cancel_all_vport_retry_delay_timer(phba);
18236 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18238 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18239 "2745 Failed to allocate mbox for "
18240 "requesting FCF rediscover.\n");
18244 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
18245 sizeof(struct lpfc_sli4_cfg_mhdr));
18246 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18247 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
18248 length, LPFC_SLI4_MBX_EMBED);
18250 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18251 /* Set count to 0 for invalidating the entire FCF database */
18252 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
18254 /* Issue the mailbox command asynchronously */
18255 mbox->vport = phba->pport;
18256 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
18257 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
18259 if (rc == MBX_NOT_FINISHED) {
18260 mempool_free(mbox, phba->mbox_mem_pool);
18267 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
18268 * @phba: pointer to lpfc hba data structure.
18270 * This function is the failover routine as a last resort to the FCF DEAD
18271 * event when the driver failed to perform fast FCF failover.
18274 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
18276 uint32_t link_state;
18279 * Last resort as FCF DEAD event failover will treat this as
18280 * a link down, but save the link state because we don't want
18281 * it to be changed to Link Down unless it is already down.
18283 link_state = phba->link_state;
18284 lpfc_linkdown(phba);
18285 phba->link_state = link_state;
18287 /* Unregister FCF if no devices connected to it */
18288 lpfc_unregister_unused_fcf(phba);
18292 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
18293 * @phba: pointer to lpfc hba data structure.
18294 * @rgn23_data: pointer to configure region 23 data.
18296 * This function gets SLI3 port configuration region 23 data through the
18297 * memory dump mailbox command. On success, the size of the data
18298 * will be returned, otherwise, 0 will be returned.
18301 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
18303 LPFC_MBOXQ_t *pmb = NULL;
18305 uint32_t offset = 0;
18311 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18313 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18314 "2600 failed to allocate mailbox memory\n");
18320 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
18321 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
18323 if (rc != MBX_SUCCESS) {
18324 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
18325 "2601 failed to read config "
18326 "region 23, rc 0x%x Status 0x%x\n",
18327 rc, mb->mbxStatus);
18328 mb->un.varDmp.word_cnt = 0;
18331 * Dump mem may return a zero word count when finished, or we got a
18332 * mailbox error; either way we are done.
18334 if (mb->un.varDmp.word_cnt == 0)
18336 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
18337 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
18339 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
18340 rgn23_data + offset,
18341 mb->un.varDmp.word_cnt);
18342 offset += mb->un.varDmp.word_cnt;
18343 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
18345 mempool_free(pmb, phba->mbox_mem_pool);
18350 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
18351 * @phba: pointer to lpfc hba data structure.
18352 * @rgn23_data: pointer to configure region 23 data.
18354 * This function gets SLI4 port configuration region 23 data through the
18355 * memory dump mailbox command. On success, the size of the data
18356 * will be returned, otherwise, 0 will be returned.
18359 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
18361 LPFC_MBOXQ_t *mboxq = NULL;
18362 struct lpfc_dmabuf *mp = NULL;
18363 struct lpfc_mqe *mqe;
18364 uint32_t data_length = 0;
18370 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18372 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18373 "3105 failed to allocate mailbox memory\n");
18377 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
18379 mqe = &mboxq->u.mqe;
18380 mp = (struct lpfc_dmabuf *) mboxq->context1;
18381 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18384 data_length = mqe->un.mb_words[5];
18385 if (data_length == 0)
18387 if (data_length > DMP_RGN23_SIZE) {
18391 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
18393 mempool_free(mboxq, phba->mbox_mem_pool);
18395 lpfc_mbuf_free(phba, mp->virt, mp->phys);
18398 return data_length;
18402 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
18403 * @phba: pointer to lpfc hba data structure.
18405 * This function reads region 23 and parses the TLV for port status to
18406 * decide if the user disabled the port. If the TLV indicates the
18407 * port is disabled, the hba_flag is set accordingly.
18410 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
18412 uint8_t *rgn23_data = NULL;
18413 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
18414 uint32_t offset = 0;
18416 /* Get adapter Region 23 data */
18417 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
18421 if (phba->sli_rev < LPFC_SLI_REV4)
18422 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
18424 if_type = bf_get(lpfc_sli_intf_if_type,
18425 &phba->sli4_hba.sli_intf);
18426 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
18428 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
18434 /* Check the region signature first */
18435 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
18436 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18437 "2619 Config region 23 has bad signature\n");
18442 /* Check the data structure version */
18443 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
18444 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18445 "2620 Config region 23 has bad version\n");
18450 /* Parse TLV entries in the region */
18451 while (offset < data_size) {
18452 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
18455 * If the TLV is not a driver specific TLV or the driver id is
18456 * not the Linux driver id, skip the record.
18458 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
18459 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
18460 (rgn23_data[offset + 3] != 0)) {
18461 offset += rgn23_data[offset + 1] * 4 + 4;
18465 /* Driver found a driver specific TLV in the config region */
18466 sub_tlv_len = rgn23_data[offset + 1] * 4;
18471 * Search for configured port state sub-TLV.
18473 while ((offset < data_size) &&
18474 (tlv_offset < sub_tlv_len)) {
18475 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
18480 if (rgn23_data[offset] != PORT_STE_TYPE) {
18481 offset += rgn23_data[offset + 1] * 4 + 4;
18482 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
18486 /* This HBA contains PORT_STE configured */
18487 if (!rgn23_data[offset + 2])
18488 phba->hba_flag |= LINK_DISABLED;
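/*
 * Editor's illustrative sketch (not driver code): region 23 records are
 * TLVs where byte 0 is the type and byte 1 the length in 32-bit words,
 * so each record spans data[off + 1] * 4 + 4 bytes, exactly the stride
 * used above. Record contents here are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define LAST_REC 0xffu		/* LPFC_REGION23_LAST_REC stand-in */

static void walk_tlvs(const uint8_t *data, uint32_t size)
{
	uint32_t off = 0;

	while (off < size) {
		if (data[off] == LAST_REC)
			break;
		printf("type 0x%02x payload %u bytes\n",
		       data[off], data[off + 1] * 4u);
		off += data[off + 1] * 4u + 4u;	/* header + payload */
	}
}

int main(void)
{
	uint8_t rgn[16] = { 0xA0, 1, 0, 0, 1, 2, 3, 4, LAST_REC };

	walk_tlvs(rgn, sizeof(rgn));
	return 0;
}
#endif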
18500 * lpfc_wr_object - write an object to the firmware
18501 * @phba: HBA structure that indicates port to create a queue on.
18502 * @dmabuf_list: list of dmabufs to write to the port.
18503 * @size: the total byte value of the objects to write to the port.
18504 * @offset: the current offset to be used to start the transfer.
18506 * This routine will create a wr_object mailbox command to send to the port.
18507 * The mailbox command will be constructed using the dma buffers described in
18508 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
18509 * BDEs as the embedded mailbox can support. The @offset variable will be
18510 * used to indicate the starting offset of the transfer and will also return
18511 * the offset after the write object mailbox has completed. @size is used to
18512 * determine the end of the object and whether the eof bit should be set.
18514 * Return 0 if successful; @offset will contain the new offset to use
18515 * for the next write.
18516 * Return negative value for error cases.
18519 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
18520 uint32_t size, uint32_t *offset)
18522 struct lpfc_mbx_wr_object *wr_object;
18523 LPFC_MBOXQ_t *mbox;
18525 uint32_t shdr_status, shdr_add_status;
18527 union lpfc_sli4_cfg_shdr *shdr;
18528 struct lpfc_dmabuf *dmabuf;
18529 uint32_t written = 0;
18531 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18535 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
18536 LPFC_MBOX_OPCODE_WRITE_OBJECT,
18537 sizeof(struct lpfc_mbx_wr_object) -
18538 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
18540 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
18541 wr_object->u.request.write_offset = *offset;
18542 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
18543 wr_object->u.request.object_name[0] =
18544 cpu_to_le32(wr_object->u.request.object_name[0]);
18545 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
18546 list_for_each_entry(dmabuf, dmabuf_list, list) {
18547 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
18549 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
18550 wr_object->u.request.bde[i].addrHigh =
18551 putPaddrHigh(dmabuf->phys);
18552 if (written + SLI4_PAGE_SIZE >= size) {
18553 wr_object->u.request.bde[i].tus.f.bdeSize =
18555 written += (size - written);
18556 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
18558 wr_object->u.request.bde[i].tus.f.bdeSize =
18560 written += SLI4_PAGE_SIZE;
18564 wr_object->u.request.bde_count = i;
18565 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
18566 if (!phba->sli4_hba.intr_enable)
18567 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18569 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18570 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18572 /* The IOCTL status is embedded in the mailbox subheader. */
18573 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
18574 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18575 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18576 if (rc != MBX_TIMEOUT)
18577 mempool_free(mbox, phba->mbox_mem_pool);
18578 if (shdr_status || shdr_add_status || rc) {
18579 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18580 "3025 Write Object mailbox failed with "
18581 "status x%x add_status x%x, mbx status x%x\n",
18582 shdr_status, shdr_add_status, rc);
18585 *offset += wr_object->u.response.actual_write_length;
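/*
 * Editor's illustrative sketch (not driver code): the BDE loop above
 * carves the object into page-sized chunks and flags the chunk that
 * reaches the end of the object with eof. Standalone model, with a
 * hypothetical object size:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096u	/* SLI4_PAGE_SIZE stand-in */

int main(void)
{
	uint32_t size = 10000, written = 0, chunk;
	int i = 0, eof;

	while (written < size) {
		if (written + PAGE_SZ >= size) {	/* last BDE */
			chunk = size - written;
			eof = 1;
		} else {
			chunk = PAGE_SZ;
			eof = 0;
		}
		written += chunk;
		printf("bde[%d] size=%u eof=%d\n", i++, chunk, eof);
	}
	return 0;
}
#endif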
18590 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
18591 * @vport: pointer to vport data structure.
18593 * This function iterates through the mailboxq and cleans up all REG_LOGIN
18594 * and REG_VPI mailbox commands associated with the vport. This function
18595 * is called when the driver wants to restart discovery of the vport due to
18596 * a Clear Virtual Link event.
18599 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
18601 struct lpfc_hba *phba = vport->phba;
18602 LPFC_MBOXQ_t *mb, *nextmb;
18603 struct lpfc_dmabuf *mp;
18604 struct lpfc_nodelist *ndlp;
18605 struct lpfc_nodelist *act_mbx_ndlp = NULL;
18606 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
18607 LIST_HEAD(mbox_cmd_list);
18608 uint8_t restart_loop;
18610 /* Clean up internally queued mailbox commands with the vport */
18611 spin_lock_irq(&phba->hbalock);
18612 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
18613 if (mb->vport != vport)
18616 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
18617 (mb->u.mb.mbxCommand != MBX_REG_VPI))
18620 list_del(&mb->list);
18621 list_add_tail(&mb->list, &mbox_cmd_list);
18623 /* Clean up active mailbox command with the vport */
18624 mb = phba->sli.mbox_active;
18625 if (mb && (mb->vport == vport)) {
18626 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
18627 (mb->u.mb.mbxCommand == MBX_REG_VPI))
18628 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18629 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
18630 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
18631 /* Put reference count for delayed processing */
18632 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
18633 /* Unregister the RPI when mailbox complete */
18634 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
18637 /* Cleanup any mailbox completions which are not yet processed */
18640 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
18642 * If this mailbox is already processed or it is
18643 * for another vport, ignore it.
18645 if ((mb->vport != vport) ||
18646 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
18649 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
18650 (mb->u.mb.mbxCommand != MBX_REG_VPI))
18653 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18654 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
18655 ndlp = (struct lpfc_nodelist *)mb->context2;
18656 /* Unregister the RPI when mailbox complete */
18657 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
18659 spin_unlock_irq(&phba->hbalock);
18660 spin_lock(shost->host_lock);
18661 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
18662 spin_unlock(shost->host_lock);
18663 spin_lock_irq(&phba->hbalock);
18667 } while (restart_loop);
18669 spin_unlock_irq(&phba->hbalock);
18671 /* Release the cleaned-up mailbox commands */
18672 while (!list_empty(&mbox_cmd_list)) {
18673 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
18674 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
18675 mp = (struct lpfc_dmabuf *) (mb->context1);
18677 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
18680 ndlp = (struct lpfc_nodelist *) mb->context2;
18681 mb->context2 = NULL;
18683 spin_lock(shost->host_lock);
18684 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
18685 spin_unlock(shost->host_lock);
18686 lpfc_nlp_put(ndlp);
18689 mempool_free(mb, phba->mbox_mem_pool);
18692 /* Release the ndlp with the cleaned-up active mailbox command */
18693 if (act_mbx_ndlp) {
18694 spin_lock(shost->host_lock);
18695 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
18696 spin_unlock(shost->host_lock);
18697 lpfc_nlp_put(act_mbx_ndlp);
18702 * lpfc_drain_txq - Drain the txq
18703 * @phba: Pointer to HBA context object.
18705 * This function attempts to submit IOCBs on the txq
18706 * to the adapter. For SLI4 adapters, the txq contains
18707 * ELS IOCBs that have been deferred because there
18708 * are no SGLs available. This congestion can occur with large
18709 * vport counts during node discovery.
18713 lpfc_drain_txq(struct lpfc_hba *phba)
18715 LIST_HEAD(completions);
18716 struct lpfc_sli_ring *pring;
18717 struct lpfc_iocbq *piocbq = NULL;
18718 unsigned long iflags = 0;
18719 char *fail_msg = NULL;
18720 struct lpfc_sglq *sglq;
18721 union lpfc_wqe128 wqe128;
18722 union lpfc_wqe *wqe = (union lpfc_wqe *) &wqe128;
18723 uint32_t txq_cnt = 0;
18725 pring = lpfc_phba_elsring(phba);
18726 if (unlikely(!pring))
18729 spin_lock_irqsave(&pring->ring_lock, iflags);
18730 list_for_each_entry(piocbq, &pring->txq, list) {
18734 if (txq_cnt > pring->txq_max)
18735 pring->txq_max = txq_cnt;
18737 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18739 while (!list_empty(&pring->txq)) {
18740 spin_lock_irqsave(&pring->ring_lock, iflags);
18742 piocbq = lpfc_sli_ringtx_get(phba, pring);
18744 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18745 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18746 "2823 txq empty and txq_cnt is %d\n ",
18750 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
18752 __lpfc_sli_ringtx_put(phba, pring, piocbq);
18753 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18758 /* The xri and iocb resources secured,
18759 * attempt to issue request
18761 piocbq->sli4_lxritag = sglq->sli4_lxritag;
18762 piocbq->sli4_xritag = sglq->sli4_xritag;
18763 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
18764 fail_msg = "to convert bpl to sgl";
18765 else if (lpfc_sli4_iocb2wqe(phba, piocbq, wqe))
18766 fail_msg = "to convert iocb to wqe";
18767 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
18768 fail_msg = " - Wq is full";
18770 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
18773 /* Failed means we can't issue and need to cancel */
18774 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18775 "2822 IOCB failed %s iotag 0x%x "
18778 piocbq->iotag, piocbq->sli4_xritag);
18779 list_add_tail(&piocbq->list, &completions);
18782 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18785 /* Cancel all the IOCBs that cannot be issued */
18786 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
18787 IOERR_SLI_ABORTED);
18793 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
18794 * @phba: Pointer to HBA context object.
18795 * @pwqe: Pointer to command WQE.
18796 * @sglq: Pointer to the scatter gather queue object.
18798 * This routine converts the bpl or bde that is in the WQE
18799 * to a sgl list for the sli4 hardware. The physical address
18800 * of the bpl/bde is converted back to a virtual address.
18801 * If the WQE contains a BPL then the list of BDE's is
18802 * converted to sli4_sge's. If the WQE contains a single
18803 * BDE then it is converted to a single sli4_sge.
18804 * The WQE is still in cpu endianness so the contents of
18805 * the bpl can be used without byte swapping.
18807 * Returns valid XRI = Success, NO_XRI = Failure.
18810 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
18811 struct lpfc_sglq *sglq)
18813 uint16_t xritag = NO_XRI;
18814 struct ulp_bde64 *bpl = NULL;
18815 struct ulp_bde64 bde;
18816 struct sli4_sge *sgl = NULL;
18817 struct lpfc_dmabuf *dmabuf;
18818 union lpfc_wqe *wqe;
18821 uint32_t offset = 0; /* accumulated offset in the sg request list */
18822 int inbound = 0; /* number of sg reply entries inbound from firmware */
18825 if (!pwqeq || !sglq)
18828 sgl = (struct sli4_sge *)sglq->sgl;
18830 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
18832 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
18833 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
18834 return sglq->sli4_xritag;
18835 numBdes = pwqeq->rsvd2;
18837 /* The addrHigh and addrLow fields within the WQE
18838 * have not been byteswapped yet so there is no
18839 * need to swap them back.
18841 if (pwqeq->context3)
18842 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
18846 bpl = (struct ulp_bde64 *)dmabuf->virt;
18850 for (i = 0; i < numBdes; i++) {
18851 /* Should already be byte swapped. */
18852 sgl->addr_hi = bpl->addrHigh;
18853 sgl->addr_lo = bpl->addrLow;
18855 sgl->word2 = le32_to_cpu(sgl->word2);
18856 if ((i+1) == numBdes)
18857 bf_set(lpfc_sli4_sge_last, sgl, 1);
18859 bf_set(lpfc_sli4_sge_last, sgl, 0);
18860 /* swap the size field back to the cpu so we
18861 * can assign it to the sgl.
18863 bde.tus.w = le32_to_cpu(bpl->tus.w);
18864 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
18865 /* The offsets in the sgl need to be accumulated
18866 * separately for the request and reply lists.
18867 * The request is always first, the reply follows.
18870 case CMD_GEN_REQUEST64_WQE:
18871 /* add up the reply sg entries */
18872 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
18874 /* first inbound? reset the offset */
18877 bf_set(lpfc_sli4_sge_offset, sgl, offset);
18878 bf_set(lpfc_sli4_sge_type, sgl,
18879 LPFC_SGE_TYPE_DATA);
18880 offset += bde.tus.f.bdeSize;
18882 case CMD_FCP_TRSP64_WQE:
18883 bf_set(lpfc_sli4_sge_offset, sgl, 0);
18884 bf_set(lpfc_sli4_sge_type, sgl,
18885 LPFC_SGE_TYPE_DATA);
18887 case CMD_FCP_TSEND64_WQE:
18888 case CMD_FCP_TRECEIVE64_WQE:
18889 bf_set(lpfc_sli4_sge_type, sgl,
18890 bpl->tus.f.bdeFlags);
18894 offset += bde.tus.f.bdeSize;
18895 bf_set(lpfc_sli4_sge_offset, sgl, offset);
18898 sgl->word2 = cpu_to_le32(sgl->word2);
18902 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
18903 /* The addrHigh and addrLow fields of the BDE have not
18904 * been byteswapped yet so they need to be swapped
18905 * before putting them in the sgl.
18907 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
18908 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
18909 sgl->word2 = le32_to_cpu(sgl->word2);
18910 bf_set(lpfc_sli4_sge_last, sgl, 1);
18911 sgl->word2 = cpu_to_le32(sgl->word2);
18912 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
18914 return sglq->sli4_xritag;
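/*
 * Editor's illustrative sketch (not driver code): for GEN_REQUEST style
 * BPLs the SGE offsets above run in two streams: outbound (request)
 * entries accumulate from zero, and the first inbound (reply) entry
 * resets the offset before its own stream accumulates. Hypothetical
 * sizes below.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct bde { uint32_t size; bool inbound; };

int main(void)
{
	struct bde bpl[4] = {
		{ 256, false }, { 512, false },	/* request list */
		{ 1024, true }, { 1024, true },	/* reply list */
	};
	uint32_t offset = 0;
	int i, inbound = 0;

	for (i = 0; i < 4; i++) {
		if (bpl[i].inbound && ++inbound == 1)
			offset = 0;	/* first reply entry resets */
		printf("sge[%d] offset=%u len=%u\n", i, offset, bpl[i].size);
		offset += bpl[i].size;
	}
	return 0;
}
#endif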
18918 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
18919 * @phba: Pointer to HBA context object.
18920 * @ring_number: Base sli ring number
18921 * @pwqe: Pointer to command WQE.
18924 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
18925 struct lpfc_iocbq *pwqe)
18927 union lpfc_wqe *wqe = &pwqe->wqe;
18928 struct lpfc_nvmet_rcv_ctx *ctxp;
18929 struct lpfc_queue *wq;
18930 struct lpfc_sglq *sglq;
18931 struct lpfc_sli_ring *pring;
18932 unsigned long iflags;
18935 /* NVME_LS and NVME_LS ABTS requests. */
18936 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
18937 pring = phba->sli4_hba.nvmels_wq->pring;
18938 spin_lock_irqsave(&pring->ring_lock, iflags);
18939 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
18941 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18944 pwqe->sli4_lxritag = sglq->sli4_lxritag;
18945 pwqe->sli4_xritag = sglq->sli4_xritag;
18946 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
18947 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18950 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
18951 pwqe->sli4_xritag);
18952 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
18954 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18958 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
18959 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18963 /* NVME_FCREQ and NVME_ABTS requests */
18964 if (pwqe->iocb_flag & LPFC_IO_NVME) {
18965 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
18966 pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
18968 spin_lock_irqsave(&pring->ring_lock, iflags);
18969 wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
18970 bf_set(wqe_cqid, &wqe->generic.wqe_com,
18971 phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
18972 ret = lpfc_sli4_wq_put(wq, wqe);
18974 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18977 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
18978 spin_unlock_irqrestore(&pring->ring_lock, iflags);
18982 /* NVMET requests */
18983 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
18984 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
18985 pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
18987 spin_lock_irqsave(&pring->ring_lock, iflags);
18988 ctxp = pwqe->context2;
18989 sglq = ctxp->ctxbuf->sglq;
18990 if (pwqe->sli4_xritag == NO_XRI) {
18991 pwqe->sli4_lxritag = sglq->sli4_lxritag;
18992 pwqe->sli4_xritag = sglq->sli4_xritag;
18994 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
18995 pwqe->sli4_xritag);
18996 wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
18997 bf_set(wqe_cqid, &wqe->generic.wqe_com,
18998 phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
18999 ret = lpfc_sli4_wq_put(wq, wqe);
19001 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19004 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19005 spin_unlock_irqrestore(&pring->ring_lock, iflags);