/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#include <linux/crash_dump.h>

#include <asm/set_memory.h>
#include "lpfc_sli4.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq,
				    struct lpfc_cqe *cqe);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of words required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
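
/*
 * Illustrative note (added commentary, not in the original source): on a
 * 64-bit little-endian host, WQE images are already in SLI byte order, so
 * the SLI4 copy above reduces to straight 64-bit word moves, e.g.
 *
 *	union lpfc_wqe128 wqe;
 *	lpfc_sli4_pcimem_bcopy(&wqe, temp_wqe, sizeof(wqe));
 *
 * Any other configuration falls back to lpfc_sli_pcimem_bcopy(), which
 * copies 32 bits at a time and byte-swaps each word as needed.
 */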

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
					q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
					q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
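
/*
 * Usage sketch (illustrative only): a caller posts a WQE under the ring
 * lock and treats a non-zero return as a full or invalid queue, e.g.
 *
 *	lockdep_assert_held(&pring->ring_lock);
 *	if (lpfc_sli4_wq_put(wq, &wqe))
 *		return IOCB_ERROR;	// ring full (-EBUSY) or bad queue
 */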

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return;

	q->hba_index = index;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}
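
/*
 * Note (added commentary): only one mailbox command is outstanding per
 * port at a time, which is why the MQE pointer can simply be parked in
 * q->phba->mbox above; lpfc_sli4_mq_release() clears it again once the
 * HBA has consumed the entry.
 */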

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed but not popped back to the HBA, then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}
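
/*
 * Worked example (added commentary): with entry_count = 256 and qe_valid
 * starting at 1, an EQAV-capable HBA never has its consumed entries'
 * valid bits rewritten by the host. Instead, when host_index wraps from
 * 255 back to 0, the toggle above flips qe_valid to 0, so the next pass
 * over the ring treats "valid bit == 0" as the new-entry phase.
 */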

static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe = NULL;
	u32 eq_count = 0, cq_count = 0;
	struct lpfc_cqe *cqe = NULL;
	struct lpfc_queue *cq = NULL, *childq = NULL;
	int cqid = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		/* Get the reference to the corresponding CQ */
		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
		cq = NULL;
		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id == cqid) {
				cq = childq;
				break;
			}
		}
		/* If CQ is valid, iterate through it and drop all the CQEs */
		if (cq) {
			cqe = lpfc_sli4_cq_get(cq);
			while (cqe) {
				__lpfc_sli4_consume_cqe(phba, cq, cqe);
				cq_count++;
				cqe = lpfc_sli4_cq_get(cq);
			}
			/* Clear and re-arm the CQ */
			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
			    LPFC_QUEUE_REARM);
			cq_count = 0;
		}
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		eq_count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		     uint8_t rearm)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;
		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}
		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	xchg(&eq->queue_claimed, 0);

rearm_and_exit:
	/* Always clear the EQ. */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

	return count;
}
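
/*
 * Usage sketch (illustrative only): an interrupt handler typically drains
 * and re-arms the EQ in one call,
 *
 *	ccount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
 *
 * while polling paths can pass LPFC_QUEUE_NOARM to leave the EQ unarmed.
 */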

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed but not popped back to the HBA, then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The content of the Header RQE.
 * @drqe: The content of the Data RQE.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entry. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @hq then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
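
/*
 * Usage sketch (illustrative only): receive buffers are posted as a
 * header/data RQE pair carrying the DMA addresses of the two buffer
 * halves, e.g.
 *
 *	hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
 *	hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
 *	drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
 *	drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
 *	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 */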

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq * iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * Returns: 0 = rrq active bit was set for this xri.
 *          < 0 = No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
			     msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
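
/*
 * Usage sketch (illustrative only): I/O completion paths mark an aborted
 * exchange as quarantined until R_A_TOV expires, e.g.
 *
 *	lpfc_set_rrq_active(phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag,
 *			    0, 0);
 *
 * lpfc_test_rrq_active() is then the guard that keeps the SGL allocator
 * from handing the same XRI back out while the RRQ window is open.
 */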

/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not empty
 * then it is successful, it returns pointer to the newly allocated sglq
 * object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = NULL;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
		pring = phba->sli4_hba.nvmels_wq->pring;
	else
		pring = lpfc_phba_elsring(phba);

	lockdep_assert_held(&pring->ring_lock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
			!(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
		    ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}
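
/*
 * Note (added commentary): the loop above is a rotating scan. Any sglq
 * whose XRI still has an RRQ pending for this DID is appended back to the
 * tail and the next head entry is tried; coming back around to the
 * starting sglq means every free XRI is blocked by an RRQ, so the caller
 * gets NULL rather than an unsafe-to-reuse exchange ID.
 */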

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq * iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
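
/*
 * Usage sketch (illustrative only): callers pair this allocation with
 * lpfc_sli_release_iocbq() once the command completes, e.g.
 *
 *	struct lpfc_iocbq *iocbq = lpfc_sli_get_iocbq(phba);
 *
 *	if (!iocbq)
 *		return -ENOMEM;
 *	// ... build and issue the command ...
 *	lpfc_sli_release_iocbq(phba, iocbq);
 */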

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object
 * to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
 * asserted held in the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object to the
 * iocb pool. The iotag in the iocb object does not change for each
 * use of the iocb object. This function clears all other fields of
 * the iocb object when it is freed. The hbalock is asserted held in
 * the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl) {
			if (piocb->iocb_flag & LPFC_IO_NVME)
				lpfc_nvme_cancel_iocb(phba, piocb);
			else
				lpfc_sli_release_iocbq(phba, piocb);
		} else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
	case CMD_SEND_FRAME:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
				__func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
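
/*
 * Example (added commentary): ring event handlers use this mapping to
 * route completions, e.g.
 *
 *	lpfc_sli_iocb_cmd_type(CMD_ELS_REQUEST64_CR) == LPFC_SOL_IOCB
 *	lpfc_sli_iocb_cmd_type(CMD_ABORT_XRI_CN)     == LPFC_ABORT_IOCB
 *	lpfc_sli_iocb_cmd_type(CMD_RCV_ELS_REQ64_CX) == LPFC_UNSOL_IOCB
 */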

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}

/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which need to be
 *            posted to firmware.
 *
 * This function is called to post a new iocb to the firmware. This
 * function copies the new iocb to ring iocb slot and updates the
 * ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object. The hbalock is asserted held in the code path calling
 * this routine.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform firmware
 * that there is pending work to be done for this ring and requests an
 * interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}

/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}
1892 * lpfc_sli_resume_iocb - Process iocbs in the txq
1893 * @phba: Pointer to HBA context object.
1894 * @pring: Pointer to driver SLI ring object.
1896 * This function is called with hbalock held to post pending iocbs
1897 * in the txq to the firmware. This function is called when driver
1898 * detects space available in the ring.
1901 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1904 struct lpfc_iocbq *nextiocb;
1906 lockdep_assert_held(&phba->hbalock);
1910 * (a) there is anything on the txq to send
1912 * (c) link attention events can be processed (fcp ring only)
1913 * (d) IOCB processing is not blocked by the outstanding mbox command.
1916 if (lpfc_is_link_up(phba) &&
1917 (!list_empty(&pring->txq)) &&
1918 (pring->ringno != LPFC_FCP_RING ||
1919 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1921 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1922 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1923 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1926 lpfc_sli_update_ring(phba, pring);
1928 lpfc_sli_update_full_ring(phba, pring);
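/*
 * Usage sketch (illustrative only): lpfc_sli_resume_iocb() must run under
 * the hbalock, per the lockdep assertion above, so a caller that has just
 * freed ring space would do:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	lpfc_sli_resume_iocb(phba, pring);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 */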
1935 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1936 * @phba: Pointer to HBA context object.
1937 * @hbqno: HBQ number.
1939 * This function is called with hbalock held to get the next
1940 * available slot for the given HBQ. If there is free slot
1941 * available for the HBQ it will return pointer to the next available
1942 * HBQ entry else it will return NULL.
1944 static struct lpfc_hbq_entry *
1945 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1947 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1949 lockdep_assert_held(&phba->hbalock);
1951 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1952 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1953 hbqp->next_hbqPutIdx = 0;
1955 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1956 uint32_t raw_index = phba->hbq_get[hbqno];
1957 uint32_t getidx = le32_to_cpu(raw_index);
1959 hbqp->local_hbqGetIdx = getidx;
1961 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1962 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1963 "1802 HBQ %d: local_hbqGetIdx "
1964 "%u is > than hbqp->entry_count %u\n",
1965 hbqno, hbqp->local_hbqGetIdx,
1968 phba->link_state = LPFC_HBA_ERROR;
1972 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1976 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
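/*
 * Conceptual sketch (not driver code): the HBQ is a one-slot-open circular
 * buffer. The put index advances modulo entry_count, and the queue is full
 * when the slot after the put index would collide with the get index:
 *
 *	next = hbqp->hbqPutIdx + 1;
 *	if (next >= hbqp->entry_count)
 *		next = 0;
 *	if (next == hbqp->local_hbqGetIdx)
 *		return NULL;		(ring full, no free slot)
 */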
1981 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1982 * @phba: Pointer to HBA context object.
1984 * This function is called with no lock held to free all the
1985 * hbq buffers while uninitializing the SLI interface. It also
1986 * frees the HBQ buffers returned by the firmware but not yet
1987 * processed by the upper layers.
1990 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1992 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1993 struct hbq_dmabuf *hbq_buf;
1994 unsigned long flags;
1997 hbq_count = lpfc_sli_hbq_count();
1998 /* Return all memory used by all HBQs */
1999 spin_lock_irqsave(&phba->hbalock, flags);
2000 for (i = 0; i < hbq_count; ++i) {
2001 list_for_each_entry_safe(dmabuf, next_dmabuf,
2002 &phba->hbqs[i].hbq_buffer_list, list) {
2003 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2004 list_del(&hbq_buf->dbuf.list);
2005 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2007 phba->hbqs[i].buffer_count = 0;
2010 /* Mark the HBQs not in use */
2011 phba->hbq_in_use = 0;
2012 spin_unlock_irqrestore(&phba->hbalock, flags);
2016 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2017 * @phba: Pointer to HBA context object.
2018 * @hbqno: HBQ number.
2019 * @hbq_buf: Pointer to HBQ buffer.
2021 * This function is called with the hbalock held to post a
2022 * hbq buffer to the firmware. If the function finds an empty
2023 * slot in the HBQ, it will post the buffer. The function returns
2024 * zero if it successfully posts the buffer, otherwise it returns
2025 * an error.
2028 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2029 struct hbq_dmabuf *hbq_buf)
2031 lockdep_assert_held(&phba->hbalock);
2032 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2036 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2037 * @phba: Pointer to HBA context object.
2038 * @hbqno: HBQ number.
2039 * @hbq_buf: Pointer to HBQ buffer.
2041 * This function is called with the hbalock held to post a hbq buffer to the
2042 * firmware. If the function finds an empty slot in the HBQ, it will post the
2043 * buffer and place it on the hbq_buffer_list. The function will return zero if
2044 * it successfully posts the buffer, otherwise it returns an error.
2047 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2048 struct hbq_dmabuf *hbq_buf)
2050 struct lpfc_hbq_entry *hbqe;
2051 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2053 lockdep_assert_held(&phba->hbalock);
2054 /* Get next HBQ entry slot to use */
2055 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2057 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2059 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2060 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2061 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2062 hbqe->bde.tus.f.bdeFlags = 0;
2063 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2064 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2066 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2067 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2069 readl(phba->hbq_put + hbqno);
2070 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
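/*
 * Illustrative note: the HBQ entry carries the buffer's DMA address split
 * into two 32-bit words. putPaddrHigh()/putPaddrLow() are the driver's
 * equivalents of the generic helpers, so the fill above amounts to:
 *
 *	dma_addr_t pa = hbq_buf->dbuf.phys;
 *	u32 hi = upper_32_bits(pa);
 *	u32 lo = lower_32_bits(pa);
 *
 * The adapter expects little-endian descriptor words; on little-endian
 * hosts the le32 conversions are no-ops, and on big-endian hosts
 * le32_to_cpu() and cpu_to_le32() perform the same byte swap, so either
 * name yields the value the hardware wants.
 */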
2077 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2078 * @phba: Pointer to HBA context object.
2079 * @hbqno: HBQ number.
2080 * @hbq_buf: Pointer to HBQ buffer.
2082 * This function is called with the hbalock held to post an RQE to the SLI4
2083 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2084 * the hbq_buffer_list and return zero, otherwise it will return an error.
2087 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2088 struct hbq_dmabuf *hbq_buf)
2091 struct lpfc_rqe hrqe;
2092 struct lpfc_rqe drqe;
2093 struct lpfc_queue *hrq;
2094 struct lpfc_queue *drq;
2096 if (hbqno != LPFC_ELS_HBQ)
2098 hrq = phba->sli4_hba.hdr_rq;
2099 drq = phba->sli4_hba.dat_rq;
2101 lockdep_assert_held(&phba->hbalock);
2102 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2103 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2104 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2105 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2106 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2109 hbq_buf->tag = (rc | (hbqno << 16));
2110 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
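/*
 * Illustrative note: the tag set above packs the HBQ number into the high
 * 16 bits and the buffer's slot (the RQE index on SLI4) into the low 16
 * bits, which is how lpfc_sli_free_hbq() and lpfc_sli_hbqbuf_find() later
 * recover both pieces:
 *
 *	hbqno = hbq_buf->tag >> 16;
 *	index = hbq_buf->tag & 0xffff;
 */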
2114 /* HBQ for ELS and CT traffic. */
2115 static struct lpfc_hbq_init lpfc_els_hbq = {
2120 .ring_mask = (1 << LPFC_ELS_RING),
2127 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2132 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2133 * @phba: Pointer to HBA context object.
2134 * @hbqno: HBQ number.
2135 * @count: Number of HBQ buffers to be posted.
2137 * This function is called with no lock held to post more hbq buffers to the
2138 * given HBQ. The function returns the number of HBQ buffers successfully
2139 * posted.
2142 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2144 uint32_t i, posted = 0;
2145 unsigned long flags;
2146 struct hbq_dmabuf *hbq_buffer;
2147 LIST_HEAD(hbq_buf_list);
2148 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2151 if ((phba->hbqs[hbqno].buffer_count + count) >
2152 lpfc_hbq_defs[hbqno]->entry_count)
2153 count = lpfc_hbq_defs[hbqno]->entry_count -
2154 phba->hbqs[hbqno].buffer_count;
2157 /* Allocate HBQ entries */
2158 for (i = 0; i < count; i++) {
2159 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2162 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2164 /* Check whether HBQ is still in use */
2165 spin_lock_irqsave(&phba->hbalock, flags);
2166 if (!phba->hbq_in_use)
2168 while (!list_empty(&hbq_buf_list)) {
2169 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2171 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2173 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2174 phba->hbqs[hbqno].buffer_count++;
2177 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2179 spin_unlock_irqrestore(&phba->hbalock, flags);
2182 spin_unlock_irqrestore(&phba->hbalock, flags);
2183 while (!list_empty(&hbq_buf_list)) {
2184 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2186 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2192 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2193 * @phba: Pointer to HBA context object.
2196 * This function posts more buffers to the HBQ. This function
2197 * is called with no lock held. The function returns the number of HBQ entries
2198 * successfully allocated.
2201 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2203 if (phba->sli_rev == LPFC_SLI_REV4)
2206 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2207 lpfc_hbq_defs[qno]->add_count);
2211 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2212 * @phba: Pointer to HBA context object.
2213 * @qno: HBQ queue number.
2215 * This function is called from SLI initialization code path with
2216 * no lock held to post initial HBQ buffers to firmware. The
2217 * function returns the number of HBQ entries successfully allocated.
2220 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2222 if (phba->sli_rev == LPFC_SLI_REV4)
2223 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2224 lpfc_hbq_defs[qno]->entry_count);
2226 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2227 lpfc_hbq_defs[qno]->init_count);
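/*
 * Usage sketch (illustrative only): during HBA bring-up the init path
 * seeds each HBQ with its initial buffer count; runtime replenishment
 * uses lpfc_sli_hbqbuf_add_hbqs(), which posts add_count buffers:
 *
 *	if (!lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ))
 *		rc = -ENOMEM;	(no buffers could be posted)
 */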
2231 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2232 * @rb_list: Pointer to the hbq buffer list.
2233 * This function removes the first hbq buffer on an hbq list and returns a
2234 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2236 static struct hbq_dmabuf *
2237 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2239 struct lpfc_dmabuf *d_buf;
2241 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2244 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2248 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2249 * @phba: Pointer to HBA context object.
2250 * @hrq: Pointer to the header receive queue.
2252 * This function removes the first RQ buffer on an RQ buffer list and returns a
2253 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2255 static struct rqb_dmabuf *
2256 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2258 struct lpfc_dmabuf *h_buf;
2259 struct lpfc_rqb *rqbp;
2262 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2263 struct lpfc_dmabuf, list);
2266 rqbp->buffer_count--;
2267 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2271 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2272 * @phba: Pointer to HBA context object.
2273 * @tag: Tag of the hbq buffer.
2275 * This function searches for the hbq buffer associated with the given tag in
2276 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2277 * otherwise it returns NULL.
2279 static struct hbq_dmabuf *
2280 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2282 struct lpfc_dmabuf *d_buf;
2283 struct hbq_dmabuf *hbq_buf;
2287 if (hbqno >= LPFC_MAX_HBQS)
2290 spin_lock_irq(&phba->hbalock);
2291 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2292 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2293 if (hbq_buf->tag == tag) {
2294 spin_unlock_irq(&phba->hbalock);
2298 spin_unlock_irq(&phba->hbalock);
2299 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2300 "1803 Bad hbq tag. Data: x%x x%x\n",
2301 tag, phba->hbqs[tag >> 16].buffer_count);
2306 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2307 * @phba: Pointer to HBA context object.
2308 * @hbq_buffer: Pointer to HBQ buffer.
2310 * This function is called with the hbalock held. It gives back
2311 * the hbq buffer to firmware. If the HBQ does not have space to
2312 * post the buffer, it will free the buffer.
2315 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2320 hbqno = hbq_buffer->tag >> 16;
2321 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2322 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2327 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2328 * @mbxCommand: mailbox command code.
2330 * This function is called by the mailbox event handler function to verify
2331 * that the completed mailbox command is a legitimate mailbox command. If the
2332 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2333 * and the mailbox event handler will take the HBA offline.
2336 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2340 switch (mbxCommand) {
2344 case MBX_WRITE_VPARMS:
2345 case MBX_RUN_BIU_DIAG:
2348 case MBX_CONFIG_LINK:
2349 case MBX_CONFIG_RING:
2350 case MBX_RESET_RING:
2351 case MBX_READ_CONFIG:
2352 case MBX_READ_RCONFIG:
2353 case MBX_READ_SPARM:
2354 case MBX_READ_STATUS:
2358 case MBX_READ_LNK_STAT:
2360 case MBX_UNREG_LOGIN:
2362 case MBX_DUMP_MEMORY:
2363 case MBX_DUMP_CONTEXT:
2366 case MBX_UPDATE_CFG:
2368 case MBX_DEL_LD_ENTRY:
2369 case MBX_RUN_PROGRAM:
2371 case MBX_SET_VARIABLE:
2372 case MBX_UNREG_D_ID:
2373 case MBX_KILL_BOARD:
2374 case MBX_CONFIG_FARP:
2377 case MBX_RUN_BIU_DIAG64:
2378 case MBX_CONFIG_PORT:
2379 case MBX_READ_SPARM64:
2380 case MBX_READ_RPI64:
2381 case MBX_REG_LOGIN64:
2382 case MBX_READ_TOPOLOGY:
2385 case MBX_LOAD_EXP_ROM:
2386 case MBX_ASYNCEVT_ENABLE:
2390 case MBX_PORT_CAPABILITIES:
2391 case MBX_PORT_IOV_CONTROL:
2392 case MBX_SLI4_CONFIG:
2393 case MBX_SLI4_REQ_FTRS:
2395 case MBX_UNREG_FCFI:
2400 case MBX_RESUME_RPI:
2401 case MBX_READ_EVENT_LOG_STATUS:
2402 case MBX_READ_EVENT_LOG:
2403 case MBX_SECURITY_MGMT:
2405 case MBX_ACCESS_VDATA:
2416 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2417 * @phba: Pointer to HBA context object.
2418 * @pmboxq: Pointer to mailbox command.
2420 * This is the completion handler function for mailbox commands issued from
2421 * lpfc_sli_issue_mbox_wait function. This function is called by the
2422 * mailbox event handler function with no lock held. This function
2423 * will wake up the thread waiting on the completion pointed to by
2424 * the context3 field of the mailbox.
2427 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2429 unsigned long drvr_flag;
2430 struct completion *pmbox_done;
2433 * If pmbox_done is empty, the driver thread gave up waiting and
2434 * continued running.
2436 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2437 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2438 pmbox_done = (struct completion *)pmboxq->context3;
2440 complete(pmbox_done);
2441 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
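/*
 * Illustrative sketch of the waiting side of this handshake, condensed
 * from what lpfc_sli_issue_mbox_wait() does: the issuer parks a
 * completion in context3, and this handler completes it:
 *
 *	DECLARE_COMPLETION_ONSTACK(mbox_done);
 *
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	pmboxq->context3 = &mbox_done;
 *	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	if (retval == MBX_BUSY || retval == MBX_SUCCESS)
 *		wait_for_completion_timeout(&mbox_done,
 *					    msecs_to_jiffies(timeout * 1000));
 */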
2446 __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2448 unsigned long iflags;
2450 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2451 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2452 spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
2453 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2454 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2455 spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
2457 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2461 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2462 * @phba: Pointer to HBA context object.
2463 * @pmb: Pointer to mailbox object.
2465 * This function is the default mailbox completion handler. It
2466 * frees the memory resources associated with the completed mailbox
2467 * command. If the completed command is a REG_LOGIN mailbox command,
2468 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2471 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2473 struct lpfc_vport *vport = pmb->vport;
2474 struct lpfc_dmabuf *mp;
2475 struct lpfc_nodelist *ndlp;
2476 struct Scsi_Host *shost;
2480 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2483 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2488 * If a REG_LOGIN succeeded after the node was destroyed or the node
2489 * is in re-discovery, the driver needs to clean up the RPI.
2491 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2492 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2493 !pmb->u.mb.mbxStatus) {
2494 rpi = pmb->u.mb.un.varWords[0];
2495 vpi = pmb->u.mb.un.varRegLogin.vpi;
2496 if (phba->sli_rev == LPFC_SLI_REV4)
2497 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2498 lpfc_unreg_login(phba, vpi, rpi, pmb);
2500 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2501 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2502 if (rc != MBX_NOT_FINISHED)
2506 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2507 !(phba->pport->load_flag & FC_UNLOADING) &&
2508 !pmb->u.mb.mbxStatus) {
2509 shost = lpfc_shost_from_vport(vport);
2510 spin_lock_irq(shost->host_lock);
2511 vport->vpi_state |= LPFC_VPI_REGISTERED;
2512 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2513 spin_unlock_irq(shost->host_lock);
2516 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2517 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2519 pmb->ctx_buf = NULL;
2520 pmb->ctx_ndlp = NULL;
2523 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2524 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2526 /* Check to see if there are any deferred events to process */
2530 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2531 "1438 UNREG cmpl deferred mbox x%x "
2532 "on NPort x%x Data: x%x x%x %px\n",
2533 ndlp->nlp_rpi, ndlp->nlp_DID,
2534 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
2536 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2537 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2538 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2539 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2540 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2542 __lpfc_sli_rpi_release(vport, ndlp);
2544 if (vport->load_flag & FC_UNLOADING)
2546 pmb->ctx_ndlp = NULL;
2550 /* Check security permission status on INIT_LINK mailbox command */
2551 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2552 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2553 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2554 "2860 SLI authentication is required "
2555 "for INIT_LINK but has not done yet\n");
2557 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2558 lpfc_sli4_mbox_cmd_free(phba, pmb);
2560 mempool_free(pmb, phba->mbox_mem_pool);
2563 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2564 * @phba: Pointer to HBA context object.
2565 * @pmb: Pointer to mailbox object.
2567 * This function is the unreg rpi mailbox completion handler. It
2568 * frees the memory resources associated with the completed mailbox
2569 * command. An additional reference is put on the ndlp to prevent
2570 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2571 * the unreg mailbox command completes; this routine puts that
2572 * reference back.
2576 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2578 struct lpfc_vport *vport = pmb->vport;
2579 struct lpfc_nodelist *ndlp;
2581 ndlp = pmb->ctx_ndlp;
2582 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2583 if (phba->sli_rev == LPFC_SLI_REV4 &&
2584 (bf_get(lpfc_sli_intf_if_type,
2585 &phba->sli4_hba.sli_intf) >=
2586 LPFC_SLI_INTF_IF_TYPE_2)) {
2589 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2590 "0010 UNREG_LOGIN vpi:%x "
2591 "rpi:%x DID:%x defer x%x flg x%x "
2593 vport->vpi, ndlp->nlp_rpi,
2594 ndlp->nlp_DID, ndlp->nlp_defer_did,
2596 ndlp->nlp_usg_map, ndlp);
2597 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2600 /* Check to see if there are any deferred
2603 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2604 (ndlp->nlp_defer_did !=
2605 NLP_EVT_NOTHING_PENDING)) {
2607 vport, KERN_INFO, LOG_DISCOVERY,
2608 "4111 UNREG cmpl deferred "
2610 "NPort x%x Data: x%x x%px\n",
2611 ndlp->nlp_rpi, ndlp->nlp_DID,
2612 ndlp->nlp_defer_did, ndlp);
2613 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2614 ndlp->nlp_defer_did =
2615 NLP_EVT_NOTHING_PENDING;
2616 lpfc_issue_els_plogi(
2617 vport, ndlp->nlp_DID, 0);
2619 __lpfc_sli_rpi_release(vport, ndlp);
2625 mempool_free(pmb, phba->mbox_mem_pool);
2629 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2630 * @phba: Pointer to HBA context object.
2632 * This function is called with no lock held. This function processes all
2633 * the completed mailbox commands and gives them to the upper layers. The interrupt
2634 * service routine processes the mailbox completion interrupt and adds completed
2635 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2636 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
2637 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2638 * function returns the mailbox commands to the upper layer by calling the
2639 * completion handler function of each mailbox.
2642 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2649 phba->sli.slistat.mbox_event++;
2651 /* Get all completed mailbox buffers into the cmplq */
2652 spin_lock_irq(&phba->hbalock);
2653 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2654 spin_unlock_irq(&phba->hbalock);
2656 /* Get a Mailbox buffer to setup mailbox commands for callback */
2658 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2664 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2666 lpfc_debugfs_disc_trc(pmb->vport,
2667 LPFC_DISC_TRC_MBOX_VPORT,
2668 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2669 (uint32_t)pmbox->mbxCommand,
2670 pmbox->un.varWords[0],
2671 pmbox->un.varWords[1]);
2674 lpfc_debugfs_disc_trc(phba->pport,
2676 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2677 (uint32_t)pmbox->mbxCommand,
2678 pmbox->un.varWords[0],
2679 pmbox->un.varWords[1]);
2684 * It is a fatal error if an unknown mbox command completes.
2686 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2688 /* Unknown mailbox command compl */
2689 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2690 "(%d):0323 Unknown Mailbox command "
2691 "x%x (x%x/x%x) Cmpl\n",
2692 pmb->vport ? pmb->vport->vpi :
2695 lpfc_sli_config_mbox_subsys_get(phba,
2697 lpfc_sli_config_mbox_opcode_get(phba,
2699 phba->link_state = LPFC_HBA_ERROR;
2700 phba->work_hs = HS_FFER3;
2701 lpfc_handle_eratt(phba);
2705 if (pmbox->mbxStatus) {
2706 phba->sli.slistat.mbox_stat_err++;
2707 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2708 /* Mbox cmd cmpl error - RETRYing */
2709 lpfc_printf_log(phba, KERN_INFO,
2711 "(%d):0305 Mbox cmd cmpl "
2712 "error - RETRYing Data: x%x "
2713 "(x%x/x%x) x%x x%x x%x\n",
2714 pmb->vport ? pmb->vport->vpi :
2717 lpfc_sli_config_mbox_subsys_get(phba,
2719 lpfc_sli_config_mbox_opcode_get(phba,
2722 pmbox->un.varWords[0],
2723 pmb->vport ? pmb->vport->port_state :
2724 LPFC_VPORT_UNKNOWN);
2725 pmbox->mbxStatus = 0;
2726 pmbox->mbxOwner = OWN_HOST;
2727 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2728 if (rc != MBX_NOT_FINISHED)
2733 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2734 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2735 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
2736 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2738 pmb->vport ? pmb->vport->vpi : 0,
2740 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2741 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2743 *((uint32_t *) pmbox),
2744 pmbox->un.varWords[0],
2745 pmbox->un.varWords[1],
2746 pmbox->un.varWords[2],
2747 pmbox->un.varWords[3],
2748 pmbox->un.varWords[4],
2749 pmbox->un.varWords[5],
2750 pmbox->un.varWords[6],
2751 pmbox->un.varWords[7],
2752 pmbox->un.varWords[8],
2753 pmbox->un.varWords[9],
2754 pmbox->un.varWords[10]);
2757 pmb->mbox_cmpl(phba, pmb);
2763 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2764 * @phba: Pointer to HBA context object.
2765 * @pring: Pointer to driver SLI ring object.
2766 * @tag: Buffer tag.
2768 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
2769 * is set in the tag, the buffer was posted for a particular exchange and
2770 * the function will return the buffer without replacing it.
2771 * If the buffer is for unsolicited ELS or CT traffic, this function
2772 * returns the buffer and also posts another buffer to the firmware.
2774 static struct lpfc_dmabuf *
2775 lpfc_sli_get_buff(struct lpfc_hba *phba,
2776 struct lpfc_sli_ring *pring,
2779 struct hbq_dmabuf *hbq_entry;
2781 if (tag & QUE_BUFTAG_BIT)
2782 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2783 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2786 return &hbq_entry->dbuf;
2790 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
2791 * containing an NVME LS request.
2792 * @phba: pointer to lpfc hba data structure.
2793 * @piocb: pointer to the iocbq struct representing the sequence starting
2794 * frame.
2796 * This routine initially validates the NVME LS, validates there is a login
2797 * with the port that sent the LS, and then calls the appropriate nvme host
2798 * or target LS request handler.
2801 lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
2803 struct lpfc_nodelist *ndlp;
2804 struct lpfc_dmabuf *d_buf;
2805 struct hbq_dmabuf *nvmebuf;
2806 struct fc_frame_header *fc_hdr;
2807 struct lpfc_async_xchg_ctx *axchg = NULL;
2808 char *failwhy = NULL;
2809 uint32_t oxid, sid, did, fctl, size;
2812 d_buf = piocb->context2;
2814 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2815 fc_hdr = nvmebuf->hbuf.virt;
2816 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2817 sid = sli4_sid_from_fc_hdr(fc_hdr);
2818 did = sli4_did_from_fc_hdr(fc_hdr);
2819 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
2820 fc_hdr->fh_f_ctl[1] << 8 |
2821 fc_hdr->fh_f_ctl[2]);
2822 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
2824 lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
2827 if (phba->pport->load_flag & FC_UNLOADING) {
2828 failwhy = "Driver Unloading";
2829 } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
2830 failwhy = "NVME FC4 Disabled";
2831 } else if (!phba->nvmet_support && !phba->pport->localport) {
2832 failwhy = "No Localport";
2833 } else if (phba->nvmet_support && !phba->targetport) {
2834 failwhy = "No Targetport";
2835 } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
2836 failwhy = "Bad NVME LS R_CTL";
2837 } else if (unlikely((fctl & 0x00FF0000) !=
2838 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
2839 failwhy = "Bad NVME LS F_CTL";
2841 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
2843 failwhy = "No CTX memory";
2846 if (unlikely(failwhy)) {
2847 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2848 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
2849 sid, oxid, failwhy);
2853 /* validate the source of the LS is logged in */
2854 ndlp = lpfc_findnode_did(phba->pport, sid);
2855 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2856 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2857 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2858 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2859 "6216 NVME Unsol rcv: No ndlp: "
2860 "NPort_ID x%x oxid x%x\n",
2871 axchg->state = LPFC_NVME_STE_LS_RCV;
2872 axchg->entry_cnt = 1;
2873 axchg->rqb_buffer = (void *)nvmebuf;
2874 axchg->hdwq = &phba->sli4_hba.hdwq[0];
2875 axchg->payload = nvmebuf->dbuf.virt;
2876 INIT_LIST_HEAD(&axchg->list);
2878 if (phba->nvmet_support)
2879 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
2881 ret = lpfc_nvme_handle_lsreq(phba, axchg);
2883 /* if zero, LS was successfully handled. If non-zero, LS not handled */
2887 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2888 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
2889 "NVMe%s handler failed %d\n",
2891 (phba->nvmet_support) ? "T" : "I", ret);
2895 /* recycle receive buffer */
2896 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2898 /* If start of new exchange, abort it */
2899 if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
2900 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
2907 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2908 * @phba: Pointer to HBA context object.
2909 * @pring: Pointer to driver SLI ring object.
2910 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2911 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2912 * @fch_type: the type for the first frame of the sequence.
2914 * This function is called with no lock held. This function uses the r_ctl and
2915 * type of the received sequence to find the correct callback function to call
2916 * to process the sequence.
2919 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2920 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2927 lpfc_nvme_unsol_ls_handler(phba, saveq);
2933 /* Unsolicited Responses */
2934 if (pring->prt[0].profile) {
2935 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2936 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2940 /* We must search, based on rctl / type
2941 for the right routine */
2942 for (i = 0; i < pring->num_mask; i++) {
2943 if ((pring->prt[i].rctl == fch_r_ctl) &&
2944 (pring->prt[i].type == fch_type)) {
2945 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2946 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2947 (phba, pring, saveq);
2955 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2956 * @phba: Pointer to HBA context object.
2957 * @pring: Pointer to driver SLI ring object.
2958 * @saveq: Pointer to the unsolicited iocb.
2960 * This function is called with no lock held by the ring event handler
2961 * when there is an unsolicited iocb posted to the response ring by the
2962 * firmware. This function gets the buffer associated with the iocbs
2963 * and calls the event handler for the ring. This function handles both
2964 * qring buffers and hbq buffers.
2965 * When the function returns 1, the caller can free the iocb object;
2966 * otherwise, upper layer functions will free the iocb objects.
2969 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2970 struct lpfc_iocbq *saveq)
2974 uint32_t Rctl, Type;
2975 struct lpfc_iocbq *iocbq;
2976 struct lpfc_dmabuf *dmzbuf;
2978 irsp = &(saveq->iocb);
2980 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2981 if (pring->lpfc_sli_rcv_async_status)
2982 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2984 lpfc_printf_log(phba,
2987 "0316 Ring %d handler: unexpected "
2988 "ASYNC_STATUS iocb received evt_code "
2991 irsp->un.asyncstat.evt_code);
2995 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2996 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2997 if (irsp->ulpBdeCount > 0) {
2998 dmzbuf = lpfc_sli_get_buff(phba, pring,
2999 irsp->un.ulpWord[3]);
3000 lpfc_in_buf_free(phba, dmzbuf);
3003 if (irsp->ulpBdeCount > 1) {
3004 dmzbuf = lpfc_sli_get_buff(phba, pring,
3005 irsp->unsli3.sli3Words[3]);
3006 lpfc_in_buf_free(phba, dmzbuf);
3009 if (irsp->ulpBdeCount > 2) {
3010 dmzbuf = lpfc_sli_get_buff(phba, pring,
3011 irsp->unsli3.sli3Words[7]);
3012 lpfc_in_buf_free(phba, dmzbuf);
3018 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3019 if (irsp->ulpBdeCount != 0) {
3020 saveq->context2 = lpfc_sli_get_buff(phba, pring,
3021 irsp->un.ulpWord[3]);
3022 if (!saveq->context2)
3023 lpfc_printf_log(phba,
3026 "0341 Ring %d Cannot find buffer for "
3027 "an unsolicited iocb. tag 0x%x\n",
3029 irsp->un.ulpWord[3]);
3031 if (irsp->ulpBdeCount == 2) {
3032 saveq->context3 = lpfc_sli_get_buff(phba, pring,
3033 irsp->unsli3.sli3Words[7]);
3034 if (!saveq->context3)
3035 lpfc_printf_log(phba,
3038 "0342 Ring %d Cannot find buffer for an"
3039 " unsolicited iocb. tag 0x%x\n",
3041 irsp->unsli3.sli3Words[7]);
3043 list_for_each_entry(iocbq, &saveq->list, list) {
3044 irsp = &(iocbq->iocb);
3045 if (irsp->ulpBdeCount != 0) {
3046 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
3047 irsp->un.ulpWord[3]);
3048 if (!iocbq->context2)
3049 lpfc_printf_log(phba,
3052 "0343 Ring %d Cannot find "
3053 "buffer for an unsolicited iocb"
3054 ". tag 0x%x\n", pring->ringno,
3055 irsp->un.ulpWord[3]);
3057 if (irsp->ulpBdeCount == 2) {
3058 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
3059 irsp->unsli3.sli3Words[7]);
3060 if (!iocbq->context3)
3061 lpfc_printf_log(phba,
3064 "0344 Ring %d Cannot find "
3065 "buffer for an unsolicited "
3068 irsp->unsli3.sli3Words[7]);
3072 if (irsp->ulpBdeCount != 0 &&
3073 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3074 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3077 /* search continue save q for same XRI */
3078 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3079 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3080 saveq->iocb.unsli3.rcvsli3.ox_id) {
3081 list_add_tail(&saveq->list, &iocbq->list);
3087 list_add_tail(&saveq->clist,
3088 &pring->iocb_continue_saveq);
3089 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3090 list_del_init(&iocbq->clist);
3092 irsp = &(saveq->iocb);
3096 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3097 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3098 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3099 Rctl = FC_RCTL_ELS_REQ;
3102 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3103 Rctl = w5p->hcsw.Rctl;
3104 Type = w5p->hcsw.Type;
3106 /* Firmware Workaround */
3107 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3108 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3109 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3110 Rctl = FC_RCTL_ELS_REQ;
3112 w5p->hcsw.Rctl = Rctl;
3113 w5p->hcsw.Type = Type;
3117 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3118 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3119 "0313 Ring %d handler: unexpected Rctl x%x "
3120 "Type x%x received\n",
3121 pring->ringno, Rctl, Type);
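/*
 * Illustrative note: the prt[] masks consulted by
 * lpfc_complete_unsol_iocb() are registered at ring-setup time, roughly:
 *
 *	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
 *	pring->prt[0].type = FC_TYPE_ELS;
 *	pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event;
 *
 * so an incoming sequence whose first frame carries that R_CTL/TYPE pair
 * is routed to the ELS unsolicited-event handler.
 */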
3127 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3128 * @phba: Pointer to HBA context object.
3129 * @pring: Pointer to driver SLI ring object.
3130 * @prspiocb: Pointer to response iocb object.
3132 * This function looks up the iocb_lookup table to get the command iocb
3133 * corresponding to the given response iocb using the iotag of the
3134 * response iocb. The driver calls this function with the hbalock held
3135 * for SLI3 ports or the ring lock held for SLI4 ports.
3136 * This function returns the command iocb object if it finds the command
3137 * iocb else returns NULL.
3139 static struct lpfc_iocbq *
3140 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3141 struct lpfc_sli_ring *pring,
3142 struct lpfc_iocbq *prspiocb)
3144 struct lpfc_iocbq *cmd_iocb = NULL;
3146 spinlock_t *temp_lock = NULL;
3147 unsigned long iflag = 0;
3149 if (phba->sli_rev == LPFC_SLI_REV4)
3150 temp_lock = &pring->ring_lock;
3152 temp_lock = &phba->hbalock;
3154 spin_lock_irqsave(temp_lock, iflag);
3155 iotag = prspiocb->iocb.ulpIoTag;
3157 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3158 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3159 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3160 /* remove from txcmpl queue list */
3161 list_del_init(&cmd_iocb->list);
3162 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3163 pring->txcmplq_cnt--;
3164 spin_unlock_irqrestore(temp_lock, iflag);
3169 spin_unlock_irqrestore(temp_lock, iflag);
3170 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3171 "0317 iotag x%x is out of "
3172 "range: max iotag x%x wd0 x%x\n",
3173 iotag, phba->sli.last_iotag,
3174 *(((uint32_t *) &prspiocb->iocb) + 7));
3179 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3180 * @phba: Pointer to HBA context object.
3181 * @pring: Pointer to driver SLI ring object.
3182 * @iotag: IOCB tag.
3184 * This function looks up the iocb_lookup table to get the command iocb
3185 * corresponding to the given iotag. The driver calls this function with
3186 * the ring lock held because this function is an SLI4 port only helper.
3187 * This function returns the command iocb object if it finds the command
3188 * iocb else returns NULL.
3190 static struct lpfc_iocbq *
3191 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3192 struct lpfc_sli_ring *pring, uint16_t iotag)
3194 struct lpfc_iocbq *cmd_iocb = NULL;
3195 spinlock_t *temp_lock = NULL;
3196 unsigned long iflag = 0;
3198 if (phba->sli_rev == LPFC_SLI_REV4)
3199 temp_lock = &pring->ring_lock;
3201 temp_lock = &phba->hbalock;
3203 spin_lock_irqsave(temp_lock, iflag);
3204 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3205 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3206 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3207 /* remove from txcmpl queue list */
3208 list_del_init(&cmd_iocb->list);
3209 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3210 pring->txcmplq_cnt--;
3211 spin_unlock_irqrestore(temp_lock, iflag);
3216 spin_unlock_irqrestore(temp_lock, iflag);
3217 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3218 "0372 iotag x%x lookup error: max iotag (x%x) "
3220 iotag, phba->sli.last_iotag,
3221 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3226 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3227 * @phba: Pointer to HBA context object.
3228 * @pring: Pointer to driver SLI ring object.
3229 * @saveq: Pointer to the response iocb to be processed.
3231 * This function is called by the ring event handler for non-fcp
3232 * rings when there is a new response iocb in the response ring.
3233 * The caller is not required to hold any locks. This function
3234 * gets the command iocb associated with the response iocb and
3235 * calls the completion handler for the command iocb. If there
3236 * is no completion handler, the function will free the resources
3237 * associated with command iocb. If the response iocb is for
3238 * an already aborted command iocb, the status of the completion
3239 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3240 * This function always returns 1.
3243 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3244 struct lpfc_iocbq *saveq)
3246 struct lpfc_iocbq *cmdiocbp;
3248 unsigned long iflag;
3250 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3252 if (cmdiocbp->iocb_cmpl) {
3254 * If an ELS command failed send an event to mgmt
3257 if (saveq->iocb.ulpStatus &&
3258 (pring->ringno == LPFC_ELS_RING) &&
3259 (cmdiocbp->iocb.ulpCommand ==
3260 CMD_ELS_REQUEST64_CR))
3261 lpfc_send_els_failure_event(phba,
3265 * Post all ELS completions to the worker thread.
3266 * All other are passed to the completion callback.
3268 if (pring->ringno == LPFC_ELS_RING) {
3269 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3270 (cmdiocbp->iocb_flag &
3271 LPFC_DRIVER_ABORTED)) {
3272 spin_lock_irqsave(&phba->hbalock,
3274 cmdiocbp->iocb_flag &=
3275 ~LPFC_DRIVER_ABORTED;
3276 spin_unlock_irqrestore(&phba->hbalock,
3278 saveq->iocb.ulpStatus =
3279 IOSTAT_LOCAL_REJECT;
3280 saveq->iocb.un.ulpWord[4] =
3283 /* Firmware could still be in progress
3284 * of DMAing payload, so don't free data
3285 * buffer till after a hbeat.
3287 spin_lock_irqsave(&phba->hbalock,
3289 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3290 spin_unlock_irqrestore(&phba->hbalock,
3293 if (phba->sli_rev == LPFC_SLI_REV4) {
3294 if (saveq->iocb_flag &
3295 LPFC_EXCHANGE_BUSY) {
3296 /* Set cmdiocb flag for the
3297 * exchange busy so sgl (xri)
3298 * will not be released until
3299 * the abort xri is received
3303 &phba->hbalock, iflag);
3304 cmdiocbp->iocb_flag |=
3306 spin_unlock_irqrestore(
3307 &phba->hbalock, iflag);
3309 if (cmdiocbp->iocb_flag &
3310 LPFC_DRIVER_ABORTED) {
3312 * Clear LPFC_DRIVER_ABORTED
3313 * bit in case it was driver
3317 &phba->hbalock, iflag);
3318 cmdiocbp->iocb_flag &=
3319 ~LPFC_DRIVER_ABORTED;
3320 spin_unlock_irqrestore(
3321 &phba->hbalock, iflag);
3322 cmdiocbp->iocb.ulpStatus =
3323 IOSTAT_LOCAL_REJECT;
3324 cmdiocbp->iocb.un.ulpWord[4] =
3325 IOERR_ABORT_REQUESTED;
3327 * For SLI4, irsiocb contains
3328 * NO_XRI in sli_xritag, it
3329 * shall not affect releasing
3330 * sgl (xri) process.
3332 saveq->iocb.ulpStatus =
3333 IOSTAT_LOCAL_REJECT;
3334 saveq->iocb.un.ulpWord[4] =
3337 &phba->hbalock, iflag);
3339 LPFC_DELAY_MEM_FREE;
3340 spin_unlock_irqrestore(
3341 &phba->hbalock, iflag);
3345 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3347 lpfc_sli_release_iocbq(phba, cmdiocbp);
3350 * Unknown initiating command based on the response iotag.
3351 * This could be the case on the ELS ring because of
3354 if (pring->ringno != LPFC_ELS_RING) {
3356 * Ring <ringno> handler: unexpected completion IoTag
3359 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3360 "0322 Ring %d handler: "
3361 "unexpected completion IoTag x%x "
3362 "Data: x%x x%x x%x x%x\n",
3364 saveq->iocb.ulpIoTag,
3365 saveq->iocb.ulpStatus,
3366 saveq->iocb.un.ulpWord[4],
3367 saveq->iocb.ulpCommand,
3368 saveq->iocb.ulpContext);
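/*
 * Illustrative note: because this routine rewrites the status of an
 * aborted command, a completion handler can detect a driver-initiated
 * abort with a check along these lines:
 *
 *	if (rspiocb->iocb.ulpStatus == IOSTAT_LOCAL_REJECT &&
 *	    (rspiocb->iocb.un.ulpWord[4] & IOERR_PARAM_MASK) ==
 *	    IOERR_SLI_ABORTED)
 *		handle_aborted_command();	(hypothetical helper)
 */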
3376 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3377 * @phba: Pointer to HBA context object.
3378 * @pring: Pointer to driver SLI ring object.
3380 * This function is called from the iocb ring event handlers when the
3381 * put pointer is ahead of the get pointer for a ring. This function signals
3382 * an error attention condition to the worker thread and the worker
3383 * thread will transition the HBA to the offline state.
3386 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3388 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3390 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3391 * rsp ring <portRspMax>
3393 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3394 "0312 Ring %d handler: portRspPut %d "
3395 "is bigger than rsp ring %d\n",
3396 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3397 pring->sli.sli3.numRiocb);
3399 phba->link_state = LPFC_HBA_ERROR;
3402 * All error attention handlers are posted to
3405 phba->work_ha |= HA_ERATT;
3406 phba->work_hs = HS_FFER3;
3408 lpfc_worker_wake_up(phba);
3414 * lpfc_poll_eratt - Error attention polling timer timeout handler
3415 * @t: Context to fetch pointer to address of HBA context object from.
3417 * This function is invoked by the Error Attention polling timer when the
3418 * timer times out. It will check the SLI Error Attention register for
3419 * possible attention events. If so, it will post an Error Attention event
3420 * and wake up worker thread to process it. Otherwise, it will set up the
3421 * Error Attention polling timer for the next poll.
3423 void lpfc_poll_eratt(struct timer_list *t)
3425 struct lpfc_hba *phba;
3427 uint64_t sli_intr, cnt;
3429 phba = from_timer(phba, t, eratt_poll);
3431 /* Here we will also keep track of interrupts per sec of the hba */
3432 sli_intr = phba->sli.slistat.sli_intr;
3434 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3435 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3438 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3440 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3441 do_div(cnt, phba->eratt_poll_interval);
3442 phba->sli.slistat.sli_ips = cnt;
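	/*
	 * Note on do_div() above: it divides its u64 operand in place,
	 * leaving the quotient in the operand and returning the remainder
	 * as a u32, e.g.:
	 *
	 *	u64 n = 1000;
	 *	u32 rem = do_div(n, 7);		(n == 142, rem == 6)
	 */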
3444 phba->sli.slistat.sli_prev_intr = sli_intr;
3446 /* Check chip HA register for error event */
3447 eratt = lpfc_sli_check_eratt(phba);
3450 /* Tell the worker thread there is work to do */
3451 lpfc_worker_wake_up(phba);
3453 /* Restart the timer for next eratt poll */
3454 mod_timer(&phba->eratt_poll,
3456 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3462 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3463 * @phba: Pointer to HBA context object.
3464 * @pring: Pointer to driver SLI ring object.
3465 * @mask: Host attention register mask for this ring.
3467 * This function is called from the interrupt context when there is a ring
3468 * event for the fcp ring. The caller does not hold any lock.
3469 * The function processes each response iocb in the response ring until it
3470 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3471 * LE bit set. The function will call the completion handler of the command iocb
3472 * if the response iocb indicates a completion for a command iocb or it is
3473 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3474 * function if this is an unsolicited iocb.
3475 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3476 * to check it explicitly.
3479 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3480 struct lpfc_sli_ring *pring, uint32_t mask)
3482 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3483 IOCB_t *irsp = NULL;
3484 IOCB_t *entry = NULL;
3485 struct lpfc_iocbq *cmdiocbq = NULL;
3486 struct lpfc_iocbq rspiocbq;
3488 uint32_t portRspPut, portRspMax;
3490 lpfc_iocb_type type;
3491 unsigned long iflag;
3492 uint32_t rsp_cmpl = 0;
3494 spin_lock_irqsave(&phba->hbalock, iflag);
3495 pring->stats.iocb_event++;
3498 * The next available response entry should never exceed the maximum
3499 * entries. If it does, treat it as an adapter hardware error.
3501 portRspMax = pring->sli.sli3.numRiocb;
3502 portRspPut = le32_to_cpu(pgp->rspPutInx);
3503 if (unlikely(portRspPut >= portRspMax)) {
3504 lpfc_sli_rsp_pointers_error(phba, pring);
3505 spin_unlock_irqrestore(&phba->hbalock, iflag);
3508 if (phba->fcp_ring_in_use) {
3509 spin_unlock_irqrestore(&phba->hbalock, iflag);
3512 phba->fcp_ring_in_use = 1;
3515 while (pring->sli.sli3.rspidx != portRspPut) {
3517 * Fetch an entry off the ring and copy it into a local data
3518 * structure. The copy involves a byte-swap since the
3519 * network byte order and pci byte orders are different.
3521 entry = lpfc_resp_iocb(phba, pring);
3522 phba->last_completion_time = jiffies;
3524 if (++pring->sli.sli3.rspidx >= portRspMax)
3525 pring->sli.sli3.rspidx = 0;
3527 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3528 (uint32_t *) &rspiocbq.iocb,
3529 phba->iocb_rsp_size);
3530 INIT_LIST_HEAD(&(rspiocbq.list));
3531 irsp = &rspiocbq.iocb;
3533 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3534 pring->stats.iocb_rsp++;
3537 if (unlikely(irsp->ulpStatus)) {
3539 * If resource errors reported from HBA, reduce
3540 * queuedepths of the SCSI device.
3542 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3543 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3544 IOERR_NO_RESOURCES)) {
3545 spin_unlock_irqrestore(&phba->hbalock, iflag);
3546 phba->lpfc_rampdown_queue_depth(phba);
3547 spin_lock_irqsave(&phba->hbalock, iflag);
3550 /* Rsp ring <ringno> error: IOCB */
3551 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3552 "0336 Rsp Ring %d error: IOCB Data: "
3553 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3555 irsp->un.ulpWord[0],
3556 irsp->un.ulpWord[1],
3557 irsp->un.ulpWord[2],
3558 irsp->un.ulpWord[3],
3559 irsp->un.ulpWord[4],
3560 irsp->un.ulpWord[5],
3561 *(uint32_t *)&irsp->un1,
3562 *((uint32_t *)&irsp->un1 + 1));
3566 case LPFC_ABORT_IOCB:
3569 * Idle exchange closed via ABTS from port. No iocb
3570 * resources need to be recovered.
3572 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3573 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3574 "0333 IOCB cmd 0x%x"
3575 " processed. Skipping"
3581 spin_unlock_irqrestore(&phba->hbalock, iflag);
3582 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3584 spin_lock_irqsave(&phba->hbalock, iflag);
3585 if (unlikely(!cmdiocbq))
3587 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3588 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3589 if (cmdiocbq->iocb_cmpl) {
3590 spin_unlock_irqrestore(&phba->hbalock, iflag);
3591 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3593 spin_lock_irqsave(&phba->hbalock, iflag);
3596 case LPFC_UNSOL_IOCB:
3597 spin_unlock_irqrestore(&phba->hbalock, iflag);
3598 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3599 spin_lock_irqsave(&phba->hbalock, iflag);
3602 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3603 char adaptermsg[LPFC_MAX_ADPTMSG];
3604 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3605 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3607 dev_warn(&((phba->pcidev)->dev),
3609 phba->brd_no, adaptermsg);
3611 /* Unknown IOCB command */
3612 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3613 "0334 Unknown IOCB command "
3614 "Data: x%x, x%x x%x x%x x%x\n",
3615 type, irsp->ulpCommand,
3624 * The response IOCB has been processed. Update the ring
3625 * pointer in SLIM. If the port response put pointer has not
3626 * been updated, sync the pgp->rspPutInx and fetch the new port
3627 * response put pointer.
3629 writel(pring->sli.sli3.rspidx,
3630 &phba->host_gp[pring->ringno].rspGetInx);
3632 if (pring->sli.sli3.rspidx == portRspPut)
3633 portRspPut = le32_to_cpu(pgp->rspPutInx);
3636 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3637 pring->stats.iocb_rsp_full++;
3638 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3639 writel(status, phba->CAregaddr);
3640 readl(phba->CAregaddr);
3642 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3643 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3644 pring->stats.iocb_cmd_empty++;
3646 /* Force update of the local copy of cmdGetInx */
3647 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3648 lpfc_sli_resume_iocb(phba, pring);
3650 if ((pring->lpfc_sli_cmd_available))
3651 (pring->lpfc_sli_cmd_available) (phba, pring);
3655 phba->fcp_ring_in_use = 0;
3656 spin_unlock_irqrestore(&phba->hbalock, iflag);
3661 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3662 * @phba: Pointer to HBA context object.
3663 * @pring: Pointer to driver SLI ring object.
3664 * @rspiocbp: Pointer to driver response IOCB object.
3666 * This function is called from the worker thread when there is a slow-path
3667 * response IOCB to process. This function chains all the response iocbs until
3668 * seeing the iocb with the LE bit set. The function will call
3669 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3670 * completion of a command iocb. The function will call the
3671 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3672 * The function frees the resources or calls the completion handler if this
3673 * iocb is an abort completion. The function returns NULL when the response
3674 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3675 * this function shall chain the iocb on to the iocb_continueq and return the
3676 * response iocb passed in.
3678 static struct lpfc_iocbq *
3679 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3680 struct lpfc_iocbq *rspiocbp)
3682 struct lpfc_iocbq *saveq;
3683 struct lpfc_iocbq *cmdiocbp;
3684 struct lpfc_iocbq *next_iocb;
3685 IOCB_t *irsp = NULL;
3686 uint32_t free_saveq;
3687 uint8_t iocb_cmd_type;
3688 lpfc_iocb_type type;
3689 unsigned long iflag;
3692 spin_lock_irqsave(&phba->hbalock, iflag);
3693 /* First add the response iocb to the continueq list */
3694 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3695 pring->iocb_continueq_cnt++;
3697 /* Now, determine whether the list is completed for processing */
3698 irsp = &rspiocbp->iocb;
3701 * By default, the driver expects to free all resources
3702 * associated with this iocb completion.
3705 saveq = list_get_first(&pring->iocb_continueq,
3706 struct lpfc_iocbq, list);
3707 irsp = &(saveq->iocb);
3708 list_del_init(&pring->iocb_continueq);
3709 pring->iocb_continueq_cnt = 0;
3711 pring->stats.iocb_rsp++;
3714 * If resource errors reported from HBA, reduce
3715 * queuedepths of the SCSI device.
3717 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3718 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3719 IOERR_NO_RESOURCES)) {
3720 spin_unlock_irqrestore(&phba->hbalock, iflag);
3721 phba->lpfc_rampdown_queue_depth(phba);
3722 spin_lock_irqsave(&phba->hbalock, iflag);
3725 if (irsp->ulpStatus) {
3726 /* Rsp ring <ringno> error: IOCB */
3727 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3728 "0328 Rsp Ring %d error: "
3733 "x%x x%x x%x x%x\n",
3735 irsp->un.ulpWord[0],
3736 irsp->un.ulpWord[1],
3737 irsp->un.ulpWord[2],
3738 irsp->un.ulpWord[3],
3739 irsp->un.ulpWord[4],
3740 irsp->un.ulpWord[5],
3741 *(((uint32_t *) irsp) + 6),
3742 *(((uint32_t *) irsp) + 7),
3743 *(((uint32_t *) irsp) + 8),
3744 *(((uint32_t *) irsp) + 9),
3745 *(((uint32_t *) irsp) + 10),
3746 *(((uint32_t *) irsp) + 11),
3747 *(((uint32_t *) irsp) + 12),
3748 *(((uint32_t *) irsp) + 13),
3749 *(((uint32_t *) irsp) + 14),
3750 *(((uint32_t *) irsp) + 15));
3754 * Fetch the IOCB command type and call the correct completion
3755 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3756 * get freed back to the lpfc_iocb_list by the discovery
3759 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3760 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3763 spin_unlock_irqrestore(&phba->hbalock, iflag);
3764 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3765 spin_lock_irqsave(&phba->hbalock, iflag);
3768 case LPFC_UNSOL_IOCB:
3769 spin_unlock_irqrestore(&phba->hbalock, iflag);
3770 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3771 spin_lock_irqsave(&phba->hbalock, iflag);
3776 case LPFC_ABORT_IOCB:
3778 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3779 spin_unlock_irqrestore(&phba->hbalock, iflag);
3780 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3782 spin_lock_irqsave(&phba->hbalock, iflag);
3785 /* Call the specified completion routine */
3786 if (cmdiocbp->iocb_cmpl) {
3787 spin_unlock_irqrestore(&phba->hbalock,
3789 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3791 spin_lock_irqsave(&phba->hbalock,
3794 __lpfc_sli_release_iocbq(phba,
3799 case LPFC_UNKNOWN_IOCB:
3800 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3801 char adaptermsg[LPFC_MAX_ADPTMSG];
3802 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3803 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3805 dev_warn(&((phba->pcidev)->dev),
3807 phba->brd_no, adaptermsg);
3809 /* Unknown IOCB command */
3810 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3811 "0335 Unknown IOCB "
3812 "command Data: x%x "
3823 list_for_each_entry_safe(rspiocbp, next_iocb,
3824 &saveq->list, list) {
3825 list_del_init(&rspiocbp->list);
3826 __lpfc_sli_release_iocbq(phba, rspiocbp);
3828 __lpfc_sli_release_iocbq(phba, saveq);
3832 spin_unlock_irqrestore(&phba->hbalock, iflag);
3837 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3838 * @phba: Pointer to HBA context object.
3839 * @pring: Pointer to driver SLI ring object.
3840 * @mask: Host attention register mask for this ring.
3842 * This routine wraps the actual slow_ring event process routine from the
3843 * API jump table function pointer from the lpfc_hba struct.
3846 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3847 struct lpfc_sli_ring *pring, uint32_t mask)
3849 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3853 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3854 * @phba: Pointer to HBA context object.
3855 * @pring: Pointer to driver SLI ring object.
3856 * @mask: Host attention register mask for this ring.
3858 * This function is called from the worker thread when there is a ring event
3859 * for non-fcp rings. The caller does not hold any lock. The function
3860 * removes each response iocb in the response ring and calls the handle
3861 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3864 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3865 struct lpfc_sli_ring *pring, uint32_t mask)
3867 struct lpfc_pgp *pgp;
3869 IOCB_t *irsp = NULL;
3870 struct lpfc_iocbq *rspiocbp = NULL;
3871 uint32_t portRspPut, portRspMax;
3872 unsigned long iflag;
3875 pgp = &phba->port_gp[pring->ringno];
3876 spin_lock_irqsave(&phba->hbalock, iflag);
3877 pring->stats.iocb_event++;
3880 * The next available response entry should never exceed the maximum
3881 * entries. If it does, treat it as an adapter hardware error.
3883 portRspMax = pring->sli.sli3.numRiocb;
3884 portRspPut = le32_to_cpu(pgp->rspPutInx);
3885 if (portRspPut >= portRspMax) {
3887 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3888 * rsp ring <portRspMax>
3890 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3891 "0303 Ring %d handler: portRspPut %d "
3892 "is bigger than rsp ring %d\n",
3893 pring->ringno, portRspPut, portRspMax);
3895 phba->link_state = LPFC_HBA_ERROR;
3896 spin_unlock_irqrestore(&phba->hbalock, iflag);
3898 phba->work_hs = HS_FFER3;
3899 lpfc_handle_eratt(phba);
3905 while (pring->sli.sli3.rspidx != portRspPut) {
3907 * Build a completion list and call the appropriate handler.
3908 * The process is to get the next available response iocb, get
3909 * a free iocb from the list, copy the response data into the
3910 * free iocb, insert to the continuation list, and update the
3911 * next response index to slim. This process makes response
3912 * iocbs in the ring available to DMA as fast as possible but
3913 * pays a penalty for a copy operation. Since the iocb is
3914 * only 32 bytes, this penalty is considered small relative to
3915 * the PCI reads for register values and a slim write. When
3916 * the ulpLe field is set, the entire Command has been
3919 entry = lpfc_resp_iocb(phba, pring);
3921 phba->last_completion_time = jiffies;
3922 rspiocbp = __lpfc_sli_get_iocbq(phba);
3923 if (rspiocbp == NULL) {
3924 printk(KERN_ERR "%s: out of buffers! Failing "
3925 "completion.\n", __func__);
3929 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3930 phba->iocb_rsp_size);
3931 irsp = &rspiocbp->iocb;
3933 if (++pring->sli.sli3.rspidx >= portRspMax)
3934 pring->sli.sli3.rspidx = 0;
3936 if (pring->ringno == LPFC_ELS_RING) {
3937 lpfc_debugfs_slow_ring_trc(phba,
3938 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3939 *(((uint32_t *) irsp) + 4),
3940 *(((uint32_t *) irsp) + 6),
3941 *(((uint32_t *) irsp) + 7));
3944 writel(pring->sli.sli3.rspidx,
3945 &phba->host_gp[pring->ringno].rspGetInx);
3947 spin_unlock_irqrestore(&phba->hbalock, iflag);
3948 /* Handle the response IOCB */
3949 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3950 spin_lock_irqsave(&phba->hbalock, iflag);
3953 * If the port response put pointer has not been updated, sync
3954 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3955 * response put pointer.
3957 if (pring->sli.sli3.rspidx == portRspPut) {
3958 portRspPut = le32_to_cpu(pgp->rspPutInx);
3960 } /* while (pring->sli.sli3.rspidx != portRspPut) */
3962 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3963 /* At least one response entry has been freed */
3964 pring->stats.iocb_rsp_full++;
3965 /* SET RxRE_RSP in Chip Att register */
3966 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3967 writel(status, phba->CAregaddr);
3968 readl(phba->CAregaddr); /* flush */
3970 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3971 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3972 pring->stats.iocb_cmd_empty++;
3974 /* Force update of the local copy of cmdGetInx */
3975 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3976 lpfc_sli_resume_iocb(phba, pring);
3978 if ((pring->lpfc_sli_cmd_available))
3979 (pring->lpfc_sli_cmd_available) (phba, pring);
3983 spin_unlock_irqrestore(&phba->hbalock, iflag);
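/*
 * Illustrative sketch, not driver code: the loop above is the classic
 * circular-ring consumer pattern. The demo_* names below are hypothetical
 * and model only the index handling: copy the entry out of the ring,
 * advance the consumer index with wrap-around, then process the private
 * copy so the ring slot is immediately reusable by the hardware.
 */
struct demo_ring {
	u32 *entries;	/* ring memory, 'max' entries of one word each */
	u32 get;	/* consumer (driver) index */
	u32 max;	/* total entries in the ring */
};

static void demo_drain_ring(struct demo_ring *r, u32 put)
{
	u32 copy;

	while (r->get != put) {
		copy = r->entries[r->get];	/* copy before advancing */
		if (++r->get >= r->max)		/* wrap at end of ring */
			r->get = 0;
		/* ... process 'copy'; the slot is already free for DMA ... */
		(void)copy;
	}
}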
3988 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3989 * @phba: Pointer to HBA context object.
3990 * @pring: Pointer to driver SLI ring object.
3991 * @mask: Host attention register mask for this ring.
3993 * This function is called from the worker thread when there is a pending
3994 * ELS response iocb on the driver internal slow-path response iocb worker
3995 * queue. The caller does not hold any lock. The function removes each
3996 * response iocb from the response worker queue and calls the handle
3997 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4000 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4001 struct lpfc_sli_ring *pring, uint32_t mask)
4003 struct lpfc_iocbq *irspiocbq;
4004 struct hbq_dmabuf *dmabuf;
4005 struct lpfc_cq_event *cq_event;
4006 unsigned long iflag;
4009 spin_lock_irqsave(&phba->hbalock, iflag);
4010 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4011 spin_unlock_irqrestore(&phba->hbalock, iflag);
4012 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4013 /* Get the response iocb from the head of work queue */
4014 spin_lock_irqsave(&phba->hbalock, iflag);
4015 list_remove_head(&phba->sli4_hba.sp_queue_event,
4016 cq_event, struct lpfc_cq_event, list);
4017 spin_unlock_irqrestore(&phba->hbalock, iflag);
4019 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4020 case CQE_CODE_COMPL_WQE:
4021 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4023 /* Translate ELS WCQE to response IOCBQ */
4024 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
4027 lpfc_sli_sp_handle_rspiocb(phba, pring,
4031 case CQE_CODE_RECEIVE:
4032 case CQE_CODE_RECEIVE_V1:
4033 dmabuf = container_of(cq_event, struct hbq_dmabuf,
4035 lpfc_sli4_handle_received_buffer(phba, dmabuf);
4042 /* Limit the number of events to 64 to avoid soft lockups */
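/*
 * Illustrative sketch, not driver code: demo_drain_bounded() shows the
 * bounded-drain idiom the loop above uses. A fixed budget (64 here, to
 * match the comment above) caps how much work one worker invocation does,
 * and each event is detached under the lock but processed outside it.
 */
static void demo_drain_bounded(struct lpfc_hba *phba,
			       struct list_head *events)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflag;
	int budget = 64;	/* matches the soft-lockup guard above */

	while (budget--) {
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (list_empty(events)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			break;
		}
		cq_event = list_first_entry(events, struct lpfc_cq_event,
					    list);
		list_del_init(&cq_event->list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* ... dispatch cq_event with no lock held ... */
	}
}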
4049 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4050 * @phba: Pointer to HBA context object.
4051 * @pring: Pointer to driver SLI ring object.
4053 * This function aborts all iocbs in the given ring and frees all the iocb
4054 * objects in txq. This function issues an abort iocb for all the iocb commands
4055 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4056 * the return of this function. The caller is not required to hold any locks.
4059 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4061 LIST_HEAD(completions);
4062 struct lpfc_iocbq *iocb, *next_iocb;
4064 if (pring->ringno == LPFC_ELS_RING) {
4065 lpfc_fabric_abort_hba(phba);
4068 /* Error everything on txq and txcmplq */
4071 if (phba->sli_rev >= LPFC_SLI_REV4) {
4072 spin_lock_irq(&pring->ring_lock);
4073 list_splice_init(&pring->txq, &completions);
4075 spin_unlock_irq(&pring->ring_lock);
4077 spin_lock_irq(&phba->hbalock);
4078 /* Next issue ABTS for everything on the txcmplq */
4079 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4080 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
4081 spin_unlock_irq(&phba->hbalock);
4083 spin_lock_irq(&phba->hbalock);
4084 list_splice_init(&pring->txq, &completions);
4087 /* Next issue ABTS for everything on the txcmplq */
4088 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4089 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
4090 spin_unlock_irq(&phba->hbalock);
4093 /* Cancel all the IOCBs from the completions list */
4094 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4099 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4100 * @phba: Pointer to HBA context object.
4102 * This function aborts all iocbs in FCP rings and frees all the iocb
4103 * objects in txq. This function issues an abort iocb for all the iocb commands
4104 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4105 * the return of this function. The caller is not required to hold any locks.
4108 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4110 struct lpfc_sli *psli = &phba->sli;
4111 struct lpfc_sli_ring *pring;
4112 uint32_t i;
4114 /* Look on all the FCP Rings for the iotag */
4115 if (phba->sli_rev >= LPFC_SLI_REV4) {
4116 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4117 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4118 lpfc_sli_abort_iocb_ring(phba, pring);
4121 pring = &psli->sli3_ring[LPFC_FCP_RING];
4122 lpfc_sli_abort_iocb_ring(phba, pring);
4127 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4128 * @phba: Pointer to HBA context object.
4130 * This function flushes all iocbs in the IO ring and frees all the iocb
4131 * objects in txq and txcmplq. This function will not issue abort iocbs
4132 * for all the iocb commands in txcmplq, they will just be returned with
4133 * IOERR_SLI_DOWN. This function is invoked from EEH handling when the
4134 * device's PCI slot has been permanently disabled.
4137 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4139 LIST_HEAD(txq);
4140 LIST_HEAD(txcmplq);
4141 struct lpfc_sli *psli = &phba->sli;
4142 struct lpfc_sli_ring *pring;
4143 uint32_t i;
4144 struct lpfc_iocbq *piocb, *next_iocb;
4146 spin_lock_irq(&phba->hbalock);
4147 if (phba->hba_flag & HBA_IOQ_FLUSH ||
4148 !phba->sli4_hba.hdwq) {
4149 spin_unlock_irq(&phba->hbalock);
4152 /* Indicate the I/O queues are flushed */
4153 phba->hba_flag |= HBA_IOQ_FLUSH;
4154 spin_unlock_irq(&phba->hbalock);
4156 /* Look on all the FCP Rings for the iotag */
4157 if (phba->sli_rev >= LPFC_SLI_REV4) {
4158 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4159 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4161 spin_lock_irq(&pring->ring_lock);
4162 /* Retrieve everything on txq */
4163 list_splice_init(&pring->txq, &txq);
4164 list_for_each_entry_safe(piocb, next_iocb,
4165 &pring->txcmplq, list)
4166 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4167 /* Retrieve everything on the txcmplq */
4168 list_splice_init(&pring->txcmplq, &txcmplq);
4170 pring->txcmplq_cnt = 0;
4171 spin_unlock_irq(&pring->ring_lock);
4174 lpfc_sli_cancel_iocbs(phba, &txq,
4175 IOSTAT_LOCAL_REJECT,
4177 /* Flush the txcmpq */
4178 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4179 IOSTAT_LOCAL_REJECT,
4183 pring = &psli->sli3_ring[LPFC_FCP_RING];
4185 spin_lock_irq(&phba->hbalock);
4186 /* Retrieve everything on txq */
4187 list_splice_init(&pring->txq, &txq);
4188 list_for_each_entry_safe(piocb, next_iocb,
4189 &pring->txcmplq, list)
4190 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4191 /* Retrieve everything on the txcmplq */
4192 list_splice_init(&pring->txcmplq, &txcmplq);
4194 pring->txcmplq_cnt = 0;
4195 spin_unlock_irq(&phba->hbalock);
4198 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4200 /* Flush the txcmpq */
4201 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
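/*
 * Illustrative sketch, not driver code: the flush above is built on the
 * "splice under lock, complete outside the lock" idiom. demo_flush_list()
 * is a hypothetical generic form of that step.
 */
static void demo_flush_list(spinlock_t *lock, struct list_head *pending)
{
	LIST_HEAD(local);

	spin_lock_irq(lock);
	list_splice_init(pending, &local);	/* detach every element */
	spin_unlock_irq(lock);

	/*
	 * 'local' is now private to this thread: completions can run
	 * without holding the lock and without racing new submissions,
	 * which is why the cancel calls above take a detached list.
	 */
}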
4207 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4208 * @phba: Pointer to HBA context object.
4209 * @mask: Bit mask to be checked.
4211 * This function reads the host status register and compares
4212 * with the provided bit mask to check if HBA completed
4213 * the restart. This function will wait in a loop for the
4214 * HBA to complete restart. If the HBA does not restart within
4215 * 15 iterations, the function will reset the HBA again. The
4216 * function returns 1 if the HBA fails to restart, otherwise returns zero.
4220 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4226 /* Read the HBA Host Status Register */
4227 if (lpfc_readl(phba->HSregaddr, &status))
4231 /* Check status register every 100ms for 5 retries, then every
4232 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4233 * check every 2.5 sec for 4 more retries.
4234 * Break out of the loop if errors occurred during init. */
4236 while (((status & mask) != mask) &&
4237 !(status & HS_FFERM) &&
4249 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4250 lpfc_sli_brdrestart(phba);
4252 /* Read the HBA Host Status Register */
4253 if (lpfc_readl(phba->HSregaddr, &status)) {
4259 /* Check to see if any errors occurred during init */
4260 if ((status & HS_FFERM) || (i >= 20)) {
4261 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4262 "2751 Adapter failed to restart, "
4263 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4265 readl(phba->MBslimaddr + 0xa8),
4266 readl(phba->MBslimaddr + 0xac));
4267 phba->link_state = LPFC_HBA_ERROR;
4275 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4276 * @phba: Pointer to HBA context object.
4277 * @mask: Bit mask to be checked.
4279 * This function checks the host status register to see if the HBA is
4280 * ready. This function will wait in a loop for the HBA to be ready.
4281 * If the HBA is not ready, the function will reset the HBA PCI
4282 * function again. The function returns 1 if the HBA fails to become
4283 * ready, otherwise returns zero.
4286 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4291 /* Read the HBA Host Status Register */
4292 status = lpfc_sli4_post_status_check(phba);
4295 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4296 lpfc_sli_brdrestart(phba);
4297 status = lpfc_sli4_post_status_check(phba);
4300 /* Check to see if any errors occurred during init */
4302 phba->link_state = LPFC_HBA_ERROR;
4305 phba->sli4_hba.intr_enable = 0;
4311 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4312 * @phba: Pointer to HBA context object.
4313 * @mask: Bit mask to be checked.
4315 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4316 * via the API jump table function pointer from the lpfc_hba struct.
4319 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4321 return phba->lpfc_sli_brdready(phba, mask);
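/*
 * Illustrative sketch, not driver code: the wrapper above dispatches
 * through a per-revision function pointer. Conceptually, the pointer is
 * installed once at API-table setup time, roughly along these lines
 * (demo_set_brdready_op is hypothetical; the _s3/_s4 routines are the
 * ones defined above).
 */
static void demo_set_brdready_op(struct lpfc_hba *phba, int sli_rev)
{
	switch (sli_rev) {
	case LPFC_SLI_REV2:
	case LPFC_SLI_REV3:
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
		break;
	case LPFC_SLI_REV4:
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
		break;
	}
}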
4324 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4327 * lpfc_reset_barrier - Make HBA ready for HBA reset
4328 * @phba: Pointer to HBA context object.
4330 * This function is called before resetting an HBA. This function is called
4331 * with hbalock held and requests HBA to quiesce DMAs before a reset.
4333 void lpfc_reset_barrier(struct lpfc_hba *phba)
4335 uint32_t __iomem *resp_buf;
4336 uint32_t __iomem *mbox_buf;
4337 volatile uint32_t mbox;
4338 uint32_t hc_copy, ha_copy, resp_data;
4342 lockdep_assert_held(&phba->hbalock);
4344 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4345 if (hdrtype != 0x80 ||
4346 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4347 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4351 /* Tell the other part of the chip to suspend temporarily all its DMA activity. */
4354 resp_buf = phba->MBslimaddr;
4356 /* Disable the error attention */
4357 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4359 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4360 readl(phba->HCregaddr); /* flush */
4361 phba->link_flag |= LS_IGNORE_ERATT;
4363 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4365 if (ha_copy & HA_ERATT) {
4366 /* Clear Chip error bit */
4367 writel(HA_ERATT, phba->HAregaddr);
4368 phba->pport->stopped = 1;
4372 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4373 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4375 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4376 mbox_buf = phba->MBslimaddr;
4377 writel(mbox, mbox_buf);
4379 for (i = 0; i < 50; i++) {
4380 if (lpfc_readl((resp_buf + 1), &resp_data))
4382 if (resp_data != ~(BARRIER_TEST_PATTERN))
4388 if (lpfc_readl((resp_buf + 1), &resp_data))
4390 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4391 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4392 phba->pport->stopped)
4398 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4400 for (i = 0; i < 500; i++) {
4401 if (lpfc_readl(resp_buf, &resp_data))
4403 if (resp_data != mbox)
4412 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4414 if (!(ha_copy & HA_ERATT))
4420 if (readl(phba->HAregaddr) & HA_ERATT) {
4421 writel(HA_ERATT, phba->HAregaddr);
4422 phba->pport->stopped = 1;
4426 phba->link_flag &= ~LS_IGNORE_ERATT;
4427 writel(hc_copy, phba->HCregaddr);
4428 readl(phba->HCregaddr); /* flush */
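/*
 * Illustrative sketch, not driver code: the barrier above is a
 * write-then-poll handshake against SLIM. demo_poll_for_pattern() is a
 * hypothetical distillation of the polling half: bounded retries, a
 * short delay per attempt, and a boolean result for the caller.
 */
static bool demo_poll_for_pattern(void __iomem *reg, u32 want, int tries)
{
	while (tries--) {
		if (readl(reg) == want)
			return true;
		mdelay(1);	/* give the chip time to respond */
	}
	return false;
}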
4432 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4433 * @phba: Pointer to HBA context object.
4435 * This function issues a kill_board mailbox command and waits for
4436 * the error attention interrupt. This function is called for stopping
4437 * the firmware processing. The caller is not required to hold any
4438 * locks. This function calls lpfc_hba_down_post function to free
4439 * any pending commands after the kill. The function will return 1 when it
4440 * fails to kill the board, else it will return 0.
4443 lpfc_sli_brdkill(struct lpfc_hba *phba)
4445 struct lpfc_sli *psli;
4455 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4456 "0329 Kill HBA Data: x%x x%x\n",
4457 phba->pport->port_state, psli->sli_flag);
4459 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4463 /* Disable the error attention */
4464 spin_lock_irq(&phba->hbalock);
4465 if (lpfc_readl(phba->HCregaddr, &status)) {
4466 spin_unlock_irq(&phba->hbalock);
4467 mempool_free(pmb, phba->mbox_mem_pool);
4470 status &= ~HC_ERINT_ENA;
4471 writel(status, phba->HCregaddr);
4472 readl(phba->HCregaddr); /* flush */
4473 phba->link_flag |= LS_IGNORE_ERATT;
4474 spin_unlock_irq(&phba->hbalock);
4476 lpfc_kill_board(phba, pmb);
4477 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4478 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4480 if (retval != MBX_SUCCESS) {
4481 if (retval != MBX_BUSY)
4482 mempool_free(pmb, phba->mbox_mem_pool);
4483 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4484 "2752 KILL_BOARD command failed retval %d\n",
4486 spin_lock_irq(&phba->hbalock);
4487 phba->link_flag &= ~LS_IGNORE_ERATT;
4488 spin_unlock_irq(&phba->hbalock);
4492 spin_lock_irq(&phba->hbalock);
4493 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4494 spin_unlock_irq(&phba->hbalock);
4496 mempool_free(pmb, phba->mbox_mem_pool);
4498 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4499 * attention every 100ms for 3 seconds. If we don't get ERATT after
4500 * 3 seconds we still set HBA_ERROR state because the status of the
4501 * board is now undefined. */
4503 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4505 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4507 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4511 del_timer_sync(&psli->mbox_tmo);
4512 if (ha_copy & HA_ERATT) {
4513 writel(HA_ERATT, phba->HAregaddr);
4514 phba->pport->stopped = 1;
4516 spin_lock_irq(&phba->hbalock);
4517 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4518 psli->mbox_active = NULL;
4519 phba->link_flag &= ~LS_IGNORE_ERATT;
4520 spin_unlock_irq(&phba->hbalock);
4522 lpfc_hba_down_post(phba);
4523 phba->link_state = LPFC_HBA_ERROR;
4525 return ha_copy & HA_ERATT ? 0 : 1;
4529 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4530 * @phba: Pointer to HBA context object.
4532 * This function resets the HBA by writing HC_INITFF to the control
4533 * register. After the HBA resets, this function resets all the iocb ring
4534 * indices. This function disables PCI layer parity checking during the reset.
4536 * This function returns 0 always.
4537 * The caller is not required to hold any locks.
4540 lpfc_sli_brdreset(struct lpfc_hba *phba)
4542 struct lpfc_sli *psli;
4543 struct lpfc_sli_ring *pring;
4550 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4551 "0325 Reset HBA Data: x%x x%x\n",
4552 (phba->pport) ? phba->pport->port_state : 0,
4555 /* perform board reset */
4556 phba->fc_eventTag = 0;
4557 phba->link_events = 0;
4559 phba->pport->fc_myDID = 0;
4560 phba->pport->fc_prevDID = 0;
4563 /* Turn off parity checking and serr during the physical reset */
4564 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4567 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4569 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4571 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4573 /* Now toggle INITFF bit in the Host Control Register */
4574 writel(HC_INITFF, phba->HCregaddr);
4576 readl(phba->HCregaddr); /* flush */
4577 writel(0, phba->HCregaddr);
4578 readl(phba->HCregaddr); /* flush */
4580 /* Restore PCI cmd register */
4581 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4583 /* Initialize relevant SLI info */
4584 for (i = 0; i < psli->num_rings; i++) {
4585 pring = &psli->sli3_ring[i];
4587 pring->sli.sli3.rspidx = 0;
4588 pring->sli.sli3.next_cmdidx = 0;
4589 pring->sli.sli3.local_getidx = 0;
4590 pring->sli.sli3.cmdidx = 0;
4591 pring->missbufcnt = 0;
4594 phba->link_state = LPFC_WARM_START;
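/*
 * Illustrative sketch, not driver code: demo_reset_with_parity_masked()
 * shows the PCI_COMMAND bracketing used above, where parity and SERR
 * reporting are masked so a chip mid-reset cannot raise fatal PCI errors,
 * then the saved value is restored afterwards.
 */
static void demo_reset_with_parity_masked(struct pci_dev *pdev)
{
	u16 cmd;

	if (pci_read_config_word(pdev, PCI_COMMAND, &cmd))
		return;
	pci_write_config_word(pdev, PCI_COMMAND,
			      cmd & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));

	/* ... device-specific reset sequence goes here ... */

	pci_write_config_word(pdev, PCI_COMMAND, cmd);	/* restore */
}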
4599 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4600 * @phba: Pointer to HBA context object.
4602 * This function resets a SLI4 HBA. This function disables PCI layer parity
4603 * checking while it resets the device. The caller is not required to hold any locks.
4606 * This function returns 0 on success else returns negative error code.
4609 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4611 struct lpfc_sli *psli = &phba->sli;
4616 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4617 "0295 Reset HBA Data: x%x x%x x%x\n",
4618 phba->pport->port_state, psli->sli_flag,
4621 /* perform board reset */
4622 phba->fc_eventTag = 0;
4623 phba->link_events = 0;
4624 phba->pport->fc_myDID = 0;
4625 phba->pport->fc_prevDID = 0;
4627 spin_lock_irq(&phba->hbalock);
4628 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4629 phba->fcf.fcf_flag = 0;
4630 spin_unlock_irq(&phba->hbalock);
4632 /* Now physically reset the device */
4633 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4634 "0389 Performing PCI function reset!\n");
4636 /* Turn off parity checking and serr during the physical reset */
4637 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4638 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4639 "3205 PCI read Config failed\n");
4643 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4644 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4646 /* Perform FCoE PCI function reset before freeing queue memory */
4647 rc = lpfc_pci_function_reset(phba);
4649 /* Restore PCI cmd register */
4650 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4656 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4657 * @phba: Pointer to HBA context object.
4659 * This function is called in the SLI initialization code path to
4660 * restart the HBA. The caller is not required to hold any lock.
4661 * This function writes MBX_RESTART mailbox command to the SLIM and
4662 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4663 * function to free any pending commands. The function enables
4664 * POST only during the first initialization. The function returns zero.
4665 * The function does not guarantee completion of MBX_RESTART mailbox
4666 * command before the return of this function.
4669 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4672 struct lpfc_sli *psli;
4673 volatile uint32_t word0;
4674 void __iomem *to_slim;
4675 uint32_t hba_aer_enabled;
4677 spin_lock_irq(&phba->hbalock);
4679 /* Take PCIe device Advanced Error Reporting (AER) state */
4680 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4685 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4686 "0337 Restart HBA Data: x%x x%x\n",
4687 (phba->pport) ? phba->pport->port_state : 0,
4691 mb = (MAILBOX_t *) &word0;
4692 mb->mbxCommand = MBX_RESTART;
4695 lpfc_reset_barrier(phba);
4697 to_slim = phba->MBslimaddr;
4698 writel(*(uint32_t *) mb, to_slim);
4699 readl(to_slim); /* flush */
4701 /* Only skip post after fc_ffinit is completed */
4702 if (phba->pport && phba->pport->port_state)
4703 word0 = 1; /* This is really setting up word1 */
4705 word0 = 0; /* This is really setting up word1 */
4706 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4707 writel(*(uint32_t *) mb, to_slim);
4708 readl(to_slim); /* flush */
4710 lpfc_sli_brdreset(phba);
4712 phba->pport->stopped = 0;
4713 phba->link_state = LPFC_INIT_START;
4715 spin_unlock_irq(&phba->hbalock);
4717 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4718 psli->stats_start = ktime_get_seconds();
4720 /* Give the INITFF and Post time to settle. */
4723 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4724 if (hba_aer_enabled)
4725 pci_disable_pcie_error_reporting(phba->pcidev);
4727 lpfc_hba_down_post(phba);
4733 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4734 * @phba: Pointer to HBA context object.
4736 * This function is called in the SLI initialization code path to restart
4737 * a SLI4 HBA. The caller is not required to hold any lock.
4738 * At the end of the function, it calls lpfc_hba_down_post function to
4739 * free any pending commands.
4742 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4744 struct lpfc_sli *psli = &phba->sli;
4745 uint32_t hba_aer_enabled;
4749 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4750 "0296 Restart HBA Data: x%x x%x\n",
4751 phba->pport->port_state, psli->sli_flag);
4753 /* Take PCIe device Advanced Error Reporting (AER) state */
4754 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4756 rc = lpfc_sli4_brdreset(phba);
4758 phba->link_state = LPFC_HBA_ERROR;
4759 goto hba_down_queue;
4762 spin_lock_irq(&phba->hbalock);
4763 phba->pport->stopped = 0;
4764 phba->link_state = LPFC_INIT_START;
4766 spin_unlock_irq(&phba->hbalock);
4768 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4769 psli->stats_start = ktime_get_seconds();
4771 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4772 if (hba_aer_enabled)
4773 pci_disable_pcie_error_reporting(phba->pcidev);
4776 lpfc_hba_down_post(phba);
4777 lpfc_sli4_queue_destroy(phba);
4783 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4784 * @phba: Pointer to HBA context object.
4786 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4787 * API jump table function pointer from the lpfc_hba struct.
4790 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4792 return phba->lpfc_sli_brdrestart(phba);
4796 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4797 * @phba: Pointer to HBA context object.
4799 * This function is called after a HBA restart to wait for successful
4800 * restart of the HBA. Successful restart of the HBA is indicated by
4801 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4802 * iterations, the function will restart the HBA again. The function returns
4803 * zero if the HBA successfully restarted, else returns a negative error code.
4806 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4808 uint32_t status, i = 0;
4810 /* Read the HBA Host Status Register */
4811 if (lpfc_readl(phba->HSregaddr, &status))
4814 /* Check status register to see what current state is */
4816 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4818 /* Check every 10ms for 10 retries, then every 100ms for 90
4819 * retries, then every 1 sec for 50 retries, for a total of
4820 * ~60 seconds before resetting the board again, then check
4821 * every 1 sec for 50 more retries. The up-to-60-second wait
4822 * for board ready is required for Falcon FIPS zeroization to
4823 * complete; any board reset in between would restart
4824 * zeroization and further delay board readiness. */
4827 /* Adapter failed to init, timeout, status reg <status> */
4829 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4830 "0436 Adapter failed to init, "
4831 "timeout, status reg x%x, "
4832 "FW Data: A8 x%x AC x%x\n", status,
4833 readl(phba->MBslimaddr + 0xa8),
4834 readl(phba->MBslimaddr + 0xac));
4835 phba->link_state = LPFC_HBA_ERROR;
4839 /* Check to see if any errors occurred during init */
4840 if (status & HS_FFERM) {
4841 /* ERROR: During chipset initialization */
4842 /* Adapter failed to init, chipset, status reg <status> */
4844 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4845 "0437 Adapter failed to init, "
4846 "chipset, status reg x%x, "
4847 "FW Data: A8 x%x AC x%x\n", status,
4848 readl(phba->MBslimaddr + 0xa8),
4849 readl(phba->MBslimaddr + 0xac));
4850 phba->link_state = LPFC_HBA_ERROR;
4863 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4864 lpfc_sli_brdrestart(phba);
4866 /* Read the HBA Host Status Register */
4867 if (lpfc_readl(phba->HSregaddr, &status))
4871 /* Check to see if any errors occurred during init */
4872 if (status & HS_FFERM) {
4873 /* ERROR: During chipset initialization */
4874 /* Adapter failed to init, chipset, status reg <status> */
4875 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4876 "0438 Adapter failed to init, chipset, "
4878 "FW Data: A8 x%x AC x%x\n", status,
4879 readl(phba->MBslimaddr + 0xa8),
4880 readl(phba->MBslimaddr + 0xac));
4881 phba->link_state = LPFC_HBA_ERROR;
4885 /* Clear all interrupt enable conditions */
4886 writel(0, phba->HCregaddr);
4887 readl(phba->HCregaddr); /* flush */
4889 /* setup host attn register */
4890 writel(0xffffffff, phba->HAregaddr);
4891 readl(phba->HAregaddr); /* flush */
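/*
 * Illustrative sketch, not driver code: demo_init_wait_delay() expresses
 * the escalating poll schedule described above (10ms for the first 10
 * attempts, 100ms for the next 90, then 1 second per attempt) as a
 * single hypothetical helper.
 */
static void demo_init_wait_delay(int attempt)
{
	if (attempt < 10)
		msleep(10);
	else if (attempt < 100)
		msleep(100);
	else
		msleep(1000);
}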
4896 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4898 * This function calculates and returns the number of HBQs required to be configured.
4902 lpfc_sli_hbq_count(void)
4904 return ARRAY_SIZE(lpfc_hbq_defs);
4908 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4910 * This function adds the number of hbq entries in every HBQ to get
4911 * the total number of hbq entries required for the HBA and returns the result.
4915 lpfc_sli_hbq_entry_count(void)
4917 int hbq_count = lpfc_sli_hbq_count();
4921 for (i = 0; i < hbq_count; ++i)
4922 count += lpfc_hbq_defs[i]->entry_count;
4927 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4929 * This function calculates the amount of memory required for all hbq entries
4930 * to be configured and returns the total memory required.
4933 lpfc_sli_hbq_size(void)
4935 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4939 * lpfc_sli_hbq_setup - configure and initialize HBQs
4940 * @phba: Pointer to HBA context object.
4942 * This function is called during the SLI initialization to configure
4943 * all the HBQs and post buffers to the HBQ. The caller is not
4944 * required to hold any locks. This function will return zero if successful
4945 * else it will return negative error code.
4948 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4950 int hbq_count = lpfc_sli_hbq_count();
4954 uint32_t hbq_entry_index;
4956 /* Get a Mailbox buffer to setup mailbox
4957 * commands for HBA initialization */
4959 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4966 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4967 phba->link_state = LPFC_INIT_MBX_CMDS;
4968 phba->hbq_in_use = 1;
4970 hbq_entry_index = 0;
4971 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4972 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4973 phba->hbqs[hbqno].hbqPutIdx = 0;
4974 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4975 phba->hbqs[hbqno].entry_count =
4976 lpfc_hbq_defs[hbqno]->entry_count;
4977 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4978 hbq_entry_index, pmb);
4979 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4981 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4982 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4983 mbxStatus <status>, ring <num> */
4985 lpfc_printf_log(phba, KERN_ERR,
4986 LOG_SLI | LOG_VPORT,
4987 "1805 Adapter failed to init. "
4988 "Data: x%x x%x x%x\n",
4990 pmbox->mbxStatus, hbqno);
4992 phba->link_state = LPFC_HBA_ERROR;
4993 mempool_free(pmb, phba->mbox_mem_pool);
4997 phba->hbq_count = hbq_count;
4999 mempool_free(pmb, phba->mbox_mem_pool);
5001 /* Initially populate or replenish the HBQs */
5002 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5003 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5008 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5009 * @phba: Pointer to HBA context object.
5011 * This function is called during the SLI initialization to configure
5012 * all the HBQs and post buffers to the HBQ. The caller is not
5013 * required to hold any locks. This function will return zero if successful
5014 * else it will return negative error code.
5017 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5019 phba->hbq_in_use = 1;
5021 /* Specific case when the MDS diagnostics is enabled and supported.
5022 * The receive buffer count is truncated to manage the incoming data. */
5025 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5026 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5027 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5029 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5030 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5031 phba->hbq_count = 1;
5032 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5033 /* Initially populate or replenish the HBQs */
5038 * lpfc_sli_config_port - Issue config port mailbox command
5039 * @phba: Pointer to HBA context object.
5040 * @sli_mode: sli mode - 2/3
5042 * This function is called by the sli initialization code path
5043 * to issue config_port mailbox command. This function restarts the
5044 * HBA firmware and issues a config_port mailbox command to configure
5045 * the SLI interface in the sli mode specified by sli_mode
5046 * variable. The caller is not required to hold any locks.
5047 * The function returns 0 if successful, else returns a negative error code.
5051 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5054 uint32_t resetcount = 0, rc = 0, done = 0;
5056 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5058 phba->link_state = LPFC_HBA_ERROR;
5062 phba->sli_rev = sli_mode;
5063 while (resetcount < 2 && !done) {
5064 spin_lock_irq(&phba->hbalock);
5065 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5066 spin_unlock_irq(&phba->hbalock);
5067 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5068 lpfc_sli_brdrestart(phba);
5069 rc = lpfc_sli_chipset_init(phba);
5073 spin_lock_irq(&phba->hbalock);
5074 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5075 spin_unlock_irq(&phba->hbalock);
5078 /* Call pre CONFIG_PORT mailbox command initialization. A
5079 * value of 0 means the call was successful. Any other
5080 * nonzero value is a failure, but if ERESTART is returned,
5081 * the driver may reset the HBA and try again.
5083 rc = lpfc_config_port_prep(phba);
5084 if (rc == -ERESTART) {
5085 phba->link_state = LPFC_LINK_UNKNOWN;
5090 phba->link_state = LPFC_INIT_MBX_CMDS;
5091 lpfc_config_port(phba, pmb);
5092 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5093 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5094 LPFC_SLI3_HBQ_ENABLED |
5095 LPFC_SLI3_CRP_ENABLED |
5096 LPFC_SLI3_DSS_ENABLED);
5097 if (rc != MBX_SUCCESS) {
5098 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5099 "0442 Adapter failed to init, mbxCmd x%x "
5100 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5101 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5102 spin_lock_irq(&phba->hbalock);
5103 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5104 spin_unlock_irq(&phba->hbalock);
5107 /* Allow asynchronous mailbox command to go through */
5108 spin_lock_irq(&phba->hbalock);
5109 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5110 spin_unlock_irq(&phba->hbalock);
5113 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5114 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5115 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5116 "3110 Port did not grant ASABT\n");
5121 goto do_prep_failed;
5123 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5124 if (!pmb->u.mb.un.varCfgPort.cMA) {
5126 goto do_prep_failed;
5128 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5129 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5130 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5131 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5132 phba->max_vpi : phba->max_vports;
5136 if (pmb->u.mb.un.varCfgPort.gerbm)
5137 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5138 if (pmb->u.mb.un.varCfgPort.gcrp)
5139 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5141 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5142 phba->port_gp = phba->mbox->us.s3_pgp.port;
5144 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5145 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5146 phba->cfg_enable_bg = 0;
5147 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5148 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5149 "0443 Adapter did not grant "
5154 phba->hbq_get = NULL;
5155 phba->port_gp = phba->mbox->us.s2.port;
5159 mempool_free(pmb, phba->mbox_mem_pool);
5165 * lpfc_sli_hba_setup - SLI initialization function
5166 * @phba: Pointer to HBA context object.
5168 * This function is the main SLI initialization function. This function
5169 * is called by the HBA initialization code, HBA reset code and HBA
5170 * error attention handler code. Caller is not required to hold any
5171 * locks. This function issues config_port mailbox command to configure
5172 * the SLI, setup iocb rings and HBQ rings. In the end the function
5173 * calls the config_port_post function to issue init_link mailbox
5174 * command and to start the discovery. The function will return zero
5175 * if successful, else it will return negative error code.
5178 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5184 switch (phba->cfg_sli_mode) {
5186 if (phba->cfg_enable_npiv) {
5187 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5188 "1824 NPIV enabled: Override sli_mode "
5189 "parameter (%d) to auto (0).\n",
5190 phba->cfg_sli_mode);
5199 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5200 "1819 Unrecognized sli_mode parameter: %d.\n",
5201 phba->cfg_sli_mode);
5205 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5207 rc = lpfc_sli_config_port(phba, mode);
5209 if (rc && phba->cfg_sli_mode == 3)
5210 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5211 "1820 Unable to select SLI-3. "
5212 "Not supported by adapter.\n");
5213 if (rc && mode != 2)
5214 rc = lpfc_sli_config_port(phba, 2);
5215 else if (rc && mode == 2)
5216 rc = lpfc_sli_config_port(phba, 3);
5218 goto lpfc_sli_hba_setup_error;
5220 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5221 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5222 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5224 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5225 "2709 This device supports "
5226 "Advanced Error Reporting (AER)\n");
5227 spin_lock_irq(&phba->hbalock);
5228 phba->hba_flag |= HBA_AER_ENABLED;
5229 spin_unlock_irq(&phba->hbalock);
5231 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5232 "2708 This device does not support "
5233 "Advanced Error Reporting (AER): %d\n",
5235 phba->cfg_aer_support = 0;
5239 if (phba->sli_rev == 3) {
5240 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5241 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5243 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5244 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5245 phba->sli3_options = 0;
5248 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5249 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5250 phba->sli_rev, phba->max_vpi);
5251 rc = lpfc_sli_ring_map(phba);
5254 goto lpfc_sli_hba_setup_error;
5256 /* Initialize VPIs. */
5257 if (phba->sli_rev == LPFC_SLI_REV3) {
5259 * The VPI bitmask and physical ID array are allocated
5260 * and initialized once only - at driver load. A port
5261 * reset doesn't need to reinitialize this memory.
5263 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5264 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5265 phba->vpi_bmask = kcalloc(longs,
5266 sizeof(unsigned long),
5268 if (!phba->vpi_bmask) {
5270 goto lpfc_sli_hba_setup_error;
5273 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5276 if (!phba->vpi_ids) {
5277 kfree(phba->vpi_bmask);
5279 goto lpfc_sli_hba_setup_error;
5281 for (i = 0; i < phba->max_vpi; i++)
5282 phba->vpi_ids[i] = i;
5287 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5288 rc = lpfc_sli_hbq_setup(phba);
5290 goto lpfc_sli_hba_setup_error;
5292 spin_lock_irq(&phba->hbalock);
5293 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5294 spin_unlock_irq(&phba->hbalock);
5296 rc = lpfc_config_port_post(phba);
5298 goto lpfc_sli_hba_setup_error;
5302 lpfc_sli_hba_setup_error:
5303 phba->link_state = LPFC_HBA_ERROR;
5304 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5305 "0445 Firmware initialization failed\n");
5310 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5311 * @phba: Pointer to HBA context object.
5313 * This function issues a dump mailbox command to read config region 23,
5314 * parses the records in the region, and populates the driver's FCoE parameters.
5318 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5320 LPFC_MBOXQ_t *mboxq;
5321 struct lpfc_dmabuf *mp;
5322 struct lpfc_mqe *mqe;
5323 uint32_t data_length;
5326 /* Program the default value of vlan_id and fc_map */
5327 phba->valid_vlan = 0;
5328 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5329 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5330 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5332 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5336 mqe = &mboxq->u.mqe;
5337 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5339 goto out_free_mboxq;
5342 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5343 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5345 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5346 "(%d):2571 Mailbox cmd x%x Status x%x "
5347 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5348 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5349 "CQ: x%x x%x x%x x%x\n",
5350 mboxq->vport ? mboxq->vport->vpi : 0,
5351 bf_get(lpfc_mqe_command, mqe),
5352 bf_get(lpfc_mqe_status, mqe),
5353 mqe->un.mb_words[0], mqe->un.mb_words[1],
5354 mqe->un.mb_words[2], mqe->un.mb_words[3],
5355 mqe->un.mb_words[4], mqe->un.mb_words[5],
5356 mqe->un.mb_words[6], mqe->un.mb_words[7],
5357 mqe->un.mb_words[8], mqe->un.mb_words[9],
5358 mqe->un.mb_words[10], mqe->un.mb_words[11],
5359 mqe->un.mb_words[12], mqe->un.mb_words[13],
5360 mqe->un.mb_words[14], mqe->un.mb_words[15],
5361 mqe->un.mb_words[16], mqe->un.mb_words[50],
5363 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5364 mboxq->mcqe.trailer);
5367 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5370 goto out_free_mboxq;
5372 data_length = mqe->un.mb_words[5];
5373 if (data_length > DMP_RGN23_SIZE) {
5374 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5377 goto out_free_mboxq;
5380 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5381 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5386 mempool_free(mboxq, phba->mbox_mem_pool);
5391 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5392 * @phba: pointer to lpfc hba data structure.
5393 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5394 * @vpd: pointer to the memory to hold resulting port vpd data.
5395 * @vpd_size: On input, the number of bytes allocated to @vpd.
5396 * On output, the number of data bytes in @vpd.
5398 * This routine executes a READ_REV SLI4 mailbox command. In
5399 * addition, this routine gets the port vpd data.
5403 * -ENOMEM - could not allocate memory.
5406 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5407 uint8_t *vpd, uint32_t *vpd_size)
5411 struct lpfc_dmabuf *dmabuf;
5412 struct lpfc_mqe *mqe;
5414 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5419 /* Get a DMA buffer for the vpd data resulting from the READ_REV mbox command. */
5422 dma_size = *vpd_size;
5423 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5424 &dmabuf->phys, GFP_KERNEL);
5425 if (!dmabuf->virt) {
5431 /* The SLI4 implementation of READ_REV conflicts at word1,
5432 * bits 31:16 and SLI4 adds vpd functionality not present
5433 * in SLI3. This code corrects the conflicts. */
5435 lpfc_read_rev(phba, mboxq);
5436 mqe = &mboxq->u.mqe;
5437 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5438 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5439 mqe->un.read_rev.word1 &= 0x0000FFFF;
5440 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5441 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5443 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5445 dma_free_coherent(&phba->pcidev->dev, dma_size,
5446 dmabuf->virt, dmabuf->phys);
5452 /* The available vpd length cannot be bigger than the
5453 * DMA buffer passed to the port. Catch the less-than
5454 * case and update the caller's size. */
5456 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5457 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5459 memcpy(vpd, dmabuf->virt, *vpd_size);
5461 dma_free_coherent(&phba->pcidev->dev, dma_size,
5462 dmabuf->virt, dmabuf->phys);
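/*
 * Illustrative sketch, not driver code: READ_REV above returns its vpd
 * payload by DMA, so a coherent buffer is allocated, its bus address is
 * programmed into the command, and the buffer is freed after the copy.
 * demo_dma_payload() is a hypothetical reduction of that shape.
 */
static int demo_dma_payload(struct pci_dev *pdev, size_t len)
{
	dma_addr_t phys;
	void *virt;

	virt = dma_alloc_coherent(&pdev->dev, len, &phys, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	/* ... program 'phys' into the mailbox, issue it, read 'virt' ... */

	dma_free_coherent(&pdev->dev, len, virt, phys);
	return 0;
}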
5468 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5469 * @phba: pointer to lpfc hba data structure.
5471 * This routine retrieves the SLI4 device controller attributes for this PCI function.
5476 * otherwise - failed to retrieve controller attributes
5479 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5481 LPFC_MBOXQ_t *mboxq;
5482 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5483 struct lpfc_controller_attribute *cntl_attr;
5484 void *virtaddr = NULL;
5485 uint32_t alloclen, reqlen;
5486 uint32_t shdr_status, shdr_add_status;
5487 union lpfc_sli4_cfg_shdr *shdr;
5490 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5494 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5495 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5496 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5497 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5498 LPFC_SLI4_MBX_NEMBED);
5500 if (alloclen < reqlen) {
5501 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5502 "3084 Allocated DMA memory size (%d) is "
5503 "less than the requested DMA memory size "
5504 "(%d)\n", alloclen, reqlen);
5506 goto out_free_mboxq;
5508 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5509 virtaddr = mboxq->sge_array->addr[0];
5510 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5511 shdr = &mbx_cntl_attr->cfg_shdr;
5512 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5513 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5514 if (shdr_status || shdr_add_status || rc) {
5515 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5516 "3085 Mailbox x%x (x%x/x%x) failed, "
5517 "rc:x%x, status:x%x, add_status:x%x\n",
5518 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5519 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5520 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5521 rc, shdr_status, shdr_add_status);
5523 goto out_free_mboxq;
5526 cntl_attr = &mbx_cntl_attr->cntl_attr;
5527 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5528 phba->sli4_hba.lnk_info.lnk_tp =
5529 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5530 phba->sli4_hba.lnk_info.lnk_no =
5531 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5533 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5534 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5535 sizeof(phba->BIOSVersion));
5537 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5538 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
5539 phba->sli4_hba.lnk_info.lnk_tp,
5540 phba->sli4_hba.lnk_info.lnk_no,
5543 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5544 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5546 mempool_free(mboxq, phba->mbox_mem_pool);
5551 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5552 * @phba: pointer to lpfc hba data structure.
5554 * This routine retrieves the SLI4 device physical port name for this PCI function.
5559 * otherwise - failed to retrieve physical port name
5562 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5564 LPFC_MBOXQ_t *mboxq;
5565 struct lpfc_mbx_get_port_name *get_port_name;
5566 uint32_t shdr_status, shdr_add_status;
5567 union lpfc_sli4_cfg_shdr *shdr;
5568 char cport_name = 0;
5571 /* We assume nothing at this point */
5572 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5573 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5575 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5578 /* obtain link type and link number via READ_CONFIG */
5579 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5580 lpfc_sli4_read_config(phba);
5581 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5582 goto retrieve_ppname;
5584 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5585 rc = lpfc_sli4_get_ctl_attr(phba);
5587 goto out_free_mboxq;
5590 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5591 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5592 sizeof(struct lpfc_mbx_get_port_name) -
5593 sizeof(struct lpfc_sli4_cfg_mhdr),
5594 LPFC_SLI4_MBX_EMBED);
5595 get_port_name = &mboxq->u.mqe.un.get_port_name;
5596 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5597 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5598 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5599 phba->sli4_hba.lnk_info.lnk_tp);
5600 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5601 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5602 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5603 if (shdr_status || shdr_add_status || rc) {
5604 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5605 "3087 Mailbox x%x (x%x/x%x) failed: "
5606 "rc:x%x, status:x%x, add_status:x%x\n",
5607 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5608 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5609 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5610 rc, shdr_status, shdr_add_status);
5612 goto out_free_mboxq;
5614 switch (phba->sli4_hba.lnk_info.lnk_no) {
5615 case LPFC_LINK_NUMBER_0:
5616 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5617 &get_port_name->u.response);
5618 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5620 case LPFC_LINK_NUMBER_1:
5621 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5622 &get_port_name->u.response);
5623 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5625 case LPFC_LINK_NUMBER_2:
5626 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5627 &get_port_name->u.response);
5628 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5630 case LPFC_LINK_NUMBER_3:
5631 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5632 &get_port_name->u.response);
5633 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5639 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5640 phba->Port[0] = cport_name;
5641 phba->Port[1] = '\0';
5642 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5643 "3091 SLI get port name: %s\n", phba->Port);
5647 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5648 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5650 mempool_free(mboxq, phba->mbox_mem_pool);
5655 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5656 * @phba: pointer to lpfc hba data structure.
5658 * This routine is called to explicitly arm the SLI4 device's completion and event queues.
5662 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5665 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5666 struct lpfc_sli4_hdw_queue *qp;
5667 struct lpfc_queue *eq;
5669 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5670 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
5671 if (sli4_hba->nvmels_cq)
5672 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5675 if (sli4_hba->hdwq) {
5676 /* Loop thru all Hardware Queues */
5677 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5678 qp = &sli4_hba->hdwq[qidx];
5679 /* ARM the corresponding CQ */
5680 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
5684 /* Loop thru all IRQ vectors */
5685 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
5686 eq = sli4_hba->hba_eq_hdl[qidx].eq;
5687 /* ARM the corresponding EQ */
5688 sli4_hba->sli4_write_eq_db(phba, eq,
5689 0, LPFC_QUEUE_REARM);
5693 if (phba->nvmet_support) {
5694 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5695 sli4_hba->sli4_write_cq_db(phba,
5696 sli4_hba->nvmet_cqset[qidx], 0,
5703 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5704 * @phba: Pointer to HBA context object.
5705 * @type: The resource extent type.
5706 * @extnt_count: buffer to hold port available extent count.
5707 * @extnt_size: buffer to hold element count per extent.
5709 * This function calls the port and retrieves the number of available
5710 * extents and their size for a particular extent type.
5712 * Returns: 0 if successful. Nonzero otherwise.
5715 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5716 uint16_t *extnt_count, uint16_t *extnt_size)
5721 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5724 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5728 /* Find out how many extents are available for this resource type */
5729 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5730 sizeof(struct lpfc_sli4_cfg_mhdr));
5731 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5732 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5733 length, LPFC_SLI4_MBX_EMBED);
5735 /* Send an extents count of 0 - the GET doesn't use it. */
5736 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5737 LPFC_SLI4_MBX_EMBED);
5743 if (!phba->sli4_hba.intr_enable)
5744 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5746 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5747 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5754 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5755 if (bf_get(lpfc_mbox_hdr_status,
5756 &rsrc_info->header.cfg_shdr.response)) {
5757 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5758 "2930 Failed to get resource extents "
5759 "Status 0x%x Add'l Status 0x%x\n",
5760 bf_get(lpfc_mbox_hdr_status,
5761 &rsrc_info->header.cfg_shdr.response),
5762 bf_get(lpfc_mbox_hdr_add_status,
5763 &rsrc_info->header.cfg_shdr.response));
5768 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5770 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5773 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5774 "3162 Retrieved extents type-%d from port: count:%d, "
5775 "size:%d\n", type, *extnt_count, *extnt_size);
5778 mempool_free(mbox, phba->mbox_mem_pool);
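/*
 * Illustrative sketch, not driver code: the mailbox above is issued with
 * the recurring "poll when interrupts are disabled, otherwise wait with a
 * timeout" pattern seen throughout this file. Factored into a hypothetical
 * helper, it reads:
 */
static int demo_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	uint32_t mbox_tmo;

	if (!phba->sli4_hba.intr_enable)
		return lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
	return lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}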
5783 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5784 * @phba: Pointer to HBA context object.
5785 * @type: The extent type to check.
5787 * This function reads the current available extents from the port and checks
5788 * if the extent count or extent size has changed since the last access.
5789 * Callers use this routine after a port reset to determine if there is an
5790 * extent reprovisioning requirement.
5793 * -Error: error indicates problem.
5794 * 1: Extent count or size has changed.
5798 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5800 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5801 uint16_t size_diff, rsrc_ext_size;
5803 struct lpfc_rsrc_blks *rsrc_entry;
5804 struct list_head *rsrc_blk_list = NULL;
5808 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5815 case LPFC_RSC_TYPE_FCOE_RPI:
5816 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5818 case LPFC_RSC_TYPE_FCOE_VPI:
5819 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5821 case LPFC_RSC_TYPE_FCOE_XRI:
5822 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5824 case LPFC_RSC_TYPE_FCOE_VFI:
5825 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5831 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5833 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5837 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5844 * lpfc_sli4_cfg_post_extnts -
5845 * @phba: Pointer to HBA context object.
5846 * @extnt_cnt: number of available extents.
5847 * @type: the extent type (rpi, xri, vfi, vpi).
5848 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5849 * @mbox: pointer to the caller's allocated mailbox structure.
5851 * This function executes the extents allocation request. It also
5852 * takes care of the amount of memory needed to allocate or get the
5853 * allocated extents. It is the caller's responsibility to evaluate the response.
5857 * -Error: Error value describes the condition found.
5861 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5862 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5867 uint32_t alloc_len, mbox_tmo;
5869 /* Calculate the total requested length of the dma memory */
5870 req_len = extnt_cnt * sizeof(uint16_t);
5873 /* Calculate the size of an embedded mailbox. The uint32_t
5874 * accounts for the extents-specific word. */
5876 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5880 /* Presume the allocation and response will fit into an embedded
5881 * mailbox. If not true, reconfigure to a non-embedded mailbox. */
5883 *emb = LPFC_SLI4_MBX_EMBED;
5884 if (req_len > emb_len) {
5885 req_len = extnt_cnt * sizeof(uint16_t) +
5886 sizeof(union lpfc_sli4_cfg_shdr) +
5888 *emb = LPFC_SLI4_MBX_NEMBED;
5891 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5892 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5894 if (alloc_len < req_len) {
5895 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5896 "2982 Allocated DMA memory size (x%x) is "
5897 "less than the requested DMA memory "
5898 "size (x%x)\n", alloc_len, req_len);
5901 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5905 if (!phba->sli4_hba.intr_enable)
5906 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5908 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5909 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
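/*
 * Illustrative sketch, not driver code: the embed/non-embed choice above
 * reduces to "does the extent-id array fit inside the mailbox itself".
 * demo_fits_embedded() is a hypothetical restatement of that test, using
 * the same sizing terms as the code above.
 */
static bool demo_fits_embedded(uint16_t extnt_cnt)
{
	size_t req_len = extnt_cnt * sizeof(uint16_t);
	size_t emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
			 sizeof(uint32_t);	/* extents-specific word */

	return req_len <= emb_len;
}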
5918 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5919 * @phba: Pointer to HBA context object.
5920 * @type: The resource extent type to allocate.
5922 * This function allocates the number of elements for the specified
5926 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5929 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5930 uint16_t rsrc_id, rsrc_start, j, k;
5933 unsigned long longs;
5934 unsigned long *bmask;
5935 struct lpfc_rsrc_blks *rsrc_blks;
5938 struct lpfc_id_range *id_array = NULL;
5939 void *virtaddr = NULL;
5940 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5941 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5942 struct list_head *ext_blk_list;
5944 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5950 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5951 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5952 "3009 No available Resource Extents "
5953 "for resource type 0x%x: Count: 0x%x, "
5954 "Size 0x%x\n", type, rsrc_cnt,
5959 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5960 "2903 Post resource extents type-0x%x: "
5961 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5963 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5967 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5974 /* Figure out where the response is located. Then get local pointers
5975 * to the response data. The port does not guarantee to respond to
5976 * all extent count requests, so update the local variable with the
5977 * allocated count from the port. */
5979 if (emb == LPFC_SLI4_MBX_EMBED) {
5980 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5981 id_array = &rsrc_ext->u.rsp.id[0];
5982 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5984 virtaddr = mbox->sge_array->addr[0];
5985 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5986 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5987 id_array = &n_rsrc->id;
5990 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5991 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5994 /* Based on the resource size and count, correct the base and max resource values. */
5997 length = sizeof(struct lpfc_rsrc_blks);
5999 case LPFC_RSC_TYPE_FCOE_RPI:
6000 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6001 sizeof(unsigned long),
6003 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6007 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6010 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6011 kfree(phba->sli4_hba.rpi_bmask);
6017 /* The next_rpi was initialized with the maximum available
6018 * count but the port may allocate a smaller number. Catch
6019 * that case and update the next_rpi. */
6021 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6023 /* Initialize local ptrs for common extent processing later. */
6024 bmask = phba->sli4_hba.rpi_bmask;
6025 ids = phba->sli4_hba.rpi_ids;
6026 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6028 case LPFC_RSC_TYPE_FCOE_VPI:
6029 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6031 if (unlikely(!phba->vpi_bmask)) {
6035 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6037 if (unlikely(!phba->vpi_ids)) {
6038 kfree(phba->vpi_bmask);
6043 /* Initialize local ptrs for common extent processing later. */
6044 bmask = phba->vpi_bmask;
6045 ids = phba->vpi_ids;
6046 ext_blk_list = &phba->lpfc_vpi_blk_list;
6048 case LPFC_RSC_TYPE_FCOE_XRI:
6049 phba->sli4_hba.xri_bmask = kcalloc(longs,
6050 sizeof(unsigned long),
6052 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6056 phba->sli4_hba.max_cfg_param.xri_used = 0;
6057 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6060 if (unlikely(!phba->sli4_hba.xri_ids)) {
6061 kfree(phba->sli4_hba.xri_bmask);
6066 /* Initialize local ptrs for common extent processing later. */
6067 bmask = phba->sli4_hba.xri_bmask;
6068 ids = phba->sli4_hba.xri_ids;
6069 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6071 case LPFC_RSC_TYPE_FCOE_VFI:
6072 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6073 sizeof(unsigned long),
6075 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6079 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6082 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6083 kfree(phba->sli4_hba.vfi_bmask);
6088 /* Initialize local ptrs for common extent processing later. */
6089 bmask = phba->sli4_hba.vfi_bmask;
6090 ids = phba->sli4_hba.vfi_ids;
6091 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6094 /* Unsupported Opcode. Fail call. */
6098 ext_blk_list = NULL;
6103 * Complete initializing the extent configuration with the
6104 * allocated ids assigned to this function. The bitmask serves
6105 * as an index into the array and manages the available ids. The
6106 * array just stores the ids communicated to the port via the wqes.
6108 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6110 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6113 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6116 rsrc_blks = kzalloc(length, GFP_KERNEL);
6117 if (unlikely(!rsrc_blks)) {
6123 rsrc_blks->rsrc_start = rsrc_id;
6124 rsrc_blks->rsrc_size = rsrc_size;
6125 list_add_tail(&rsrc_blks->list, ext_blk_list);
6126 rsrc_start = rsrc_id;
6127 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6128 phba->sli4_hba.io_xri_start = rsrc_start +
6129 lpfc_sli4_get_iocb_cnt(phba);
6132 while (rsrc_id < (rsrc_start + rsrc_size)) {
6137 /* Entire word processed. Get next word.*/
6142 lpfc_sli4_mbox_cmd_free(phba, mbox);
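
/*
 * Illustrative sketch only (not part of the driver): how a consumer of
 * the structures built above typically pairs the bitmask with the id
 * array. The bitmask hands out a free logical index; the id array maps
 * it to the physical id negotiated with the port. The helper name and
 * the 0xffff sentinel are hypothetical.
 */
static inline uint16_t lpfc_example_alloc_id(unsigned long *bmask,
					     uint16_t *ids, uint16_t limit)
{
	uint16_t idx = find_first_zero_bit(bmask, limit);

	if (idx >= limit)
		return 0xffff;		/* no free logical index */
	set_bit(idx, bmask);		/* mark the index in use */
	return ids[idx];		/* physical id known to the port */
}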
6149 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6150 * @phba: Pointer to HBA context object.
6151 * @type: the extent's type.
6153 * This function deallocates all extents of a particular resource type.
6154 * SLI4 does not allow for deallocating a particular extent range. It
6155 * is the caller's responsibility to release all kernel memory resources.
6158 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6161 uint32_t length, mbox_tmo = 0;
6163 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6164 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6166 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6171 * This function sends an embedded mailbox because it only sends
6172 * the resource type. All extents of this type are released by the
6175 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6176 sizeof(struct lpfc_sli4_cfg_mhdr));
6177 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6178 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6179 length, LPFC_SLI4_MBX_EMBED);
6181 /* Send an extents count of 0 - the dealloc doesn't use it. */
6182 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6183 LPFC_SLI4_MBX_EMBED);
6188 if (!phba->sli4_hba.intr_enable)
6189 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6191 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6192 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6199 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6200 if (bf_get(lpfc_mbox_hdr_status,
6201 &dealloc_rsrc->header.cfg_shdr.response)) {
6202 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6203 "2919 Failed to release resource extents "
6204 "for type %d - Status 0x%x Add'l Status 0x%x. "
6205 "Resource memory not released.\n",
6207 bf_get(lpfc_mbox_hdr_status,
6208 &dealloc_rsrc->header.cfg_shdr.response),
6209 bf_get(lpfc_mbox_hdr_add_status,
6210 &dealloc_rsrc->header.cfg_shdr.response));
6215 /* Release kernel memory resources for the specific type. */
6217 case LPFC_RSC_TYPE_FCOE_VPI:
6218 kfree(phba->vpi_bmask);
6219 kfree(phba->vpi_ids);
6220 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6221 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6222 &phba->lpfc_vpi_blk_list, list) {
6223 list_del_init(&rsrc_blk->list);
6226 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6228 case LPFC_RSC_TYPE_FCOE_XRI:
6229 kfree(phba->sli4_hba.xri_bmask);
6230 kfree(phba->sli4_hba.xri_ids);
6231 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6232 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6233 list_del_init(&rsrc_blk->list);
6237 case LPFC_RSC_TYPE_FCOE_VFI:
6238 kfree(phba->sli4_hba.vfi_bmask);
6239 kfree(phba->sli4_hba.vfi_ids);
6240 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6241 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6242 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6243 list_del_init(&rsrc_blk->list);
6247 case LPFC_RSC_TYPE_FCOE_RPI:
6248 /* RPI bitmask and physical id array are cleaned up earlier. */
6249 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6250 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6251 list_del_init(&rsrc_blk->list);
6259 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6262 mempool_free(mbox, phba->mbox_mem_pool);
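
/*
 * Illustrative sketch only (hypothetical macro, not in the driver): the
 * embedded SLI4_CONFIG payload length used above is derived the same way
 * throughout this file - the request structure size minus the common
 * configuration header, which lpfc_sli4_config() accounts for itself.
 */
#define LPFC_EXAMPLE_EMB_LEN(req_type) \
	(sizeof(req_type) - sizeof(struct lpfc_sli4_cfg_mhdr))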
6267 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6272 len = sizeof(struct lpfc_mbx_set_feature) -
6273 sizeof(struct lpfc_sli4_cfg_mhdr);
6274 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6275 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6276 LPFC_SLI4_MBX_EMBED);
6279 case LPFC_SET_UE_RECOVERY:
6280 bf_set(lpfc_mbx_set_feature_UER,
6281 &mbox->u.mqe.un.set_feature, 1);
6282 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6283 mbox->u.mqe.un.set_feature.param_len = 8;
6285 case LPFC_SET_MDS_DIAGS:
6286 bf_set(lpfc_mbx_set_feature_mds,
6287 &mbox->u.mqe.un.set_feature, 1);
6288 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6289 &mbox->u.mqe.un.set_feature, 1);
6290 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6291 mbox->u.mqe.un.set_feature.param_len = 8;
6293 case LPFC_SET_DUAL_DUMP:
6294 bf_set(lpfc_mbx_set_feature_dd,
6295 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6296 bf_set(lpfc_mbx_set_feature_ddquery,
6297 &mbox->u.mqe.un.set_feature, 0);
6298 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6299 mbox->u.mqe.un.set_feature.param_len = 4;
6307 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6308 * @phba: Pointer to HBA context object.
6310 * Disable FW logging into host memory on the adapter. This must
6311 * be done before reading logs from the host memory.
6314 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6316 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6318 spin_lock_irq(&phba->hbalock);
6319 ras_fwlog->state = INACTIVE;
6320 spin_unlock_irq(&phba->hbalock);
6322 /* Disable FW logging to host memory */
6323 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6324 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6326 /* Wait 10ms for firmware to stop using DMA buffer */
6327 usleep_range(10 * 1000, 20 * 1000);
6331 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6332 * @phba: Pointer to HBA context object.
6334 * This function is called to free memory allocated for RAS FW logging
6335 * support in the driver.
6338 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6340 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6341 struct lpfc_dmabuf *dmabuf, *next;
6343 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6344 list_for_each_entry_safe(dmabuf, next,
6345 &ras_fwlog->fwlog_buff_list,
6347 list_del(&dmabuf->list);
6348 dma_free_coherent(&phba->pcidev->dev,
6349 LPFC_RAS_MAX_ENTRY_SIZE,
6350 dmabuf->virt, dmabuf->phys);
6355 if (ras_fwlog->lwpd.virt) {
6356 dma_free_coherent(&phba->pcidev->dev,
6357 sizeof(uint32_t) * 2,
6358 ras_fwlog->lwpd.virt,
6359 ras_fwlog->lwpd.phys);
6360 ras_fwlog->lwpd.virt = NULL;
6363 spin_lock_irq(&phba->hbalock);
6364 ras_fwlog->state = INACTIVE;
6365 spin_unlock_irq(&phba->hbalock);
6369 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support
6370 * @phba: Pointer to HBA context object.
6371 * @fwlog_buff_count: Count of buffers to be created.
6373 * This routine allocates DMA memory for the Log Write Position Data
6374 * (LWPD) and for the buffers posted to the adapter for FW log updates.
6375 * Buffer count is calculated from the module parameter
6376 * ras_fwlog_buffsize; each buffer posted to FW is 64 KB.
6380 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6381 uint32_t fwlog_buff_count)
6383 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6384 struct lpfc_dmabuf *dmabuf;
6387 /* Initialize List */
6388 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6390 /* Allocate memory for the LWPD */
6391 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6392 sizeof(uint32_t) * 2,
6393 &ras_fwlog->lwpd.phys,
6395 if (!ras_fwlog->lwpd.virt) {
6396 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6397 "6185 LWPD Memory Alloc Failed\n");
6402 ras_fwlog->fw_buffcount = fwlog_buff_count;
6403 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6404 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6408 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6409 "6186 Memory Alloc failed FW logging");
6413 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6414 LPFC_RAS_MAX_ENTRY_SIZE,
6415 &dmabuf->phys, GFP_KERNEL);
6416 if (!dmabuf->virt) {
6419 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6420 "6187 DMA Alloc Failed FW logging");
6423 dmabuf->buffer_tag = i;
6424 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6429 lpfc_sli4_ras_dma_free(phba);
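
/*
 * Illustrative arithmetic only (assumes LPFC_RAS_MIN_BUFF_POST_SIZE is
 * 256 KB and LPFC_RAS_MAX_ENTRY_SIZE is 64 KB, as defined in lpfc_hw4.h;
 * the helper name is hypothetical): each unit of the ras_fwlog_buffsize
 * module parameter buys four 64 KB DMA buffers.
 */
static inline uint32_t lpfc_example_fwlog_buf_count(uint32_t buffsize_param)
{
	/* e.g. buffsize_param = 1 -> (256 KB * 1) / 64 KB = 4 buffers */
	return (LPFC_RAS_MIN_BUFF_POST_SIZE * buffsize_param) /
		LPFC_RAS_MAX_ENTRY_SIZE;
}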
6435 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6436 * @phba: pointer to lpfc hba data structure.
6437 * @pmb: pointer to the driver internal queue element for mailbox command.
6439 * Completion handler for driver's RAS MBX command to the device.
6442 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6445 union lpfc_sli4_cfg_shdr *shdr;
6446 uint32_t shdr_status, shdr_add_status;
6447 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6451 shdr = (union lpfc_sli4_cfg_shdr *)
6452 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6453 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6454 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6456 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6457 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6458 "6188 FW LOG mailbox "
6459 "completed with status x%x add_status x%x,"
6460 " mbx status x%x\n",
6461 shdr_status, shdr_add_status, mb->mbxStatus);
6463 ras_fwlog->ras_hwsupport = false;
6467 spin_lock_irq(&phba->hbalock);
6468 ras_fwlog->state = ACTIVE;
6469 spin_unlock_irq(&phba->hbalock);
6470 mempool_free(pmb, phba->mbox_mem_pool);
6475 /* Free RAS DMA memory */
6476 lpfc_sli4_ras_dma_free(phba);
6477 mempool_free(pmb, phba->mbox_mem_pool);
6481 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
6482 * @phba: pointer to lpfc hba data structure.
6483 * @fwlog_level: Logging verbosity level.
6484 * @fwlog_enable: Enable/Disable logging.
6486 * Initialize memory and post mailbox command to enable FW logging in host
6490 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6491 uint32_t fwlog_level,
6492 uint32_t fwlog_enable)
6494 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6495 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6496 struct lpfc_dmabuf *dmabuf;
6498 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6501 spin_lock_irq(&phba->hbalock);
6502 ras_fwlog->state = INACTIVE;
6503 spin_unlock_irq(&phba->hbalock);
6505 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6506 phba->cfg_ras_fwlog_buffsize);
6507 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6510 * If re-enabling FW logging support use earlier allocated
6511 * DMA buffers while posting MBX command.
6513 if (!ras_fwlog->lwpd.virt) {
6514 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6516 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6517 "6189 FW Log Memory Allocation Failed");
6522 /* Setup Mailbox command */
6523 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6525 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6526 "6190 RAS MBX Alloc Failed");
6531 ras_fwlog->fw_loglevel = fwlog_level;
6532 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6533 sizeof(struct lpfc_sli4_cfg_mhdr));
6535 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6536 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6537 len, LPFC_SLI4_MBX_EMBED);
6539 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6540 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6542 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6543 ras_fwlog->fw_loglevel);
6544 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6545 ras_fwlog->fw_buffcount);
6546 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6547 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6549 /* Update DMA buffer address */
6550 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6551 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6553 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6554 putPaddrLow(dmabuf->phys);
6556 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6557 putPaddrHigh(dmabuf->phys);
6560 /* Update LWPD address */
6561 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6562 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6564 spin_lock_irq(&phba->hbalock);
6565 ras_fwlog->state = REG_INPROGRESS;
6566 spin_unlock_irq(&phba->hbalock);
6567 mbox->vport = phba->pport;
6568 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6570 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6572 if (rc == MBX_NOT_FINISHED) {
6573 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6574 "6191 FW-Log Mailbox failed. "
6575 "status %d mbxStatus : x%x", rc,
6576 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6577 mempool_free(mbox, phba->mbox_mem_pool);
6584 lpfc_sli4_ras_dma_free(phba);
6590 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
6591 * @phba: Pointer to HBA context object.
6593 * Check if RAS is supported on the adapter and initialize it.
6596 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6598 /* Check whether the RAS FW log needs to be enabled */
6599 if (lpfc_check_fwlog_support(phba))
6602 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6603 LPFC_RAS_ENABLE_LOGGING);
6607 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6608 * @phba: Pointer to HBA context object.
6610 * This function allocates all SLI4 resource identifiers.
6613 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6615 int i, rc, error = 0;
6616 uint16_t count, base;
6617 unsigned long longs;
6619 if (!phba->sli4_hba.rpi_hdrs_in_use)
6620 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6621 if (phba->sli4_hba.extents_in_use) {
6623 * The port supports resource extents. The XRI, VPI, VFI, RPI
6624 * resource extent count must be read and allocated before
6625 * provisioning the resource id arrays.
6627 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6628 LPFC_IDX_RSRC_RDY) {
6630 * Extent-based resources are set - the driver could
6631 * be in a port reset. Figure out if any corrective
6632 * actions need to be taken.
6634 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6635 LPFC_RSC_TYPE_FCOE_VFI);
6638 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6639 LPFC_RSC_TYPE_FCOE_VPI);
6642 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6643 LPFC_RSC_TYPE_FCOE_XRI);
6646 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6647 LPFC_RSC_TYPE_FCOE_RPI);
6652 * It's possible that the number of resources
6653 * provided to this port instance changed between
6654 * resets. Detect this condition and reallocate
6655 * resources. Otherwise, there is no action.
6658 lpfc_printf_log(phba, KERN_INFO,
6659 LOG_MBOX | LOG_INIT,
6660 "2931 Detected extent resource "
6661 "change. Reallocating all "
6663 rc = lpfc_sli4_dealloc_extent(phba,
6664 LPFC_RSC_TYPE_FCOE_VFI);
6665 rc = lpfc_sli4_dealloc_extent(phba,
6666 LPFC_RSC_TYPE_FCOE_VPI);
6667 rc = lpfc_sli4_dealloc_extent(phba,
6668 LPFC_RSC_TYPE_FCOE_XRI);
6669 rc = lpfc_sli4_dealloc_extent(phba,
6670 LPFC_RSC_TYPE_FCOE_RPI);
6675 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6679 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6683 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6687 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6690 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6695 * The port does not support resource extents. The XRI, VPI,
6696 * VFI, RPI resource ids were determined from READ_CONFIG.
6697 * Just allocate the bitmasks and provision the resource id
6698 * arrays. If a port reset is active, the resources don't
6699 * need any action - just exit.
6701 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6702 LPFC_IDX_RSRC_RDY) {
6703 lpfc_sli4_dealloc_resource_identifiers(phba);
6704 lpfc_sli4_remove_rpis(phba);
6707 count = phba->sli4_hba.max_cfg_param.max_rpi;
6709 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6710 "3279 Invalid provisioning of "
6715 base = phba->sli4_hba.max_cfg_param.rpi_base;
6716 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6717 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6718 sizeof(unsigned long),
6720 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6724 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6726 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6728 goto free_rpi_bmask;
6731 for (i = 0; i < count; i++)
6732 phba->sli4_hba.rpi_ids[i] = base + i;
6735 count = phba->sli4_hba.max_cfg_param.max_vpi;
6737 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6738 "3280 Invalid provisioning of "
6743 base = phba->sli4_hba.max_cfg_param.vpi_base;
6744 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6745 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6747 if (unlikely(!phba->vpi_bmask)) {
6751 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6753 if (unlikely(!phba->vpi_ids)) {
6755 goto free_vpi_bmask;
6758 for (i = 0; i < count; i++)
6759 phba->vpi_ids[i] = base + i;
6762 count = phba->sli4_hba.max_cfg_param.max_xri;
6764 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6765 "3281 Invalid provisioning of "
6770 base = phba->sli4_hba.max_cfg_param.xri_base;
6771 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6772 phba->sli4_hba.xri_bmask = kcalloc(longs,
6773 sizeof(unsigned long),
6775 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6779 phba->sli4_hba.max_cfg_param.xri_used = 0;
6780 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6782 if (unlikely(!phba->sli4_hba.xri_ids)) {
6784 goto free_xri_bmask;
6787 for (i = 0; i < count; i++)
6788 phba->sli4_hba.xri_ids[i] = base + i;
6791 count = phba->sli4_hba.max_cfg_param.max_vfi;
6793 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6794 "3282 Invalid provisioning of "
6799 base = phba->sli4_hba.max_cfg_param.vfi_base;
6800 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6801 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6802 sizeof(unsigned long),
6804 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6808 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6810 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6812 goto free_vfi_bmask;
6815 for (i = 0; i < count; i++)
6816 phba->sli4_hba.vfi_ids[i] = base + i;
6819 * Mark all resources ready. An HBA reset doesn't need
6820 * to redo this initialization.
6822 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6828 kfree(phba->sli4_hba.vfi_bmask);
6829 phba->sli4_hba.vfi_bmask = NULL;
6831 kfree(phba->sli4_hba.xri_ids);
6832 phba->sli4_hba.xri_ids = NULL;
6834 kfree(phba->sli4_hba.xri_bmask);
6835 phba->sli4_hba.xri_bmask = NULL;
6837 kfree(phba->vpi_ids);
6838 phba->vpi_ids = NULL;
6840 kfree(phba->vpi_bmask);
6841 phba->vpi_bmask = NULL;
6843 kfree(phba->sli4_hba.rpi_ids);
6844 phba->sli4_hba.rpi_ids = NULL;
6846 kfree(phba->sli4_hba.rpi_bmask);
6847 phba->sli4_hba.rpi_bmask = NULL;
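
/*
 * Illustrative sketch only (hypothetical helper): in the non-extent case
 * handled above, the id arrays are a simple base-relative mapping, so
 * translating a driver-local (logical) index to the id the port expects
 * is a plain array lookup.
 */
static inline uint16_t lpfc_example_phys_rpi(struct lpfc_hba *phba,
					     uint16_t logical_rpi)
{
	/* rpi_ids[i] was initialized above to rpi_base + i */
	return phba->sli4_hba.rpi_ids[logical_rpi];
}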
6853 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6854 * @phba: Pointer to HBA context object.
6856 * This function deallocates all SLI4 resource identifiers and bitmasks.
6860 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6862 if (phba->sli4_hba.extents_in_use) {
6863 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6864 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6865 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6866 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6868 kfree(phba->vpi_bmask);
6869 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6870 kfree(phba->vpi_ids);
6871 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6872 kfree(phba->sli4_hba.xri_bmask);
6873 kfree(phba->sli4_hba.xri_ids);
6874 kfree(phba->sli4_hba.vfi_bmask);
6875 kfree(phba->sli4_hba.vfi_ids);
6876 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6877 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6884 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6885 * @phba: Pointer to HBA context object.
6886 * @type: The resource extent type.
6887 * @extnt_cnt: buffer to hold port extent count response
6888 * @extnt_size: buffer to hold port extent size response.
6890 * This function calls the port to read the host allocated extents
6891 * for a particular type.
6894 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6895 uint16_t *extnt_cnt, uint16_t *extnt_size)
6899 uint16_t curr_blks = 0;
6900 uint32_t req_len, emb_len;
6901 uint32_t alloc_len, mbox_tmo;
6902 struct list_head *blk_list_head;
6903 struct lpfc_rsrc_blks *rsrc_blk;
6905 void *virtaddr = NULL;
6906 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6907 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6908 union lpfc_sli4_cfg_shdr *shdr;
6911 case LPFC_RSC_TYPE_FCOE_VPI:
6912 blk_list_head = &phba->lpfc_vpi_blk_list;
6914 case LPFC_RSC_TYPE_FCOE_XRI:
6915 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6917 case LPFC_RSC_TYPE_FCOE_VFI:
6918 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6920 case LPFC_RSC_TYPE_FCOE_RPI:
6921 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6927 /* Count the number of extents currently allocated for this type. */
6928 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6929 if (curr_blks == 0) {
6931 * The GET_ALLOCATED mailbox does not return the size,
6932 * just the count. The size is simply the size stored in the
6933 * first allocated block; all extents of a type share the same
6934 * size, so set the return
6937 *extnt_size = rsrc_blk->rsrc_size;
6943 * Calculate the size of an embedded mailbox. The uint32_t
6944 * accounts for the extent-specific word.
6946 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6950 * Presume the allocation and response will fit into an embedded
6951 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6953 emb = LPFC_SLI4_MBX_EMBED;
6955 if (req_len > emb_len) {
6956 req_len = curr_blks * sizeof(uint16_t) +
6957 sizeof(union lpfc_sli4_cfg_shdr) +
6959 emb = LPFC_SLI4_MBX_NEMBED;
6962 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6965 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6967 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6968 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6970 if (alloc_len < req_len) {
6971 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6972 "2983 Allocated DMA memory size (x%x) is "
6973 "less than the requested DMA memory "
6974 "size (x%x)\n", alloc_len, req_len);
6978 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6984 if (!phba->sli4_hba.intr_enable)
6985 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6987 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6988 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6997 * Figure out where the response is located. Then get local pointers
6998 * to the response data. The port does not guarantee to respond to
6999 * the full extent count requested, so update the local variable with
7000 * the count actually allocated by the port.
7002 if (emb == LPFC_SLI4_MBX_EMBED) {
7003 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7004 shdr = &rsrc_ext->header.cfg_shdr;
7005 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7007 virtaddr = mbox->sge_array->addr[0];
7008 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7009 shdr = &n_rsrc->cfg_shdr;
7010 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7013 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7014 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7015 "2984 Failed to read allocated resources "
7016 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7018 bf_get(lpfc_mbox_hdr_status, &shdr->response),
7019 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7024 lpfc_sli4_mbox_cmd_free(phba, mbox);
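
/*
 * Illustrative sketch only (hypothetical helper): the embedded versus
 * non-embedded decision made above generalizes - if the expected
 * response cannot fit in the embedded MAILBOX_t payload, the command
 * must be carried in SGE-mapped (non-embedded) memory instead.
 */
static inline bool lpfc_example_pick_emb(uint32_t req_len, uint32_t emb_len)
{
	return (req_len > emb_len) ? LPFC_SLI4_MBX_NEMBED :
				     LPFC_SLI4_MBX_EMBED;
}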
7029 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
7030 * @phba: pointer to lpfc hba data structure.
7031 * @sgl_list: linked link of sgl buffers to post
7032 * @cnt: number of linked list buffers
7034 * This routine walks the list of buffers that have been allocated and
7035 * reposts them to the port using SGL block post. This is needed after a
7036 * pci_function_reset/warm_start or start. It attempts to construct blocks
7037 * of buffer sgls which contains contiguous xris and uses the non-embedded
7038 * SGL block post mailbox commands to post them to the port. For single
7039 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
7040 * mailbox command for posting.
7042 * Returns: 0 = success, non-zero failure.
7045 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7046 struct list_head *sgl_list, int cnt)
7048 struct lpfc_sglq *sglq_entry = NULL;
7049 struct lpfc_sglq *sglq_entry_next = NULL;
7050 struct lpfc_sglq *sglq_entry_first = NULL;
7051 int status, total_cnt;
7052 int post_cnt = 0, num_posted = 0, block_cnt = 0;
7053 int last_xritag = NO_XRI;
7054 LIST_HEAD(prep_sgl_list);
7055 LIST_HEAD(blck_sgl_list);
7056 LIST_HEAD(allc_sgl_list);
7057 LIST_HEAD(post_sgl_list);
7058 LIST_HEAD(free_sgl_list);
7060 spin_lock_irq(&phba->hbalock);
7061 spin_lock(&phba->sli4_hba.sgl_list_lock);
7062 list_splice_init(sgl_list, &allc_sgl_list);
7063 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7064 spin_unlock_irq(&phba->hbalock);
7067 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7068 &allc_sgl_list, list) {
7069 list_del_init(&sglq_entry->list);
7071 if ((last_xritag != NO_XRI) &&
7072 (sglq_entry->sli4_xritag != last_xritag + 1)) {
7073 /* a hole in xri block, form a sgl posting block */
7074 list_splice_init(&prep_sgl_list, &blck_sgl_list);
7075 post_cnt = block_cnt - 1;
7076 /* prepare list for next posting block */
7077 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7080 /* prepare list for next posting block */
7081 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7082 /* enough sgls for non-embed sgl mbox command */
7083 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7084 list_splice_init(&prep_sgl_list,
7086 post_cnt = block_cnt;
7092 /* keep track of last sgl's xritag */
7093 last_xritag = sglq_entry->sli4_xritag;
7095 /* end of repost sgl list condition for buffers */
7096 if (num_posted == total_cnt) {
7097 if (post_cnt == 0) {
7098 list_splice_init(&prep_sgl_list,
7100 post_cnt = block_cnt;
7101 } else if (block_cnt == 1) {
7102 status = lpfc_sli4_post_sgl(phba,
7103 sglq_entry->phys, 0,
7104 sglq_entry->sli4_xritag);
7106 /* successful, put sgl to posted list */
7107 list_add_tail(&sglq_entry->list,
7110 /* Failure, put sgl to free list */
7111 lpfc_printf_log(phba, KERN_WARNING,
7113 "3159 Failed to post "
7114 "sgl, xritag:x%x\n",
7115 sglq_entry->sli4_xritag);
7116 list_add_tail(&sglq_entry->list,
7123 /* continue until a nembed page worth of sgls */
7127 /* post the buffer list sgls as a block */
7128 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7132 /* success, put sgl list to posted sgl list */
7133 list_splice_init(&blck_sgl_list, &post_sgl_list);
7135 /* Failure, put sgl list to free sgl list */
7136 sglq_entry_first = list_first_entry(&blck_sgl_list,
7139 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7140 "3160 Failed to post sgl-list, "
7142 sglq_entry_first->sli4_xritag,
7143 (sglq_entry_first->sli4_xritag +
7145 list_splice_init(&blck_sgl_list, &free_sgl_list);
7146 total_cnt -= post_cnt;
7149 /* don't reset xritag due to hole in xri block */
7151 last_xritag = NO_XRI;
7153 /* reset sgl post count for next round of posting */
7157 /* free the sgls that failed to post */
7158 lpfc_free_sgl_list(phba, &free_sgl_list);
7160 /* push sgls posted to the available list */
7161 if (!list_empty(&post_sgl_list)) {
7162 spin_lock_irq(&phba->hbalock);
7163 spin_lock(&phba->sli4_hba.sgl_list_lock);
7164 list_splice_init(&post_sgl_list, sgl_list);
7165 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7166 spin_unlock_irq(&phba->hbalock);
7168 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7169 "3161 Failure to post sgl to port.\n");
7173 /* return the number of XRIs actually posted */
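
/*
 * Illustrative predicate only (hypothetical helper): the repost loop
 * above closes a posting block whenever the next sgl's XRI is not
 * exactly one past the previous one, because the non-embedded SGL block
 * post requires a contiguous XRI range.
 */
static inline bool lpfc_example_xri_contiguous(int last_xritag,
					       uint16_t next_xritag)
{
	return last_xritag == NO_XRI || next_xritag == last_xritag + 1;
}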
7178 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7179 * @phba: pointer to lpfc hba data structure.
7181 * This routine walks the list of nvme buffers that have been allocated and
7182 * reposts them to the port using SGL block post. This is needed after a
7183 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7184 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7185 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7187 * Returns: 0 = success, non-zero failure.
7190 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7192 LIST_HEAD(post_nblist);
7193 int num_posted, rc = 0;
7195 /* gather all NVME buffers that need reposting onto a local list */
7196 lpfc_io_buf_flush(phba, &post_nblist);
7198 /* post the list of nvme buffer sgls to port if available */
7199 if (!list_empty(&post_nblist)) {
7200 num_posted = lpfc_sli4_post_io_sgl_list(
7201 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7202 /* failed to post any nvme buffer, return error */
7203 if (num_posted == 0)
7210 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7214 len = sizeof(struct lpfc_mbx_set_host_data) -
7215 sizeof(struct lpfc_sli4_cfg_mhdr);
7216 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7217 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7218 LPFC_SLI4_MBX_EMBED);
7220 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7221 mbox->u.mqe.un.set_host_data.param_len =
7222 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7223 snprintf(mbox->u.mqe.un.set_host_data.data,
7224 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7225 "Linux %s v"LPFC_DRIVER_VERSION,
7226 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
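
/*
 * Example of the host data string built above (the version value is
 * illustrative only; it comes from LPFC_DRIVER_VERSION at build time):
 *
 *   "Linux FC v14.0.0.1"   - native FC mode
 *   "Linux FCoE v14.0.0.1" - FCoE mode
 */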
7230 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7231 struct lpfc_queue *drq, int count, int idx)
7234 struct lpfc_rqe hrqe;
7235 struct lpfc_rqe drqe;
7236 struct lpfc_rqb *rqbp;
7237 unsigned long flags;
7238 struct rqb_dmabuf *rqb_buffer;
7239 LIST_HEAD(rqb_buf_list);
7242 for (i = 0; i < count; i++) {
7243 spin_lock_irqsave(&phba->hbalock, flags);
7244 /* If the RQ is already full, don't bother */
7245 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7246 spin_unlock_irqrestore(&phba->hbalock, flags);
7249 spin_unlock_irqrestore(&phba->hbalock, flags);
7251 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7254 rqb_buffer->hrq = hrq;
7255 rqb_buffer->drq = drq;
7256 rqb_buffer->idx = idx;
7257 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7260 spin_lock_irqsave(&phba->hbalock, flags);
7261 while (!list_empty(&rqb_buf_list)) {
7262 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7265 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7266 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7267 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7268 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7269 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7271 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7272 "6421 Cannot post to HRQ %d: %x %x %x "
7280 rqbp->rqb_free_buffer(phba, rqb_buffer);
7282 list_add_tail(&rqb_buffer->hbuf.list,
7283 &rqbp->rqb_buffer_list);
7284 rqbp->buffer_count++;
7287 spin_unlock_irqrestore(&phba->hbalock, flags);
7292 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7293 * @phba: pointer to lpfc hba data structure.
7295 * This routine initializes the per-cq idle_stat to dynamically dictate
7296 * polling decisions.
7301 static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7304 struct lpfc_sli4_hdw_queue *hdwq;
7305 struct lpfc_queue *cq;
7306 struct lpfc_idle_stat *idle_stat;
7309 for_each_present_cpu(i) {
7310 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7313 /* Skip if we've already handled this cq's primary CPU */
7317 idle_stat = &phba->sli4_hba.idle_stat[i];
7319 idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7320 idle_stat->prev_wall = wall;
7322 if (phba->nvmet_support)
7323 cq->poll_mode = LPFC_QUEUE_WORK;
7325 cq->poll_mode = LPFC_IRQ_POLL;
7328 if (!phba->nvmet_support)
7329 schedule_delayed_work(&phba->idle_stat_delay_work,
7330 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7333 static void lpfc_sli4_dip(struct lpfc_hba *phba)
7337 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7338 if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
7339 if_type == LPFC_SLI_INTF_IF_TYPE_6) {
7340 struct lpfc_register reg_data;
7342 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7346 if (bf_get(lpfc_sliport_status_dip, &reg_data))
7347 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7348 "2904 Firmware Dump Image Present"
7354 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
7355 * @phba: Pointer to HBA context object.
7357 * This function is the main SLI4 device initialization PCI function. This
7358 * function is called by the HBA initialization code, HBA reset code and
7359 * HBA error attention handler code. Caller is not required to hold any locks.
7363 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7365 int rc, i, cnt, len, dd;
7366 LPFC_MBOXQ_t *mboxq;
7367 struct lpfc_mqe *mqe;
7370 uint32_t ftr_rsp = 0;
7371 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7372 struct lpfc_vport *vport = phba->pport;
7373 struct lpfc_dmabuf *mp;
7374 struct lpfc_rqb *rqbp;
7377 /* Perform a PCI function reset to start from clean */
7378 rc = lpfc_pci_function_reset(phba);
7382 /* Check the HBA Host Status Register for readiness */
7383 rc = lpfc_sli4_post_status_check(phba);
7387 spin_lock_irq(&phba->hbalock);
7388 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7389 flg = phba->sli.sli_flag;
7390 spin_unlock_irq(&phba->hbalock);
7391 /* Allow a little time after setting SLI_ACTIVE for any polled
7392 * MBX commands to complete via BSG.
7394 for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
7396 spin_lock_irq(&phba->hbalock);
7397 flg = phba->sli.sli_flag;
7398 spin_unlock_irq(&phba->hbalock);
7402 lpfc_sli4_dip(phba);
7405 * Allocate a single mailbox container for initializing the
7408 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7412 /* Issue READ_REV to collect vpd and FW information. */
7413 vpd_size = SLI4_PAGE_SIZE;
7414 vpd = kzalloc(vpd_size, GFP_KERNEL);
7420 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7426 mqe = &mboxq->u.mqe;
7427 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
7428 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
7429 phba->hba_flag |= HBA_FCOE_MODE;
7430 phba->fcp_embed_io = 0; /* SLI4 FC support only */
7432 phba->hba_flag &= ~HBA_FCOE_MODE;
7435 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7437 phba->hba_flag |= HBA_FIP_SUPPORT;
7439 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7441 phba->hba_flag &= ~HBA_IOQ_FLUSH;
7443 if (phba->sli_rev != LPFC_SLI_REV4) {
7444 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7445 "0376 READ_REV Error. SLI Level %d "
7446 "FCoE enabled %d\n",
7447 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
7454 * Continue initialization with default values even if the driver failed
7455 * to read FCoE param config regions; only read parameters if the
7458 if (phba->hba_flag & HBA_FCOE_MODE &&
7459 lpfc_sli4_read_fcoe_params(phba))
7460 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7461 "2570 Failed to read FCoE parameters\n");
7464 * Retrieve the sli4 device physical port name; failure to do so
7465 * is considered non-fatal.
7467 rc = lpfc_sli4_retrieve_pport_name(phba);
7469 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7470 "3080 Successful retrieving SLI4 device "
7471 "physical port name: %s.\n", phba->Port);
7473 rc = lpfc_sli4_get_ctl_attr(phba);
7475 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7476 "8351 Successful retrieving SLI4 device "
7480 * Evaluate the read rev and vpd data. Populate the driver
7481 * state with the results. If this routine fails, the failure
7482 * is not fatal as the driver will use generic values.
7484 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7485 if (unlikely(!rc)) {
7486 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7487 "0377 Error %d parsing vpd. "
7488 "Using defaults.\n", rc);
7493 /* Save information as VPD data */
7494 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7495 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7498 * This is because the first G7 ASIC doesn't support the standard
7499 * 0x5a NVME cmd descriptor type/subtype
7501 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7502 LPFC_SLI_INTF_IF_TYPE_6) &&
7503 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7504 (phba->vpd.rev.smRev == 0) &&
7505 (phba->cfg_nvme_embed_cmd == 1))
7506 phba->cfg_nvme_embed_cmd = 0;
7508 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7509 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7511 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7513 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7515 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7517 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7518 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7519 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7520 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7521 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7522 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7523 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7524 "(%d):0380 READ_REV Status x%x "
7525 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7526 mboxq->vport ? mboxq->vport->vpi : 0,
7527 bf_get(lpfc_mqe_status, mqe),
7528 phba->vpd.rev.opFwName,
7529 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7530 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
7532 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7533 LPFC_SLI_INTF_IF_TYPE_0) {
7534 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7535 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7536 if (rc == MBX_SUCCESS) {
7537 phba->hba_flag |= HBA_RECOVERABLE_UE;
7538 /* Set 1Sec interval to detect UE */
7539 phba->eratt_poll_interval = 1;
7540 phba->sli4_hba.ue_to_sr = bf_get(
7541 lpfc_mbx_set_feature_UESR,
7542 &mboxq->u.mqe.un.set_feature);
7543 phba->sli4_hba.ue_to_rp = bf_get(
7544 lpfc_mbx_set_feature_UERP,
7545 &mboxq->u.mqe.un.set_feature);
7549 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7550 /* Enable MDS Diagnostics only if the SLI Port supports it */
7551 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7552 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7553 if (rc != MBX_SUCCESS)
7554 phba->mds_diags_support = 0;
7558 * Discover the port's supported feature set and match it against the
7561 lpfc_request_features(phba, mboxq);
7562 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7569 * The port must support FCP initiator mode as this is the
7570 * only mode running in the host.
7572 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7573 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7574 "0378 No support for fcpi mode.\n");
7578 /* Performance Hints are ONLY for FCoE */
7579 if (phba->hba_flag & HBA_FCOE_MODE) {
7580 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7581 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7583 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7587 * If the port cannot support the host's requested features
7588 * then turn off the global config parameters to disable the
7589 * feature in the driver. This is not a fatal error.
7591 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7592 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7593 phba->cfg_enable_bg = 0;
7594 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7599 if (phba->max_vpi && phba->cfg_enable_npiv &&
7600 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7604 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7605 "0379 Feature Mismatch Data: x%08x %08x "
7606 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7607 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7608 phba->cfg_enable_npiv, phba->max_vpi);
7609 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7610 phba->cfg_enable_bg = 0;
7611 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7612 phba->cfg_enable_npiv = 0;
7615 /* These SLI3 features are assumed in SLI4 */
7616 spin_lock_irq(&phba->hbalock);
7617 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7618 spin_unlock_irq(&phba->hbalock);
7620 /* Always try to enable dual dump feature if we can */
7621 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
7622 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7623 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
7624 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
7625 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7626 "6448 Dual Dump is enabled\n");
7628 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
7629 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
7631 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7632 lpfc_sli_config_mbox_subsys_get(
7634 lpfc_sli_config_mbox_opcode_get(
7638 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
7639 * calls depend on these resources to complete port setup.
7641 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7643 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7644 "2920 Failed to alloc Resource IDs "
7649 lpfc_set_host_data(phba, mboxq);
7651 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7653 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7654 "2134 Failed to set host os driver version %x",
7658 /* Read the port's service parameters. */
7659 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7661 phba->link_state = LPFC_HBA_ERROR;
7666 mboxq->vport = vport;
7667 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7668 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7669 if (rc == MBX_SUCCESS) {
7670 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7675 * This memory was allocated by the lpfc_read_sparam routine. Release
7676 * it to the mbuf pool.
7678 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7680 mboxq->ctx_buf = NULL;
7682 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7683 "0382 READ_SPARAM command failed "
7684 "status %d, mbxStatus x%x\n",
7685 rc, bf_get(lpfc_mqe_status, mqe));
7686 phba->link_state = LPFC_HBA_ERROR;
7691 lpfc_update_vport_wwn(vport);
7693 /* Update the fc_host data structures with new wwn. */
7694 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7695 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7697 /* Create all the SLI4 queues */
7698 rc = lpfc_sli4_queue_create(phba);
7700 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7701 "3089 Failed to allocate queues\n");
7705 /* Set up all the queues to the device */
7706 rc = lpfc_sli4_queue_setup(phba);
7708 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7709 "0381 Error %d during queue setup.\n ", rc);
7710 goto out_stop_timers;
7712 /* Initialize the driver internal SLI layer lists. */
7713 lpfc_sli4_setup(phba);
7714 lpfc_sli4_queue_init(phba);
7716 /* update host els xri-sgl sizes and mappings */
7717 rc = lpfc_sli4_els_sgl_update(phba);
7719 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7720 "1400 Failed to update xri-sgl size and "
7721 "mapping: %d\n", rc);
7722 goto out_destroy_queue;
7725 /* register the els sgl pool to the port */
7726 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7727 phba->sli4_hba.els_xri_cnt);
7728 if (unlikely(rc < 0)) {
7729 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7730 "0582 Error %d during els sgl post "
7733 goto out_destroy_queue;
7735 phba->sli4_hba.els_xri_cnt = rc;
7737 if (phba->nvmet_support) {
7738 /* update host nvmet xri-sgl sizes and mappings */
7739 rc = lpfc_sli4_nvmet_sgl_update(phba);
7741 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7742 "6308 Failed to update nvmet-sgl size "
7743 "and mapping: %d\n", rc);
7744 goto out_destroy_queue;
7747 /* register the nvmet sgl pool to the port */
7748 rc = lpfc_sli4_repost_sgl_list(
7750 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7751 phba->sli4_hba.nvmet_xri_cnt);
7752 if (unlikely(rc < 0)) {
7753 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7754 "3117 Error %d during nvmet "
7757 goto out_destroy_queue;
7759 phba->sli4_hba.nvmet_xri_cnt = rc;
7761 /* We allocate an iocbq for every receive context SGL.
7762 * The additional allocation is for abort and ls handling.
7764 cnt = phba->sli4_hba.nvmet_xri_cnt +
7765 phba->sli4_hba.max_cfg_param.max_xri;
7767 /* update host common xri-sgl sizes and mappings */
7768 rc = lpfc_sli4_io_sgl_update(phba);
7770 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7771 "6082 Failed to update nvme-sgl size "
7772 "and mapping: %d\n", rc);
7773 goto out_destroy_queue;
7776 /* register the allocated common sgl pool to the port */
7777 rc = lpfc_sli4_repost_io_sgl_list(phba);
7779 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7780 "6116 Error %d during nvme sgl post "
7782 /* Some NVME buffers were moved to abort nvme list */
7783 /* A pci function reset will repost them */
7785 goto out_destroy_queue;
7787 /* Each lpfc_io_buf job structure has an iocbq element.
7788 * This cnt provides for abort, els, ct and ls requests.
7790 cnt = phba->sli4_hba.max_cfg_param.max_xri;
7793 if (!phba->sli.iocbq_lookup) {
7794 /* Initialize and populate the iocb list per host */
7795 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7796 "2821 initialize iocb list with %d entries\n",
7798 rc = lpfc_init_iocb_list(phba, cnt);
7800 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7801 "1413 Failed to init iocb list.\n");
7802 goto out_destroy_queue;
7806 if (phba->nvmet_support)
7807 lpfc_nvmet_create_targetport(phba);
7809 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7810 /* Post initial buffers to all RQs created */
7811 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7812 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7813 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7814 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7815 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7816 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7817 rqbp->buffer_count = 0;
7819 lpfc_post_rq_buffer(
7820 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7821 phba->sli4_hba.nvmet_mrq_data[i],
7822 phba->cfg_nvmet_mrq_post, i);
7826 /* Post the rpi header region to the device. */
7827 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7829 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7830 "0393 Error %d during rpi post operation\n",
7833 goto out_free_iocblist;
7835 lpfc_sli4_node_prep(phba);
7837 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7838 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7840 * The FC Port needs to register FCFI (index 0)
7842 lpfc_reg_fcfi(phba, mboxq);
7843 mboxq->vport = phba->pport;
7844 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7845 if (rc != MBX_SUCCESS)
7846 goto out_unset_queue;
7848 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7849 &mboxq->u.mqe.un.reg_fcfi);
7851 /* We are in NVME Target mode with MRQ > 1 */
7853 /* First register the FCFI */
7854 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7855 mboxq->vport = phba->pport;
7856 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7857 if (rc != MBX_SUCCESS)
7858 goto out_unset_queue;
7860 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7861 &mboxq->u.mqe.un.reg_fcfi_mrq);
7863 /* Next register the MRQs */
7864 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7865 mboxq->vport = phba->pport;
7866 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7867 if (rc != MBX_SUCCESS)
7868 goto out_unset_queue;
7871 /* Check if the port is configured to be disabled */
7872 lpfc_sli_read_link_ste(phba);
7875 /* Don't post more new bufs if repost already recovered
7878 if (phba->nvmet_support == 0) {
7879 if (phba->sli4_hba.io_xri_cnt == 0) {
7880 len = lpfc_new_io_buf(
7881 phba, phba->sli4_hba.io_xri_max);
7884 goto out_unset_queue;
7887 if (phba->cfg_xri_rebalancing)
7888 lpfc_create_multixri_pools(phba);
7891 phba->cfg_xri_rebalancing = 0;
7894 /* Allow asynchronous mailbox command to go through */
7895 spin_lock_irq(&phba->hbalock);
7896 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7897 spin_unlock_irq(&phba->hbalock);
7899 /* Post receive buffers to the device */
7900 lpfc_sli4_rb_setup(phba);
7902 /* Reset HBA FCF states after HBA reset */
7903 phba->fcf.fcf_flag = 0;
7904 phba->fcf.current_rec.flag = 0;
7906 /* Start the ELS watchdog timer */
7907 mod_timer(&vport->els_tmofunc,
7908 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7910 /* Start heart beat timer */
7911 mod_timer(&phba->hb_tmofunc,
7912 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7913 phba->hb_outstanding = 0;
7914 phba->last_completion_time = jiffies;
7916 /* start eq_delay heartbeat */
7917 if (phba->cfg_auto_imax)
7918 queue_delayed_work(phba->wq, &phba->eq_delay_work,
7919 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
7921 /* start per phba idle_stat_delay heartbeat */
7922 lpfc_init_idle_stat_hb(phba);
7924 /* Start error attention (ERATT) polling timer */
7925 mod_timer(&phba->eratt_poll,
7926 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7928 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7929 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7930 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7932 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7933 "2829 This device supports "
7934 "Advanced Error Reporting (AER)\n");
7935 spin_lock_irq(&phba->hbalock);
7936 phba->hba_flag |= HBA_AER_ENABLED;
7937 spin_unlock_irq(&phba->hbalock);
7939 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7940 "2830 This device does not support "
7941 "Advanced Error Reporting (AER)\n");
7942 phba->cfg_aer_support = 0;
7948 * The port is ready, set the host's link state to LINK_DOWN
7949 * in preparation for link interrupts.
7951 spin_lock_irq(&phba->hbalock);
7952 phba->link_state = LPFC_LINK_DOWN;
7954 /* Check if physical ports are trunked */
7955 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7956 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7957 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7958 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7959 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7960 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7961 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7962 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
7963 spin_unlock_irq(&phba->hbalock);
7965 /* Arm the CQs and then EQs on device */
7966 lpfc_sli4_arm_cqeq_intr(phba);
7968 /* Indicate device interrupt mode */
7969 phba->sli4_hba.intr_enable = 1;
7971 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7972 (phba->hba_flag & LINK_DISABLED)) {
7973 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7974 "3103 Adapter Link is disabled.\n");
7975 lpfc_down_link(phba, mboxq);
7976 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7977 if (rc != MBX_SUCCESS) {
7978 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7979 "3104 Adapter failed to issue "
7980 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7981 goto out_io_buff_free;
7983 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7984 /* don't perform init_link on SLI4 FC port loopback test */
7985 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7986 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7988 goto out_io_buff_free;
7991 mempool_free(mboxq, phba->mbox_mem_pool);
7994 /* Free allocated IO Buffers */
7997 /* Unset all the queues set up in this routine on error */
7998 lpfc_sli4_queue_unset(phba);
8000 lpfc_free_iocb_list(phba);
8002 lpfc_sli4_queue_destroy(phba);
8004 lpfc_stop_hba_timers(phba);
8006 mempool_free(mboxq, phba->mbox_mem_pool);
8011 * lpfc_mbox_timeout - Timeout call back function for mbox timer
8012 * @t: Context to fetch pointer to hba structure from.
8014 * This is the callback function for mailbox timer. The mailbox
8015 * timer is armed when a new mailbox command is issued and the timer
8016 * is deleted when the mailbox completes. The function is called by
8017 * the kernel timer code when a mailbox does not complete within
8018 * expected time. This function wakes up the worker thread to
8019 * process the mailbox timeout and returns. All the processing is
8020 * done by the worker thread function lpfc_mbox_timeout_handler.
8023 lpfc_mbox_timeout(struct timer_list *t)
8025 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
8026 unsigned long iflag;
8027 uint32_t tmo_posted;
8029 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
8030 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
8032 phba->pport->work_port_events |= WORKER_MBOX_TMO;
8033 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
8036 lpfc_worker_wake_up(phba);
8041 * lpfc_sli4_mbox_completions_pending - check if any mailbox completions are pending
8043 * @phba: Pointer to HBA context object.
8045 * This function checks if any mailbox completions are present on the mailbox completion queue.
8049 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
8053 struct lpfc_queue *mcq;
8054 struct lpfc_mcqe *mcqe;
8055 bool pending_completions = false;
8058 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8061 /* Check for completions on mailbox completion queue */
8063 mcq = phba->sli4_hba.mbx_cq;
8064 idx = mcq->hba_index;
8065 qe_valid = mcq->qe_valid;
8066 while (bf_get_le32(lpfc_cqe_valid,
8067 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
8068 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
8069 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
8070 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
8071 pending_completions = true;
8074 idx = (idx + 1) % mcq->entry_count;
8075 if (mcq->hba_index == idx)
8078 /* if the index wrapped around, toggle the valid bit */
8079 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
8080 qe_valid = (qe_valid) ? 0 : 1;
8082 return pending_completions;
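
/*
 * Illustrative sketch only (hypothetical helper): the scan above relies
 * on the CQE valid bit. On queues with the CQAV capability, the expected
 * valid-bit sense flips each time the index wraps to zero, which is what
 * the toggle at the bottom of the loop implements.
 */
static inline uint8_t lpfc_example_next_qe_valid(uint8_t qe_valid,
						 uint16_t idx, bool cqav)
{
	return (cqav && idx == 0) ? !qe_valid : qe_valid;
}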
8087 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
8089 * @phba: Pointer to HBA context object.
8091 * For sli4, it is possible to miss an interrupt. As such mbox completions
8092 * maybe missed causing erroneous mailbox timeouts to occur. This function
8093 * checks to see if mbox completions are on the mailbox completion queue
8094 * and will process all the completions associated with the eq for the
8095 * mailbox completion queue.
8098 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
8100 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
8102 struct lpfc_queue *fpeq = NULL;
8103 struct lpfc_queue *eq;
8106 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8109 /* Find the EQ associated with the mbox CQ */
8110 if (sli4_hba->hdwq) {
8111 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
8112 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
8113 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
8122 /* Turn off interrupts from this EQ */
8124 sli4_hba->sli4_eq_clr_intr(fpeq);
8126 /* Check to see if a mbox completion is pending */
8128 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
8131 * If a mbox completion is pending, process all the events on EQ
8132 * associated with the mbox completion queue (this could include
8133 * mailbox commands, async events, els commands, receive queue data
8138 /* process and rearm the EQ */
8139 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
8141 /* Always clear and re-arm the EQ */
8142 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
8144 return mbox_pending;
/**
 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
 * @phba: Pointer to HBA context object.
 *
 * This function is called from worker thread when a mailbox command times out.
 * The caller is not required to hold any locks. This function will reset the
 * HBA and recover all the pending commands.
 **/
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
	MAILBOX_t *mb = NULL;

	struct lpfc_sli *psli = &phba->sli;

	/* If the mailbox completed, process the completion and return */
	if (lpfc_sli4_process_missed_mbox_completions(phba))
		return;

	if (pmbox != NULL)
		mb = &pmbox->u.mb;
	/* Check the pmbox pointer first. There is a race condition
	 * between the mbox timeout handler getting executed in the
	 * worklist and the mailbox actually completing. When this
	 * race condition occurs, the mbox_active will be NULL.
	 */
	spin_lock_irq(&phba->hbalock);
	if (pmbox == NULL) {
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_MBOX | LOG_SLI,
				"0353 Active Mailbox cleared - mailbox timeout "
				"exiting\n");
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
			mb->mbxCommand,
			phba->pport->port_state,
			phba->sli.sli_flag,
			phba->sli.mbox_active);
	spin_unlock_irq(&phba->hbalock);

	/* Setting state unknown so lpfc_sli_abort_iocb_ring
	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
	 * it to fail all outstanding SCSI IO.
	 */
	spin_lock_irq(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irq(&phba->pport->work_port_lock);
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_UNKNOWN;
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	lpfc_sli_abort_fcp_rings(phba);

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0345 Resetting board due to mailbox timeout\n");

	/* Reset the HBA device */
	lpfc_reset_hba(phba);
}
/**
 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code
 * to submit a mailbox command to firmware with SLI-3 interface spec. This
 * function gets the hbalock to protect the data structures.
 * The mailbox command can be submitted in polling mode, in which case
 * this function will wait in a polling loop for the completion of the
 * mailbox.
 * If the mailbox is submitted in no_wait mode (not polling) the
 * function will submit the command and return immediately without waiting
 * for the mailbox completion. The no_wait is supported only when HBA
 * is in SLI2/SLI3 mode - interrupts are enabled.
 * The SLI interface allows only one mailbox pending at a time. If the
 * mailbox is issued in polling mode and there is already a mailbox
 * pending, then the function will return an error. If the mailbox is issued
 * in NO_WAIT mode and there is a mailbox pending already, the function
 * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
 * The SLI layer owns the mailbox object until the completion of the mailbox
 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
 * return codes, the caller owns the mailbox command after the return of
 * the function.
 **/
static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
		       uint32_t flag)
{
	MAILBOX_t *mbx;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, evtctr;
	uint32_t ha_copy, hc_copy;
	int i;
	unsigned long timeout;
	unsigned long drvr_flag = 0;
	volatile uint32_t word0, ldata;
	void __iomem *to_slim;
	int processing_queue = 0;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	if (!pmbox) {
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		/* processing mbox queue from intr_handler */
		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
		processing_queue = 1;
		pmbox = lpfc_mbox_get(phba);
		if (!pmbox) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
	}

	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
	    pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if (!pmbox->vport) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_VPORT,
					"1806 Mbox x%x failed. No vport\n",
					pmbox->u.mb.mbxCommand);
			dump_stack();
			goto out_not_finished;
		}
	}
	/* If the PCI channel is in offline state, do not post mbox. */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	psli = &phba->sli;

	mbx = &pmbox->u.mb;
	status = MBX_SUCCESS;

	if (phba->link_state == LPFC_HBA_ERROR) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		/* Mbox command <mbxCommand> cannot issue */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):0311 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
		goto out_not_finished;
	}
	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
		    !(hc_copy & HC_MBINT_ENA)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2528 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
			goto out_not_finished;
		}
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Polling for a mbox command when another one is already active
		 * is not allowed in SLI. Also, the driver must have established
		 * SLI2 mode to queue and process multiple mbox commands.
		 */

		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"(%d):2529 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"(%d):2530 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		/* Another mailbox command is still being processed, queue this
		 * command to be processed later.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0308 Mbox cmd issue - BUSY Data: "
				"x%x x%x x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
				mbx->mbxCommand,
				phba->pport ? phba->pport->port_state : 0xff,
				psli->sli_flag, flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Bsy:        cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}

		return MBX_BUSY;
	}
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* If we are not polling, we MUST be in SLI2 mode */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
		    (mbx->mbxCommand != MBX_KILL_BOARD)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"(%d):2531 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}
		/* timeout active mbox command */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
					   1000);
		mod_timer(&psli->mbox_tmo, jiffies + timeout);
	}
	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
			"x%x\n",
			pmbox->vport ? pmbox->vport->vpi : 0,
			mbx->mbxCommand,
			phba->pport ? phba->pport->port_state : 0xff,
			psli->sli_flag, flag);

	if (mbx->mbxCommand != MBX_HEARTBEAT) {
		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send:       cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}
	}
	psli->slistat.mbox_cmd++;
	evtctr = psli->slistat.mbox_event;

	/* next set own bit for the adapter and copy over command word */
	mbx->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= (uint8_t *)phba->mbox_ext
				  - (uint8_t *)phba->mbox;
		}

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
			lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
					      (uint8_t *)phba->mbox_ext,
					      pmbox->in_ext_byte_len);
		}
		/* Copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
	} else {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= MAILBOX_HBA_EXT_OFFSET;

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
			lpfc_memcpy_to_slim(phba->MBslimaddr +
				MAILBOX_HBA_EXT_OFFSET,
				pmbox->ctx_buf, pmbox->in_ext_byte_len);

		if (mbx->mbxCommand == MBX_CONFIG_PORT)
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
					      MAILBOX_CMD_SIZE);

		/* First copy mbox command data to HBA SLIM, skip past first
		   word */
		to_slim = phba->MBslimaddr + sizeof(uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
			    MAILBOX_CMD_SIZE - sizeof(uint32_t));

		/* Next copy over first word, with mbxOwner set */
		ldata = *((uint32_t *)mbx);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush */

		if (mbx->mbxCommand == MBX_CONFIG_PORT)
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI_ACTIVE;
	}
	wmb();

	switch (flag) {
	case MBX_NOWAIT:
		/* Set up reference to mailbox command */
		psli->mbox_active = pmbox;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
		/* Don't wait for it to finish, just return */
		break;

	case MBX_POLL:
		/* Set up null reference to mailbox command */
		psli->mbox_active = NULL;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* First read mbox status word */
			word0 = *((uint32_t *)phba->mbox);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word */
			if (lpfc_readl(phba->MBslimaddr, &word0)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		/* Read the HBA Host Attention Register */
		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
			spin_unlock_irqrestore(&phba->hbalock,
					       drvr_flag);
			goto out_not_finished;
		}
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
					   1000) + jiffies;
		i = 0;
		/* Wait for command to complete */
		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
		       (!(ha_copy & HA_MBATT) &&
			(phba->link_state > LPFC_WARM_START))) {
			if (time_after(jiffies, timeout)) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}

			/* Check if we took a mbox interrupt while we were
			   polling */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			if (i++ > 10) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				msleep(1);
				spin_lock_irqsave(&phba->hbalock, drvr_flag);
			}

			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
				/* First copy command data */
				word0 = *((uint32_t *)phba->mbox);
				word0 = le32_to_cpu(word0);
				if (mbx->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					uint32_t slimword0;
					/* Check real SLIM for any errors */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *)&slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						psli->sli_flag &=
						    ~LPFC_SLI_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* First copy command data */
				word0 = readl(phba->MBslimaddr);
			}
			/* Read the HBA Host Attention Register */
			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}
		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* copy results back to user */
			lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
					      MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
						      pmbox->ctx_buf,
						      pmbox->out_ext_byte_len);
			}
		} else {
			/* First copy command data */
			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
					      MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
				lpfc_memcpy_from_slim(
					pmbox->ctx_buf,
					phba->MBslimaddr +
					MAILBOX_HBA_EXT_OFFSET,
					pmbox->out_ext_byte_len);
			}
		}

		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mbx->mbxStatus;
	}

	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return status;

out_not_finished:
	if (processing_queue) {
		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
		lpfc_mbox_cmpl_put(phba, pmbox);
	}
	return MBX_NOT_FINISHED;
}
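/*
 * Illustrative sketch (not compiled): how a caller typically drives the
 * routine above, showing the ownership rule from the header comment. On
 * MBX_BUSY or MBX_SUCCESS the SLI layer keeps the mailbox object and the
 * completion handler releases it; on any other return the caller must
 * reclaim it. The handler name (my_mbox_done) is hypothetical, and the
 * mempool usage follows the driver's usual convention but is written from
 * memory here, so treat this as a sketch rather than a verbatim call site.
 */
#if 0
static void my_mbox_done(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	/* runs at completion; pmb->u.mb.mbxStatus carries the status */
	mempool_free(pmb, phba->mbox_mem_pool);
}

static int my_issue_example(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	/* ... fill in mbox->u.mb for the desired command here ... */
	mbox->mbox_cmpl = my_mbox_done;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc != MBX_BUSY && rc != MBX_SUCCESS) {
		/* SLI layer did not take ownership; caller must free */
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;	/* completion handler will free the mailbox */
}
#endif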
/**
 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox commands
 * @phba: Pointer to HBA context object.
 *
 * The function blocks the posting of SLI4 asynchronous mailbox commands from
 * the driver internal pending mailbox queue. It will then try to wait out the
 * possible outstanding mailbox command before returning.
 *
 * Returns:
 *	0 - the outstanding mailbox command completed; otherwise, the wait for
 *	the outstanding mailbox command timed out.
 **/
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc = 0;
	unsigned long timeout = 0;

	/* Mark the asynchronous mailbox command posting as blocked */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	/* Determine how long we might wait for the active mailbox
	 * command to be gracefully completed by firmware.
	 */
	if (phba->sli.mbox_active)
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
	spin_unlock_irq(&phba->hbalock);

	/* Make sure the mailbox is really active */
	if (timeout)
		lpfc_sli4_process_missed_mbox_completions(phba);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timeout, mark the outstanding command not complete */
			rc = 1;
			break;
		}
	}

	/* Cannot cleanly block the async mailbox command, fail it */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}
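/*
 * Illustrative sketch (not compiled): the wait above is the stock
 * jiffies-deadline poll: compute the deadline once as
 * msecs_to_jiffies(ms) + jiffies, then sleep in small slices and test
 * time_after() on every pass. Restated as a stand-alone helper
 * (my_poll_mbox_idle is hypothetical; kept under "#if 0").
 */
#if 0
static int my_poll_mbox_idle(struct lpfc_hba *phba, unsigned long tmo_ms)
{
	unsigned long deadline = msecs_to_jiffies(tmo_ms) + jiffies;

	while (phba->sli.mbox_active) {		/* condition being waited on */
		msleep(2);			/* re-check every 2ms        */
		if (time_after(jiffies, deadline))
			return 1;		/* timed out                 */
	}
	return 0;				/* condition cleared in time */
}
#endif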
/**
 * lpfc_sli4_async_mbox_unblock - Unblock posting of SLI4 async mailbox commands
 * @phba: Pointer to HBA context object.
 *
 * The function unblocks and resumes the posting of SLI4 asynchronous mailbox
 * commands from the driver internal pending mailbox queue. It makes sure
 * that there is no outstanding mailbox command before resuming posting
 * asynchronous mailbox commands. If, for any reason, there is an outstanding
 * mailbox command, it will try to wait it out before resuming asynchronous
 * mailbox command posting.
 **/
static void
lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		/* Asynchronous mailbox posting is not blocked, do nothing */
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* An outstanding synchronous mailbox command is guaranteed to be
	 * done, either successfully or by timeout. After a timeout the
	 * outstanding mailbox command is always removed, so just unblock
	 * posting of async mailbox commands and resume.
	 */
	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* wake up worker thread to post asynchronous mailbox command */
	lpfc_worker_wake_up(phba);
}
/**
 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function waits for the bootstrap mailbox register ready bit from
 * the port for twice the regular mailbox command timeout value.
 *
 * Returns:
 *	0 - no timeout on waiting for bootstrap mailbox register ready.
 *	MBXERR_ERROR - wait for bootstrap mailbox register timed out.
 **/
static int
lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	uint32_t db_ready;
	unsigned long timeout;
	struct lpfc_register bmbx_reg;

	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
				   * 1000) + jiffies;

	do {
		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
		if (!db_ready)
			mdelay(2);

		if (time_after(jiffies, timeout))
			return MBXERR_ERROR;
	} while (!db_ready);

	return 0;
}
/**
 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function posts a mailbox to the port. The mailbox is expected
 * to be completely filled in and ready for the port to operate on it.
 * This routine executes a synchronous completion operation on the
 * mailbox by polling for its completion.
 *
 * The caller must not be holding any locks when calling this routine.
 *
 * Returns:
 *	MBX_SUCCESS - mailbox posted successfully
 *	Any of the MBX error values.
 **/
static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc = MBX_SUCCESS;
	unsigned long iflag;
	uint32_t mcqe_status;
	uint32_t mbx_cmnd;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_mqe *mb = &mboxq->u.mqe;
	struct lpfc_bmbx_create *mbox_rgn;
	struct dma_address *dma_address;

	/*
	 * Only one mailbox can be active to the bootstrap mailbox region
	 * at a time and there is no queueing provided.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2532 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_POLL);
		return MBXERR_ERROR;
	}
	/* The server grabs the token and owns it until release */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* wait for bootstrap mbox register for readiness */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;
	/*
	 * Initialize the bootstrap memory region to avoid stale data areas
	 * in the mailbox post. Then copy the caller's mailbox contents to
	 * the bmbx mailbox region.
	 */
	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
	lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
			       sizeof(struct lpfc_mqe));

	/* Post the high mailbox dma address to the port and wait for ready. */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register for hi-address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/* Post the low mailbox dma address to the port. */
	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register for low address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/*
	 * Read the CQ to ensure the mailbox has completed.
	 * If so, update the mailbox status so that the upper layers
	 * can complete the request normally.
	 */
	lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
			       sizeof(struct lpfc_mqe));
	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
	lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
			       sizeof(struct lpfc_mcqe));
	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
	/*
	 * When the CQE status indicates a failure and the mailbox status
	 * indicates success then copy the CQE status into the mailbox status
	 * (and prefix it with x4000).
	 */
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mb,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
		rc = MBXERR_ERROR;
	} else
		lpfc_sli4_swap_str(phba, mboxq);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
			" x%x x%x CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			bf_get(lpfc_mqe_status, mb),
			mb->un.mb_words[0], mb->un.mb_words[1],
			mb->un.mb_words[2], mb->un.mb_words[3],
			mb->un.mb_words[4], mb->un.mb_words[5],
			mb->un.mb_words[6], mb->un.mb_words[7],
			mb->un.mb_words[8], mb->un.mb_words[9],
			mb->un.mb_words[10], mb->un.mb_words[11],
			mb->un.mb_words[12], mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);
exit:
	/* We are holding the token, no need for the lock when releasing */
	spin_lock_irqsave(&phba->hbalock, iflag);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
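/*
 * Illustrative sketch (not compiled): the status merge near the end of the
 * routine above can be read as a small pure function. A failing MCQE status
 * overrides a "successful" MQE status and is shifted into the x4000 range
 * via LPFC_MBX_ERROR_RANGE so upper layers can tell the two sources apart.
 * my_merge_mbox_status is a hypothetical name, kept under "#if 0".
 */
#if 0
static uint32_t my_merge_mbox_status(uint32_t mqe_status, uint32_t mcqe_status)
{
	/* CQE reported failure while the MQE still says success */
	if (mcqe_status != MB_CQE_STATUS_SUCCESS && mqe_status == MBX_SUCCESS)
		return LPFC_MBX_ERROR_RANGE | mcqe_status;
	return mqe_status;
}
#endif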
/**
 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code to submit
 * a mailbox command to firmware with SLI-4 interface spec.
 *
 * Return codes: the caller owns the mailbox command after the return of
 * the function.
 **/
static int
lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		       uint32_t flag)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long iflags;
	int rc;

	/* dump from issue mailbox command if setup */
	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);

	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2544 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Detect polling mode and jump to a handler */
	if (!phba->sli4_hba.intr_enable) {
		if (flag == MBX_POLL)
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
		else
			rc = -EIO;
		if (rc != MBX_SUCCESS)
			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
					"(%d):2541 Mailbox command x%x "
					"(x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
									mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
		return rc;
	} else if (flag == MBX_POLL) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"(%d):2542 Try to issue mailbox command "
				"x%x (x%x/x%x) synchronously ahead of async "
				"mailbox command queue: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		/* Try to block the asynchronous mailbox posting */
		rc = lpfc_sli4_async_mbox_block(phba);
		if (!rc) {
			/* Successfully blocked, now issue sync mbox cmd */
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_WARNING,
					LOG_MBOX | LOG_SLI,
					"(%d):2597 Sync Mailbox command "
					"x%x (x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
									mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
			/* Unblock the async mailbox posting afterward */
			lpfc_sli4_async_mbox_unblock(phba);
		}
		return rc;
	}

	/* Now, interrupt mode asynchronous mailbox command */
	rc = lpfc_mbox_cmd_check(phba, mboxq);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2543 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Put the mailbox command to the driver internal FIFO */
	psli->slistat.mbox_busy++;
	spin_lock_irqsave(&phba->hbalock, iflags);
	lpfc_mbox_put(phba, mboxq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0354 Mbox cmd issue - Enqueue Data: "
			"x%x (x%x/x%x) x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state,
			psli->sli_flag, MBX_NOWAIT);
	/* Wake up worker thread to transport mailbox command from head */
	lpfc_worker_wake_up(phba);

	return MBX_BUSY;

out_not_finished:
	return MBX_NOT_FINISHED;
}
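/*
 * Illustrative sketch (not compiled): stripped of its logging, the routine
 * above reduces to a three-way dispatch on interrupt state and the caller's
 * flag. The sketch restates just that shape; my_issue_s4_shape is a
 * hypothetical name, and it omits the locking the real enqueue path takes
 * around lpfc_mbox_put(). Kept under "#if 0".
 */
#if 0
static int my_issue_s4_shape(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			     uint32_t flag)
{
	int rc;

	if (!phba->sli4_hba.intr_enable)
		/* no interrupts: only a polled bootstrap issue can work */
		return (flag == MBX_POLL) ?
			lpfc_sli4_post_sync_mbox(phba, mboxq) : -EIO;

	if (flag == MBX_POLL) {
		/* interrupts on: fence off async posting around the poll */
		rc = lpfc_sli4_async_mbox_block(phba);
		if (!rc) {
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
			lpfc_sli4_async_mbox_unblock(phba);
		}
		return rc;
	}

	/* asynchronous: queue the command and let the worker post it */
	lpfc_mbox_put(phba, mboxq);	/* real path holds hbalock here */
	lpfc_worker_wake_up(phba);
	return MBX_BUSY;
}
#endif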
/**
 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
 * @phba: Pointer to HBA context object.
 *
 * This function is called by the worker thread to send a mailbox command to
 * SLI4 HBA firmware.
 **/
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = MBX_SUCCESS;
	unsigned long iflags;
	struct lpfc_mqe *mqe;
	uint32_t mbx_cmnd;

	/* Check interrupt mode before posting an async mailbox command */
	if (unlikely(!phba->sli4_hba.intr_enable))
		return MBX_NOT_FINISHED;

	/* Check for mailbox command service token */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (unlikely(phba->sli.mbox_active)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0384 There is pending active mailbox cmd\n");
		return MBX_NOT_FINISHED;
	}
	/* Take the mailbox command service token */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* Get the next mailbox command from head of queue */
	mboxq = lpfc_mbox_get(phba);

	/* If no more mailbox command waiting for post, we're done */
	if (!mboxq) {
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_SUCCESS;
	}
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check device readiness for posting mailbox command */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc))
		/* Driver clean routine will clean up pending mailbox */
		goto out_not_finished;

	/* Prepare the mbox command to be posted */
	mqe = &mboxq->u.mqe;
	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);

	/* Start timer for the mbox_tmo and log some mailbox post messages */
	mod_timer(&psli->mbox_tmo, (jiffies +
		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
			"x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state, psli->sli_flag);

	if (mbx_cmnd != MBX_HEARTBEAT) {
		if (mboxq->vport) {
			lpfc_debugfs_disc_trc(mboxq->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		}
	}
	psli->slistat.mbox_cmd++;

	/* Post the mailbox command to the port */
	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2533 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_NOWAIT);
		goto out_not_finished;
	}

	return rc;

out_not_finished:
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->sli.mbox_active) {
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		/* Release the token */
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return MBX_NOT_FINISHED;
}
/**
 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
 * the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes: the caller owns the mailbox command after the return of
 * the function.
 **/
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
}
/**
 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the mbox interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1420 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}
/**
 * __lpfc_sli_ringtx_put - Add an iocb to the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports to add a command
 * iocb to the txq when the SLI layer cannot submit the command iocb
 * to the ring.
 **/
void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
}
/**
 * lpfc_sli_next_iocb - Get the next iocb in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held before a new
 * iocb is submitted to the firmware. This function checks
 * txq to flush the iocbs in txq to Firmware before
 * submitting new iocbs to the Firmware.
 * If there are iocbs in the txq which need to be submitted
 * to firmware, lpfc_sli_next_iocb returns the first element
 * of the txq after dequeuing it from txq.
 * If there is no iocb in the txq then the function will return
 * *piocb and *piocb is set to NULL. Caller needs to check
 * *piocb to find if there are more commands in the txq.
 **/
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq **piocb)
{
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}
/**
 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
 * this function allows only iocbs for posting buffers. This function finds
 * next available slot in the command ring and posts the command to the
 * available slot and writes the port attention register to request HBA start
 * processing new iocb. If there is no slot available in the ring and
 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
 * the function returns IOCB_BUSY.
 *
 * This function is called with hbalock held. The function will return success
 * after it successfully submits the iocb to firmware or after adding it to
 * the txq.
 **/
static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;
	struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];

	lockdep_assert_held(&phba->hbalock);

	if (piocb->iocb_cmpl && (!piocb->vport) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1807 IOCB x%x failed. No vport\n",
				piocb->iocb.ulpCommand);
		dump_stack();
		return IOCB_ERROR;
	}

	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT))
		return IOCB_ERROR;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of a
	 * outstanding event.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
				(piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
					FC_RCTL_DD_UNSOL_CMD) ||
				(piocb->iocb.un.genreq64.w5.hcsw.Type !=
					MENLO_TRANSPORT_TYPE))

				goto iocb_busy;
			break;
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			fallthrough;
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == LPFC_FCP_RING &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:

	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
/**
 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the IOCB
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the IOCB contains a BPL then the list of BDE's is
 * converted to sli4_sge's. If the IOCB contains a single
 * BDE then it is converted to a single sli_sge.
 * The IOCB is still in CPU endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 **/
static uint16_t
lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
		  struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	struct lpfc_dmabuf *dmabuf;
	IOCB_t *icmd;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */

	if (!piocbq || !sglq)
		return xritag;

	sgl = (struct sli4_sge *)sglq->sgl;
	icmd = &piocbq->iocb;
	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
		return sglq->sli4_xritag;
	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = icmd->un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		/* The addrHigh and addrLow fields within the IOCB
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (piocbq->context3)
			dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
		else
			return xritag;

		bpl = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;

				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi =
			cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
		sgl->addr_lo =
			cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len =
			cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
	}
	return sglq->sli4_xritag;
}
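/*
 * Illustrative sketch (not compiled): sgl->word2 above is updated with a
 * swap-in/modify/swap-out pattern. The SGE lives in little-endian DMA
 * memory, so the word is brought to CPU order before bf_set() touches its
 * bit-fields and converted back afterwards. The same idea with a plain
 * mask and a hypothetical "last entry" bit position (TOY_SGE_LAST is an
 * assumption for the example). Kept under "#if 0".
 */
#if 0
#define TOY_SGE_LAST	(1U << 31)	/* hypothetical bit position */

static void toy_sge_mark_last(__le32 *word2, bool last)
{
	u32 w = le32_to_cpu(*word2);	/* swap in: LE -> CPU order  */

	if (last)
		w |= TOY_SGE_LAST;	/* modify in CPU order       */
	else
		w &= ~TOY_SGE_LAST;
	*word2 = cpu_to_le32(w);	/* swap out: CPU -> LE order */
}
#endif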
/**
 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to command iocb.
 * @wqe: Pointer to the work queue entry.
 *
 * This routine converts the iocb command to its Work Queue Entry
 * equivalent. The wqe pointer should not have any fields set when
 * this routine is called because it will memcpy over them.
 * This routine does not set the CQ_ID or the WQEC bits in the
 * wqe.
 *
 * Returns: 0 = Success, IOCB_ERROR = Failure.
 **/
static int
lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
		   union lpfc_wqe128 *wqe)
{
	uint32_t xmit_len = 0, total_len = 0;
	uint8_t ct = 0;
	uint32_t fip;
	uint32_t abort_tag;
	uint8_t command_type = ELS_COMMAND_NON_FIP;
	uint8_t cmnd;
	uint16_t xritag;
	uint16_t abrt_iotag;
	struct lpfc_iocbq *abrtiocbq;
	struct ulp_bde64 *bpl = NULL;
	uint32_t els_id = LPFC_ELS_ID_DEFAULT;
	int numBdes, i;
	struct ulp_bde64 bde;
	struct lpfc_nodelist *ndlp;
	uint32_t *pcmd;
	uint32_t if_type;

	fip = phba->hba_flag & HBA_FIP_SUPPORT;
	/* The fcp commands will set command type */
	if (iocbq->iocb_flag & LPFC_IO_FCP)
		command_type = FCP_COMMAND;
	else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
		command_type = ELS_COMMAND_FIP;
	else
		command_type = ELS_COMMAND_NON_FIP;

	if (phba->fcp_embed_io)
		memset(wqe, 0, sizeof(union lpfc_wqe128));
	/* Some of the fields are in the right position already */
	memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
	/* The ct field has moved so reset */
	wqe->generic.wqe_com.word7 = 0;
	wqe->generic.wqe_com.word10 = 0;

	abort_tag = (uint32_t)iocbq->iotag;
	xritag = iocbq->sli4_xritag;
	/* words0-2 bpl convert bde */
	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		bpl = (struct ulp_bde64 *)
			((struct lpfc_dmabuf *)iocbq->context3)->virt;
		if (!bpl)
			return IOCB_ERROR;

		/* Should already be byte swapped. */
		wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
		wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
		/* swap the size field back to the cpu so we
		 * can assign it to the sgl.
		 */
		wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
		xmit_len = wqe->generic.bde.tus.f.bdeSize;
		total_len = 0;
		for (i = 0; i < numBdes; i++) {
			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
			total_len += bde.tus.f.bdeSize;
		}
	} else
		xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;

	iocbq->iocb.ulpIoTag = iocbq->iotag;
	cmnd = iocbq->iocb.ulpCommand;
	switch (iocbq->iocb.ulpCommand) {
	case CMD_ELS_REQUEST64_CR:
		if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
			ndlp = iocbq->context_un.ndlp;
		else
			ndlp = (struct lpfc_nodelist *)iocbq->context1;
		if (!iocbq->iocb.ulpLe) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2007 Only Limited Edition cmd Format"
				" supported 0x%x\n",
				iocbq->iocb.ulpCommand);
			return IOCB_ERROR;
		}

		wqe->els_req.payload_len = xmit_len;
		/* Els_request64 has a TMO */
		bf_set(wqe_tmo, &wqe->els_req.wqe_com,
			iocbq->iocb.ulpTimeout);
		/* Need a VF for word 4 set the vf bit*/
		bf_set(els_req64_vf, &wqe->els_req, 0);
		/* And a VFID for word 12 */
		bf_set(els_req64_vfid, &wqe->els_req, 0);
		ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
		bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
		       iocbq->iocb.ulpContext);
		bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
		bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
		/* CCP CCPE PV PRI in word10 were set in the memcpy */
		if (command_type == ELS_COMMAND_FIP)
			els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
					>> LPFC_FIP_ELS_ID_SHIFT);
		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
					iocbq->context2)->virt);
		if_type = bf_get(lpfc_sli_intf_if_type,
					&phba->sli4_hba.sli_intf);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
				*pcmd == ELS_CMD_SCR ||
				*pcmd == ELS_CMD_RDF ||
				*pcmd == ELS_CMD_RSCN_XMT ||
				*pcmd == ELS_CMD_FDISC ||
				*pcmd == ELS_CMD_LOGO ||
				*pcmd == ELS_CMD_PLOGI)) {
				bf_set(els_req64_sp, &wqe->els_req, 1);
				bf_set(els_req64_sid, &wqe->els_req,
					iocbq->vport->fc_myDID);
				if ((*pcmd == ELS_CMD_FLOGI) &&
					!(phba->fc_topology ==
						LPFC_TOPOLOGY_LOOP))
					bf_set(els_req64_sid, &wqe->els_req, 0);
				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
					phba->vpi_ids[iocbq->vport->vpi]);
			} else if (pcmd && iocbq->context1) {
				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
					phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
			}
		}
		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
		wqe->els_req.max_response_payload_len = total_len - xmit_len;
		break;
	case CMD_XMIT_SEQUENCE64_CX:
		bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
		       iocbq->iocb.un.ulpWord[3]);
		bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
		       iocbq->iocb.unsli3.rcvsli3.ox_id);
		/* The entire sequence is transmitted for this IOCB */
		xmit_len = total_len;
		cmnd = CMD_XMIT_SEQUENCE64_CR;
		if (phba->link_flag & LS_LOOPBACK_MODE)
			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
		fallthrough;
	case CMD_XMIT_SEQUENCE64_CR:
		/* word3 iocb=io_tag32 wqe=reserved */
		wqe->xmit_sequence.rsvd3 = 0;
		/* word4 relative_offset memcpy */
		/* word5 r_ctl/df_ctl memcpy */
		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
		wqe->xmit_sequence.xmit_len = xmit_len;
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_BCAST64_CN:
		/* word3 iocb=iotag32 wqe=seq_payload_len */
		wqe->xmit_bcast64.seq_payload_len = xmit_len;
		/* word4 iocb=rsvd wqe=rsvd */
		/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
		/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
		bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
		break;
	case CMD_FCP_IWRITE64_CR:
		command_type = FCP_COMMAND_DATA_OUT;
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_iwrite,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_iwrite,
		       0);
		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
		bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
		/* Always open the exchange */
		bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
		       LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
			if (iocbq->priority) {
				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
				       (iocbq->priority << 1));
			} else {
				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}
		/* Note, word 10 is already initialized to 0 */

		/* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
		if (phba->cfg_enable_pbde)
			bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
		else
			bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

		if (phba->fcp_embed_io) {
			struct lpfc_io_buf *lpfc_cmd;
			struct sli4_sge *sgl;
			struct fcp_cmnd *fcp_cmnd;
			uint32_t *ptr;

			/* 128 byte wqe support here */

			lpfc_cmd = iocbq->context1;
			sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
			fcp_cmnd = lpfc_cmd->fcp_cmnd;

			/* Word 0-2 - FCP_CMND */
			wqe->generic.bde.tus.f.bdeFlags =
				BUFF_TYPE_BDE_IMMED;
			wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
			wqe->generic.bde.addrHigh = 0;
			wqe->generic.bde.addrLow = 88; /* Word 22 */

			bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);

			/* Word 22-29 FCP CMND Payload */
			ptr = &wqe->words[22];
			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
		}
		break;
	case CMD_FCP_IREAD64_CR:
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_iread,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_iread,
		       0);
		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
		bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
		/* Always open the exchange */
		bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
		       LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
			bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
			if (iocbq->priority) {
				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
				       (iocbq->priority << 1));
			} else {
				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}
		/* Note, word 10 is already initialized to 0 */

		/* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
		if (phba->cfg_enable_pbde)
			bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
		else
			bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

		if (phba->fcp_embed_io) {
			struct lpfc_io_buf *lpfc_cmd;
			struct sli4_sge *sgl;
			struct fcp_cmnd *fcp_cmnd;
			uint32_t *ptr;

			/* 128 byte wqe support here */

			lpfc_cmd = iocbq->context1;
			sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
			fcp_cmnd = lpfc_cmd->fcp_cmnd;

			/* Word 0-2 - FCP_CMND */
			wqe->generic.bde.tus.f.bdeFlags =
				BUFF_TYPE_BDE_IMMED;
			wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
			wqe->generic.bde.addrHigh = 0;
			wqe->generic.bde.addrLow = 88; /* Word 22 */

			bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
			bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);

			/* Word 22-29 FCP CMND Payload */
			ptr = &wqe->words[22];
			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
		}
		break;
	case CMD_FCP_ICMND64_CR:
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_icmd,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_icmd,
		       0);
		/* word3 iocb=IO_TAG wqe=reserved */
		bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
		/* Always open the exchange */
		bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
			bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
			if (iocbq->priority) {
				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
				       (iocbq->priority << 1));
			} else {
				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}
		/* Note, word 10 is already initialized to 0 */

		if (phba->fcp_embed_io) {
			struct lpfc_io_buf *lpfc_cmd;
			struct sli4_sge *sgl;
			struct fcp_cmnd *fcp_cmnd;
			uint32_t *ptr;

			/* 128 byte wqe support here */

			lpfc_cmd = iocbq->context1;
			sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
			fcp_cmnd = lpfc_cmd->fcp_cmnd;

			/* Word 0-2 - FCP_CMND */
			wqe->generic.bde.tus.f.bdeFlags =
				BUFF_TYPE_BDE_IMMED;
			wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
			wqe->generic.bde.addrHigh = 0;
			wqe->generic.bde.addrLow = 88; /* Word 22 */

			bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
			bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);

			/* Word 22-29 FCP CMND Payload */
			ptr = &wqe->words[22];
			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
		}
		break;
	case CMD_GEN_REQUEST64_CR:
		/* For this command calculate the xmit length of the
		 * request bde.
		 */
		xmit_len = 0;
		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
			sizeof(struct ulp_bde64);
		for (i = 0; i < numBdes; i++) {
			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
			if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
				break;
			xmit_len += bde.tus.f.bdeSize;
		}
		/* word3 iocb=IO_TAG wqe=request_payload_len */
		wqe->gen_req.request_payload_len = xmit_len;
		/* word4 iocb=parameter wqe=relative_offset memcpy */
		/* word5 [rctl, type, df_ctl, la] copied in memcpy */
		/* word6 context tag copied in memcpy */
		if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
			ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2015 Invalid CT %x command 0x%x\n",
				ct, iocbq->iocb.ulpCommand);
			return IOCB_ERROR;
		}
		bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
		bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
		bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
		wqe->gen_req.max_response_payload_len = total_len - xmit_len;
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_ELS_RSP64_CX:
		ndlp = (struct lpfc_nodelist *)iocbq->context1;
		/* words0-2 BDE memcpy */
		/* word3 iocb=iotag32 wqe=response_payload_len */
		wqe->xmit_els_rsp.response_payload_len = xmit_len;
		/* word4 */
		wqe->xmit_els_rsp.word4 = 0;
		/* word5 iocb=rsvd wqe=did */
		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
		       iocbq->iocb.un.xseq64.xmit_els_remoteID);

		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			if (iocbq->vport->fc_flag & FC_PT2PT) {
				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
					iocbq->vport->fc_myDID);
				if (iocbq->vport->fc_myDID == Fabric_DID) {
					bf_set(wqe_els_did,
						&wqe->xmit_els_rsp.wqe_dest, 0);
				}
			}
		}
		bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
		       iocbq->iocb.unsli3.rcvsli3.ox_id);
		if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
			       phba->vpi_ids[iocbq->vport->vpi]);
		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
		bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
					iocbq->context2)->virt);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
			bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
				iocbq->vport->fc_myDID);
			bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
				phba->vpi_ids[phba->pport->vpi]);
		}
		command_type = OTHER_COMMAND;
		break;
9968 case CMD_CLOSE_XRI_CN:
9969 case CMD_ABORT_XRI_CN:
9970 case CMD_ABORT_XRI_CX:
9971 /* words 0-2: reserved; the memcpy should have set them to 0 */
9972 /* port will send abts */
9973 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9974 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9975 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9976 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9980 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9982 * The link is down, or the command was ELS_FIP
9983 * so the fw does not need to send abts
9986 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9988 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9989 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9990 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9991 wqe->abort_cmd.rsrvd5 = 0;
9992 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9993 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9994 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9996 * The abort handler will send us CMD_ABORT_XRI_CN or
9997 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9999 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
10000 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
10001 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
10002 LPFC_WQE_LENLOC_NONE);
10003 cmnd = CMD_ABORT_XRI_CX;
10004 command_type = OTHER_COMMAND;
10007 case CMD_XMIT_BLS_RSP64_CX:
10008 ndlp = (struct lpfc_nodelist *)iocbq->context1;
10009 /* As BLS ABTS RSP WQE is very different from other WQEs,
10010 * we re-construct this WQE here based on information in
10011 * iocbq from scratch.
10013 memset(wqe, 0, sizeof(*wqe));
10014 /* OX_ID is the same regardless of who sent the ABTS to the CT exchange */
10015 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
10016 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
10017 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
10018 LPFC_ABTS_UNSOL_INT) {
10019 /* ABTS sent by initiator to CT exchange, the
10020 * RX_ID field will be filled with the newly
10021 * allocated responder XRI.
10023 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10024 iocbq->sli4_xritag);
10026 /* ABTS sent by responder to CT exchange, the
10027 * RX_ID field will be filled with the responder
10030 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10031 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
10033 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
10034 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
10037 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
10039 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
10040 iocbq->iocb.ulpContext);
10041 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
10042 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
10043 phba->vpi_ids[phba->pport->vpi]);
10044 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
10045 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
10046 LPFC_WQE_LENLOC_NONE);
10047 /* Overwrite the pre-set command type with OTHER_COMMAND */
10048 command_type = OTHER_COMMAND;
10049 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
10050 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
10051 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
10052 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
10053 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
10054 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
10055 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
10059 case CMD_SEND_FRAME:
10060 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
10061 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
10062 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
10063 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
10064 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
10065 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10066 bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
10067 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
10068 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10069 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10070 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10072 case CMD_XRI_ABORTED_CX:
10073 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
10074 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
10075 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
10076 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
10077 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
10079 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10080 "2014 Invalid command 0x%x\n",
10081 iocbq->iocb.ulpCommand);
10086 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
10087 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
10088 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
10089 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
10090 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
10091 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
10092 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
10093 LPFC_IO_DIF_INSERT);
10094 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10095 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10096 wqe->generic.wqe_com.abort_tag = abort_tag;
10097 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
10098 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
10099 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
10100 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10105 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
10106 * @phba: Pointer to HBA context object.
10107 * @ring_number: SLI ring number to issue iocb on.
10108 * @piocb: Pointer to command iocb.
10109 * @flag: Flag indicating if this command can be put into txq.
10111 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
10112 * an iocb command to an HBA with SLI-4 interface spec.
10114 * This function is called with ringlock held. The function will return success
10115 * after it successfully submits the iocb to firmware or after adding it to the
10119 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10120 struct lpfc_iocbq *piocb, uint32_t flag)
10122 struct lpfc_sglq *sglq;
10123 union lpfc_wqe128 wqe;
10124 struct lpfc_queue *wq;
10125 struct lpfc_sli_ring *pring;
10128 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
10129 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10130 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10132 wq = phba->sli4_hba.els_wq;
10135 /* Get corresponding ring */
10139 * The WQE can be either 64 or 128 bytes,
10142 lockdep_assert_held(&pring->ring_lock);
10144 if (piocb->sli4_xritag == NO_XRI) {
10145 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
10146 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
10149 if (!list_empty(&pring->txq)) {
10150 if (!(flag & SLI_IOCB_RET_IOCB)) {
10151 __lpfc_sli_ringtx_put(phba,
10153 return IOCB_SUCCESS;
10158 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10160 if (!(flag & SLI_IOCB_RET_IOCB)) {
10161 __lpfc_sli_ringtx_put(phba,
10164 return IOCB_SUCCESS;
10170 } else if (piocb->iocb_flag & LPFC_IO_FCP)
10171 /* These IO's already have an XRI and a mapped sgl. */
10175 * This is a continuation of a command (CX), so this
10176 * sglq is on the active list
10178 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10184 piocb->sli4_lxritag = sglq->sli4_lxritag;
10185 piocb->sli4_xritag = sglq->sli4_xritag;
10186 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
10190 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
10193 if (lpfc_sli4_wq_put(wq, &wqe))
10195 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
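/*
 * Illustrative sketch (editorial note, not driver code): the lockless
 * _s4 variant above expects the caller to hold the destination ring's
 * ring_lock, mirroring what lpfc_sli_issue_iocb() does internally:
 *
 *	spin_lock_irqsave(&pring->ring_lock, iflags);
 *	rc = __lpfc_sli_issue_iocb_s4(phba, pring->ringno, piocb, flag);
 *	spin_unlock_irqrestore(&pring->ring_lock, iflags);
 */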
10201 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10203 * This routine wraps the actual lockless IOCB issue routine, invoked
10204 * through the function pointer in the lpfc_hba struct.
10207 * IOCB_ERROR - Error
10208 * IOCB_SUCCESS - Success
10212 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10213 struct lpfc_iocbq *piocb, uint32_t flag)
10215 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10219 * lpfc_sli_api_table_setup - Set up sli api function jump table
10220 * @phba: The hba struct for which this call is being executed.
10221 * @dev_grp: The HBA PCI-Device group number.
10223 * This routine sets up the SLI interface API function jump table in @phba
10225 * Returns: 0 - success, -ENODEV - failure.
10228 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10232 case LPFC_PCI_DEV_LP:
10233 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10234 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10236 case LPFC_PCI_DEV_OC:
10237 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10238 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10241 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10242 "1419 Invalid HBA PCI-device group: 0x%x\n",
10247 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
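/*
 * Illustrative sketch (editorial note, not driver code): once the jump
 * table is populated, the rest of the driver can issue an iocb without
 * knowing the SLI revision; the __lpfc_sli_issue_iocb() wrapper above
 * simply does:
 *
 *	rc = phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 *
 * which resolves to the _s3 or _s4 variant selected here.
 */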
10252 * lpfc_sli4_calc_ring - Calculates which ring to use
10253 * @phba: Pointer to HBA context object.
10254 * @piocb: Pointer to command iocb.
10256 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
10257 * hba_wqidx, thus we need to calculate the corresponding ring.
10258 * Since ABORTS must go on the same WQ as the command they are
10259 * aborting, we use the command's hba_wqidx.
10261 struct lpfc_sli_ring *
10262 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10264 struct lpfc_io_buf *lpfc_cmd;
10266 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10267 if (unlikely(!phba->sli4_hba.hdwq))
10270 * for an abort iocb, hba_wqidx should already
10271 * be set up based on what work queue we used.
10273 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10274 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10275 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10277 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10279 if (unlikely(!phba->sli4_hba.els_wq))
10281 piocb->hba_wqidx = 0;
10282 return phba->sli4_hba.els_wq->pring;
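/*
 * Illustrative sketch (editorial note, not driver code): because aborts
 * must travel on the same WQ as the command being aborted, an abort
 * iocb inherits the original hba_wqidx before the ring is calculated:
 *
 *	abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
 *	pring = lpfc_sli4_calc_ring(phba, abtsiocb);
 */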
10287 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10288 * @phba: Pointer to HBA context object.
10289 * @ring_number: Ring number
10290 * @piocb: Pointer to command iocb.
10291 * @flag: Flag indicating if this command can be put into txq.
10293 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
10294 * function. This function gets the hbalock and calls
10295 * __lpfc_sli_issue_iocb function and will return the error returned
10296 * by __lpfc_sli_issue_iocb function. This wrapper is used by
10297 * functions which do not hold hbalock.
10300 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10301 struct lpfc_iocbq *piocb, uint32_t flag)
10303 struct lpfc_sli_ring *pring;
10304 struct lpfc_queue *eq;
10305 unsigned long iflags;
10308 if (phba->sli_rev == LPFC_SLI_REV4) {
10309 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
10311 pring = lpfc_sli4_calc_ring(phba, piocb);
10312 if (unlikely(pring == NULL))
10315 spin_lock_irqsave(&pring->ring_lock, iflags);
10316 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10317 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10319 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
10321 /* For now, SLI2/3 will still use hbalock */
10322 spin_lock_irqsave(&phba->hbalock, iflags);
10323 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10324 spin_unlock_irqrestore(&phba->hbalock, iflags);
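/*
 * Illustrative sketch (editorial note, not driver code): a caller that
 * holds no locks issues an ELS iocb through this wrapper; freeing the
 * iocb on error is one plausible response, not a mandated one:
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *	if (rc == IOCB_ERROR)
 *		lpfc_els_free_iocb(phba, elsiocb);
 */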
10330 * lpfc_extra_ring_setup - Extra ring setup function
10331 * @phba: Pointer to HBA context object.
10333 * This function is called while the driver attaches to the
10334 * HBA to set up the extra ring. The extra ring is used
10335 * only when the driver needs to support target mode functionality
10336 * or IP over FC functionality.
10338 * This function is called with no lock held. SLI3 only.
10341 lpfc_extra_ring_setup(struct lpfc_hba *phba)
10343 struct lpfc_sli *psli;
10344 struct lpfc_sli_ring *pring;
10348 /* Adjust cmd/rsp ring iocb entries more evenly */
10350 /* Take some away from the FCP ring */
10351 pring = &psli->sli3_ring[LPFC_FCP_RING];
10352 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10353 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10354 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10355 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10357 /* and give them to the extra ring */
10358 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10360 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10361 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10362 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10363 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10365 /* Setup default profile for this ring */
10366 pring->iotag_max = 4096;
10367 pring->num_mask = 1;
10368 pring->prt[0].profile = 0; /* Mask 0 */
10369 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10370 pring->prt[0].type = phba->cfg_multi_ring_type;
10371 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10376 lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
10377 struct lpfc_nodelist *ndlp)
10379 unsigned long iflags;
10380 struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
10382 spin_lock_irqsave(&phba->hbalock, iflags);
10383 if (!list_empty(&evtp->evt_listp)) {
10384 spin_unlock_irqrestore(&phba->hbalock, iflags);
10388 /* Incrementing the reference count until the queued work is done. */
10389 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
10390 if (!evtp->evt_arg1) {
10391 spin_unlock_irqrestore(&phba->hbalock, iflags);
10394 evtp->evt = LPFC_EVT_RECOVER_PORT;
10395 list_add_tail(&evtp->evt_listp, &phba->work_list);
10396 spin_unlock_irqrestore(&phba->hbalock, iflags);
10398 lpfc_worker_wake_up(phba);
10401 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10402 * @phba: Pointer to HBA context object.
10403 * @iocbq: Pointer to iocb object.
10405 * The async_event handler calls this routine when it receives
10406 * an ASYNC_STATUS_CN event from the port. The port generates
10407 * this event when an Abort Sequence request to an rport fails
10408 * twice in succession. The abort could be originated by the
10409 * driver or by the port. The ABTS could have been for an ELS
10410 * or FCP IO. The port only generates this event when an ABTS
10411 * fails to complete after one retry.
10414 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10415 struct lpfc_iocbq *iocbq)
10417 struct lpfc_nodelist *ndlp = NULL;
10418 uint16_t rpi = 0, vpi = 0;
10419 struct lpfc_vport *vport = NULL;
10421 /* The rpi in the ulpContext is vport-sensitive. */
10422 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10423 rpi = iocbq->iocb.ulpContext;
10425 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10426 "3092 Port generated ABTS async event "
10427 "on vpi %d rpi %d status 0x%x\n",
10428 vpi, rpi, iocbq->iocb.ulpStatus);
10430 vport = lpfc_find_vport_by_vpid(phba, vpi);
10433 ndlp = lpfc_findnode_rpi(vport, rpi);
10434 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10437 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10438 lpfc_sli_abts_recover_port(vport, ndlp);
10442 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10443 "3095 Event Context not found, no "
10444 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10445 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
10449 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10450 * @phba: pointer to HBA context object.
10451 * @ndlp: nodelist pointer for the impacted rport.
10452 * @axri: pointer to the wcqe containing the failed exchange.
10454 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10455 * port. The port generates this event when an abort exchange request to an
10456 * rport fails twice in succession with no reply. The abort could be originated
10457 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
10460 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10461 struct lpfc_nodelist *ndlp,
10462 struct sli4_wcqe_xri_aborted *axri)
10464 uint32_t ext_status = 0;
10466 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
10467 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10468 "3115 Node Context not found, driver "
10469 "ignoring abts err event\n");
10473 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10474 "3116 Port generated FCP XRI ABORT event on "
10475 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10476 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10477 bf_get(lpfc_wcqe_xa_xri, axri),
10478 bf_get(lpfc_wcqe_xa_status, axri),
10482 * Catch the ABTS protocol failure case. Older OCe FW releases returned
10483 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10484 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10486 ext_status = axri->parameter & IOERR_PARAM_MASK;
10487 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10488 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10489 lpfc_sli_post_recovery_event(phba, ndlp);
10493 * lpfc_sli_async_event_handler - ASYNC iocb handler function
10494 * @phba: Pointer to HBA context object.
10495 * @pring: Pointer to driver SLI ring object.
10496 * @iocbq: Pointer to iocb object.
10498 * This function is called by the slow ring event handler
10499 * function when there is an ASYNC event iocb in the ring.
10500 * This function is called with no lock held.
10501 * Currently this function handles only temperature-related
10502 * ASYNC events. The function decodes the temperature sensor
10503 * event message and posts events for the management applications.
10506 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
10507 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
10511 struct temp_event temp_event_data;
10512 struct Scsi_Host *shost;
10515 icmd = &iocbq->iocb;
10516 evt_code = icmd->un.asyncstat.evt_code;
10518 switch (evt_code) {
10519 case ASYNC_TEMP_WARN:
10520 case ASYNC_TEMP_SAFE:
10521 temp_event_data.data = (uint32_t) icmd->ulpContext;
10522 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10523 if (evt_code == ASYNC_TEMP_WARN) {
10524 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10525 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10526 "0347 Adapter is very hot, please take "
10527 "corrective action. temperature : %d Celsius\n",
10528 (uint32_t) icmd->ulpContext);
10530 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10531 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10532 "0340 Adapter temperature is OK now. "
10533 "temperature : %d Celsius\n",
10534 (uint32_t) icmd->ulpContext);
10537 /* Send temperature change event to applications */
10538 shost = lpfc_shost_from_vport(phba->pport);
10539 fc_host_post_vendor_event(shost, fc_get_event_number(),
10540 sizeof(temp_event_data), (char *) &temp_event_data,
10541 LPFC_NL_VENDOR_ID);
10543 case ASYNC_STATUS_CN:
10544 lpfc_sli_abts_err_handler(phba, iocbq);
10547 iocb_w = (uint32_t *) icmd;
10548 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10549 "0346 Ring %d handler: unexpected ASYNC_STATUS"
10551 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10552 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10553 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10554 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10555 pring->ringno, icmd->un.asyncstat.evt_code,
10556 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10557 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10558 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10559 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10567 * lpfc_sli4_setup - SLI ring setup function
10568 * @phba: Pointer to HBA context object.
10570 * lpfc_sli4_setup sets up rings of the SLI interface with
10571 * the number of iocbs per ring and iotags. This function is
10572 * called while the driver attaches to the HBA and before the
10573 * interrupts are enabled, so there is no need for locking.
10575 * This function always returns 0.
10578 lpfc_sli4_setup(struct lpfc_hba *phba)
10580 struct lpfc_sli_ring *pring;
10582 pring = phba->sli4_hba.els_wq->pring;
10583 pring->num_mask = LPFC_MAX_RING_MASK;
10584 pring->prt[0].profile = 0; /* Mask 0 */
10585 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10586 pring->prt[0].type = FC_TYPE_ELS;
10587 pring->prt[0].lpfc_sli_rcv_unsol_event =
10588 lpfc_els_unsol_event;
10589 pring->prt[1].profile = 0; /* Mask 1 */
10590 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10591 pring->prt[1].type = FC_TYPE_ELS;
10592 pring->prt[1].lpfc_sli_rcv_unsol_event =
10593 lpfc_els_unsol_event;
10594 pring->prt[2].profile = 0; /* Mask 2 */
10595 /* NameServer Inquiry */
10596 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10598 pring->prt[2].type = FC_TYPE_CT;
10599 pring->prt[2].lpfc_sli_rcv_unsol_event =
10600 lpfc_ct_unsol_event;
10601 pring->prt[3].profile = 0; /* Mask 3 */
10602 /* NameServer response */
10603 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10605 pring->prt[3].type = FC_TYPE_CT;
10606 pring->prt[3].lpfc_sli_rcv_unsol_event =
10607 lpfc_ct_unsol_event;
10612 * lpfc_sli_setup - SLI ring setup function
10613 * @phba: Pointer to HBA context object.
10615 * lpfc_sli_setup sets up rings of the SLI interface with
10616 * the number of iocbs per ring and iotags. This function is
10617 * called while the driver attaches to the HBA and before the
10618 * interrupts are enabled, so there is no need for locking.
10620 * This function always returns 0. SLI3 only.
10623 lpfc_sli_setup(struct lpfc_hba *phba)
10625 int i, totiocbsize = 0;
10626 struct lpfc_sli *psli = &phba->sli;
10627 struct lpfc_sli_ring *pring;
10629 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10630 psli->sli_flag = 0;
10632 psli->iocbq_lookup = NULL;
10633 psli->iocbq_lookup_len = 0;
10634 psli->last_iotag = 0;
10636 for (i = 0; i < psli->num_rings; i++) {
10637 pring = &psli->sli3_ring[i];
10639 case LPFC_FCP_RING: /* ring 0 - FCP */
10640 /* numCiocb and numRiocb are used in config_port */
10641 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10642 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10643 pring->sli.sli3.numCiocb +=
10644 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10645 pring->sli.sli3.numRiocb +=
10646 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10647 pring->sli.sli3.numCiocb +=
10648 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10649 pring->sli.sli3.numRiocb +=
10650 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10651 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10652 SLI3_IOCB_CMD_SIZE :
10653 SLI2_IOCB_CMD_SIZE;
10654 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10655 SLI3_IOCB_RSP_SIZE :
10656 SLI2_IOCB_RSP_SIZE;
10657 pring->iotag_ctr = 0;
10659 (phba->cfg_hba_queue_depth * 2);
10660 pring->fast_iotag = pring->iotag_max;
10661 pring->num_mask = 0;
10663 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
10664 /* numCiocb and numRiocb are used in config_port */
10665 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10666 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10667 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10668 SLI3_IOCB_CMD_SIZE :
10669 SLI2_IOCB_CMD_SIZE;
10670 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10671 SLI3_IOCB_RSP_SIZE :
10672 SLI2_IOCB_RSP_SIZE;
10673 pring->iotag_max = phba->cfg_hba_queue_depth;
10674 pring->num_mask = 0;
10676 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
10677 /* numCiocb and numRiocb are used in config_port */
10678 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10679 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10680 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10681 SLI3_IOCB_CMD_SIZE :
10682 SLI2_IOCB_CMD_SIZE;
10683 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10684 SLI3_IOCB_RSP_SIZE :
10685 SLI2_IOCB_RSP_SIZE;
10686 pring->fast_iotag = 0;
10687 pring->iotag_ctr = 0;
10688 pring->iotag_max = 4096;
10689 pring->lpfc_sli_rcv_async_status =
10690 lpfc_sli_async_event_handler;
10691 pring->num_mask = LPFC_MAX_RING_MASK;
10692 pring->prt[0].profile = 0; /* Mask 0 */
10693 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10694 pring->prt[0].type = FC_TYPE_ELS;
10695 pring->prt[0].lpfc_sli_rcv_unsol_event =
10696 lpfc_els_unsol_event;
10697 pring->prt[1].profile = 0; /* Mask 1 */
10698 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10699 pring->prt[1].type = FC_TYPE_ELS;
10700 pring->prt[1].lpfc_sli_rcv_unsol_event =
10701 lpfc_els_unsol_event;
10702 pring->prt[2].profile = 0; /* Mask 2 */
10703 /* NameServer Inquiry */
10704 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10706 pring->prt[2].type = FC_TYPE_CT;
10707 pring->prt[2].lpfc_sli_rcv_unsol_event =
10708 lpfc_ct_unsol_event;
10709 pring->prt[3].profile = 0; /* Mask 3 */
10710 /* NameServer response */
10711 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10713 pring->prt[3].type = FC_TYPE_CT;
10714 pring->prt[3].lpfc_sli_rcv_unsol_event =
10715 lpfc_ct_unsol_event;
10718 totiocbsize += (pring->sli.sli3.numCiocb *
10719 pring->sli.sli3.sizeCiocb) +
10720 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10722 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10723 /* Too many cmd / rsp ring entries in SLI2 SLIM */
10724 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10725 "SLI2 SLIM Data: x%x x%lx\n",
10726 phba->brd_no, totiocbsize,
10727 (unsigned long) MAX_SLIM_IOCB_SIZE);
10729 if (phba->cfg_multi_ring_support == 2)
10730 lpfc_extra_ring_setup(phba);
10736 * lpfc_sli4_queue_init - Queue initialization function
10737 * @phba: Pointer to HBA context object.
10739 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
10740 * ring. This function also initializes ring indices of each ring.
10741 * This function is called during the initialization of the SLI
10742 * interface of an HBA.
10743 * This function is called with no lock held and always returns
10747 lpfc_sli4_queue_init(struct lpfc_hba *phba)
10749 struct lpfc_sli *psli;
10750 struct lpfc_sli_ring *pring;
10754 spin_lock_irq(&phba->hbalock);
10755 INIT_LIST_HEAD(&psli->mboxq);
10756 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10757 /* Initialize list headers for txq and txcmplq as doubly linked lists */
10758 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10759 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
10761 pring->ringno = LPFC_FCP_RING;
10762 pring->txcmplq_cnt = 0;
10763 INIT_LIST_HEAD(&pring->txq);
10764 INIT_LIST_HEAD(&pring->txcmplq);
10765 INIT_LIST_HEAD(&pring->iocb_continueq);
10766 spin_lock_init(&pring->ring_lock);
10768 pring = phba->sli4_hba.els_wq->pring;
10770 pring->ringno = LPFC_ELS_RING;
10771 pring->txcmplq_cnt = 0;
10772 INIT_LIST_HEAD(&pring->txq);
10773 INIT_LIST_HEAD(&pring->txcmplq);
10774 INIT_LIST_HEAD(&pring->iocb_continueq);
10775 spin_lock_init(&pring->ring_lock);
10777 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10778 pring = phba->sli4_hba.nvmels_wq->pring;
10780 pring->ringno = LPFC_ELS_RING;
10781 pring->txcmplq_cnt = 0;
10782 INIT_LIST_HEAD(&pring->txq);
10783 INIT_LIST_HEAD(&pring->txcmplq);
10784 INIT_LIST_HEAD(&pring->iocb_continueq);
10785 spin_lock_init(&pring->ring_lock);
10788 spin_unlock_irq(&phba->hbalock);
10792 * lpfc_sli_queue_init - Queue initialization function
10793 * @phba: Pointer to HBA context object.
10795 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
10796 * ring. This function also initializes ring indices of each ring.
10797 * This function is called during the initialization of the SLI
10798 * interface of an HBA.
10799 * This function is called with no lock held and always returns
10803 lpfc_sli_queue_init(struct lpfc_hba *phba)
10805 struct lpfc_sli *psli;
10806 struct lpfc_sli_ring *pring;
10810 spin_lock_irq(&phba->hbalock);
10811 INIT_LIST_HEAD(&psli->mboxq);
10812 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10813 /* Initialize list headers for txq and txcmplq as doubly linked lists */
10814 for (i = 0; i < psli->num_rings; i++) {
10815 pring = &psli->sli3_ring[i];
10817 pring->sli.sli3.next_cmdidx = 0;
10818 pring->sli.sli3.local_getidx = 0;
10819 pring->sli.sli3.cmdidx = 0;
10820 INIT_LIST_HEAD(&pring->iocb_continueq);
10821 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10822 INIT_LIST_HEAD(&pring->postbufq);
10824 INIT_LIST_HEAD(&pring->txq);
10825 INIT_LIST_HEAD(&pring->txcmplq);
10826 spin_lock_init(&pring->ring_lock);
10828 spin_unlock_irq(&phba->hbalock);
10832 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10833 * @phba: Pointer to HBA context object.
10835 * This routine flushes the mailbox command subsystem. It will unconditionally
10836 * flush all the mailbox commands in the three possible stages in the mailbox
10837 * command sub-system: pending mailbox command queue; the outstanding mailbox
10838 * command; and completed mailbox command queue. It is the caller's responsibility
10839 * to make sure that the driver is in the proper state to flush the mailbox
10840 * command sub-system. Namely, the posting of mailbox commands into the
10841 * pending mailbox command queue from the various clients must be stopped;
10842 * either the HBA is in a state in which it will never work on the outstanding
10843 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
10844 * mailbox command has been completed.
10847 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10849 LIST_HEAD(completions);
10850 struct lpfc_sli *psli = &phba->sli;
10852 unsigned long iflag;
10854 /* Disable softirqs, including timers from obtaining phba->hbalock */
10855 local_bh_disable();
10857 /* Flush all the mailbox commands in the mbox system */
10858 spin_lock_irqsave(&phba->hbalock, iflag);
10860 /* The pending mailbox command queue */
10861 list_splice_init(&phba->sli.mboxq, &completions);
10862 /* The outstanding active mailbox command */
10863 if (psli->mbox_active) {
10864 list_add_tail(&psli->mbox_active->list, &completions);
10865 psli->mbox_active = NULL;
10866 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10868 /* The completed mailbox command queue */
10869 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10870 spin_unlock_irqrestore(&phba->hbalock, iflag);
10872 /* Enable softirqs again, done with phba->hbalock */
10875 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10876 while (!list_empty(&completions)) {
10877 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10878 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10879 if (pmb->mbox_cmpl)
10880 pmb->mbox_cmpl(phba, pmb);
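/*
 * Illustrative sketch (editorial note, not driver code): a mailbox
 * completion handler can recognize a flushed command by the status
 * stamped above:
 *
 *	if (pmb->u.mb.mbxStatus == MBX_NOT_FINISHED)
 *		return;
 *
 * where MBX_NOT_FINISHED means the command was flushed, never executed
 * by the port.
 */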
10885 * lpfc_sli_host_down - Vport cleanup function
10886 * @vport: Pointer to virtual port object.
10888 * lpfc_sli_host_down is called to clean up the resources
10889 * associated with a vport before destroying virtual
10890 * port data structures.
10891 * This function does the following operations:
10892 * - Free discovery resources associated with this virtual
10894 * - Free iocbs associated with this virtual port in
10896 * - Send abort for all iocb commands associated with this
10897 * vport in txcmplq.
10899 * This function is called with no lock held and always returns 1.
10902 lpfc_sli_host_down(struct lpfc_vport *vport)
10904 LIST_HEAD(completions);
10905 struct lpfc_hba *phba = vport->phba;
10906 struct lpfc_sli *psli = &phba->sli;
10907 struct lpfc_queue *qp = NULL;
10908 struct lpfc_sli_ring *pring;
10909 struct lpfc_iocbq *iocb, *next_iocb;
10911 unsigned long flags = 0;
10912 uint16_t prev_pring_flag;
10914 lpfc_cleanup_discovery_resources(vport);
10916 spin_lock_irqsave(&phba->hbalock, flags);
10919 * Error everything on the txq since these iocbs
10920 * have not been given to the FW yet.
10921 * Also issue ABTS for everything on the txcmplq
10923 if (phba->sli_rev != LPFC_SLI_REV4) {
10924 for (i = 0; i < psli->num_rings; i++) {
10925 pring = &psli->sli3_ring[i];
10926 prev_pring_flag = pring->flag;
10927 /* Only slow rings */
10928 if (pring->ringno == LPFC_ELS_RING) {
10929 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10930 /* Set the lpfc data pending flag */
10931 set_bit(LPFC_DATA_READY, &phba->data_flags);
10933 list_for_each_entry_safe(iocb, next_iocb,
10934 &pring->txq, list) {
10935 if (iocb->vport != vport)
10937 list_move_tail(&iocb->list, &completions);
10939 list_for_each_entry_safe(iocb, next_iocb,
10940 &pring->txcmplq, list) {
10941 if (iocb->vport != vport)
10943 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10945 pring->flag = prev_pring_flag;
10948 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10952 if (pring == phba->sli4_hba.els_wq->pring) {
10953 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10954 /* Set the lpfc data pending flag */
10955 set_bit(LPFC_DATA_READY, &phba->data_flags);
10957 prev_pring_flag = pring->flag;
10958 spin_lock(&pring->ring_lock);
10959 list_for_each_entry_safe(iocb, next_iocb,
10960 &pring->txq, list) {
10961 if (iocb->vport != vport)
10963 list_move_tail(&iocb->list, &completions);
10965 spin_unlock(&pring->ring_lock);
10966 list_for_each_entry_safe(iocb, next_iocb,
10967 &pring->txcmplq, list) {
10968 if (iocb->vport != vport)
10970 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10972 pring->flag = prev_pring_flag;
10975 spin_unlock_irqrestore(&phba->hbalock, flags);
10977 /* Cancel all the IOCBs from the completions list */
10978 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10984 * lpfc_sli_hba_down - Resource cleanup function for the HBA
10985 * @phba: Pointer to HBA context object.
10987 * This function cleans up all iocb, buffers, mailbox commands
10988 * while shutting down the HBA. This function is called with no
10989 * lock held and always returns 1.
10990 * This function does the following to cleanup driver resources:
10991 * - Free discovery resources for each virtual port
10992 * - Cleanup any pending fabric iocbs
10993 * - Iterate through the iocb txq and free each entry
10995 * - Free up any buffer posted to the HBA
10996 * - Free mailbox commands in the mailbox queue.
10999 lpfc_sli_hba_down(struct lpfc_hba *phba)
11001 LIST_HEAD(completions);
11002 struct lpfc_sli *psli = &phba->sli;
11003 struct lpfc_queue *qp = NULL;
11004 struct lpfc_sli_ring *pring;
11005 struct lpfc_dmabuf *buf_ptr;
11006 unsigned long flags = 0;
11009 /* Shutdown the mailbox command sub-system */
11010 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
11012 lpfc_hba_down_prep(phba);
11014 /* Disable softirqs, including timers from obtaining phba->hbalock */
11015 local_bh_disable();
11017 lpfc_fabric_abort_hba(phba);
11019 spin_lock_irqsave(&phba->hbalock, flags);
11022 * Error everything on the txq since these iocbs
11023 * have not been given to the FW yet.
11025 if (phba->sli_rev != LPFC_SLI_REV4) {
11026 for (i = 0; i < psli->num_rings; i++) {
11027 pring = &psli->sli3_ring[i];
11028 /* Only slow rings */
11029 if (pring->ringno == LPFC_ELS_RING) {
11030 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11031 /* Set the lpfc data pending flag */
11032 set_bit(LPFC_DATA_READY, &phba->data_flags);
11034 list_splice_init(&pring->txq, &completions);
11037 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11041 spin_lock(&pring->ring_lock);
11042 list_splice_init(&pring->txq, &completions);
11043 spin_unlock(&pring->ring_lock);
11044 if (pring == phba->sli4_hba.els_wq->pring) {
11045 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11046 /* Set the lpfc data pending flag */
11047 set_bit(LPFC_DATA_READY, &phba->data_flags);
11051 spin_unlock_irqrestore(&phba->hbalock, flags);
11053 /* Cancel all the IOCBs from the completions list */
11054 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11057 spin_lock_irqsave(&phba->hbalock, flags);
11058 list_splice_init(&phba->elsbuf, &completions);
11059 phba->elsbuf_cnt = 0;
11060 phba->elsbuf_prev_cnt = 0;
11061 spin_unlock_irqrestore(&phba->hbalock, flags);
11063 while (!list_empty(&completions)) {
11064 list_remove_head(&completions, buf_ptr,
11065 struct lpfc_dmabuf, list);
11066 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
11070 /* Enable softirqs again, done with phba->hbalock */
11073 /* Return any active mbox cmds */
11074 del_timer_sync(&psli->mbox_tmo);
11076 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
11077 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11078 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
11084 * lpfc_sli_pcimem_bcopy - SLI memory copy function
11085 * @srcp: Source memory pointer.
11086 * @destp: Destination memory pointer.
11087 * @cnt: Number of bytes to be copied, walked one 32-bit word at a time.
11089 * This function is used for copying data between driver memory
11090 * and the SLI memory. This function also changes the endianness
11091 * of each word if native endianness is different from SLI
11092 * endianness. This function can be called with or without
11096 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
11098 uint32_t *src = srcp;
11099 uint32_t *dest = destp;
11103 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
11105 ldata = le32_to_cpu(ldata);
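/*
 * Illustrative sketch (editorial note, not driver code): @cnt is a byte
 * count consumed one 32-bit word at a time, so callers pass sizes in
 * bytes; "mbx_virt" and "local_mb" are hypothetical names here:
 *
 *	MAILBOX_t local_mb;
 *
 *	lpfc_sli_pcimem_bcopy(mbx_virt, &local_mb, sizeof(MAILBOX_t));
 */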
11114 * lpfc_sli_bemem_bcopy - SLI memory copy function
11115 * @srcp: Source memory pointer.
11116 * @destp: Destination memory pointer.
11117 * @cnt: Number of bytes to be copied, walked one 32-bit word at a time.
11119 * This function is used for copying data between a data structure
11120 * with big endian representation to local endianness.
11121 * This function can be called with or without lock.
11124 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
11126 uint32_t *src = srcp;
11127 uint32_t *dest = destp;
11131 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
11133 ldata = be32_to_cpu(ldata);
11141 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
11142 * @phba: Pointer to HBA context object.
11143 * @pring: Pointer to driver SLI ring object.
11144 * @mp: Pointer to driver buffer object.
11146 * This function is called with no lock held.
11147 * It always returns zero after adding the buffer to the postbufq
11151 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11152 struct lpfc_dmabuf *mp)
11154 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
11156 spin_lock_irq(&phba->hbalock);
11157 list_add_tail(&mp->list, &pring->postbufq);
11158 pring->postbufq_cnt++;
11159 spin_unlock_irq(&phba->hbalock);
11164 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
11165 * @phba: Pointer to HBA context object.
11167 * When HBQ is enabled, buffers are searched based on tags. This function
11168 * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
11169 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
11170 * does not conflict with tags of buffer posted for unsolicited events.
11171 * The function returns the allocated tag. The function is called with
11175 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
11177 spin_lock_irq(&phba->hbalock);
11178 phba->buffer_tag_count++;
11180 * Always set the QUE_BUFTAG_BIT to distinguish this tag
11181 * from a tag assigned by HBQ.
11183 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
11184 spin_unlock_irq(&phba->hbalock);
11185 return phba->buffer_tag_count;
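/*
 * Illustrative sketch (editorial note, not driver code): a buffer posted
 * with a driver-allocated tag is later recovered by that same tag; "mp"
 * is a hypothetical, already-allocated struct lpfc_dmabuf:
 *
 *	uint32_t tag = lpfc_sli_get_buffer_tag(phba);
 *
 *	mp->buffer_tag = tag;
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
 */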
11189 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
11190 * @phba: Pointer to HBA context object.
11191 * @pring: Pointer to driver SLI ring object.
11192 * @tag: Buffer tag.
11194 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
11195 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
11196 * iocb is posted to the response ring with the tag of the buffer.
11197 * This function searches the pring->postbufq list using the tag
11198 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
11199 * iocb. If the buffer is found, the lpfc_dmabuf object of the
11200 * buffer is returned to the caller; otherwise NULL is returned.
11201 * This function is called with no lock held.
11203 struct lpfc_dmabuf *
11204 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11207 struct lpfc_dmabuf *mp, *next_mp;
11208 struct list_head *slp = &pring->postbufq;
11210 /* Search postbufq, from the beginning, looking for a match on tag */
11211 spin_lock_irq(&phba->hbalock);
11212 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11213 if (mp->buffer_tag == tag) {
11214 list_del_init(&mp->list);
11215 pring->postbufq_cnt--;
11216 spin_unlock_irq(&phba->hbalock);
11221 spin_unlock_irq(&phba->hbalock);
11222 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11223 "0402 Cannot find virtual addr for buffer tag on "
11224 "ring %d Data x%lx x%px x%px x%x\n",
11225 pring->ringno, (unsigned long) tag,
11226 slp->next, slp->prev, pring->postbufq_cnt);
11232 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
11233 * @phba: Pointer to HBA context object.
11234 * @pring: Pointer to driver SLI ring object.
11235 * @phys: DMA address of the buffer.
11237 * This function searches the buffer list using the dma_address
11238 * of the unsolicited event to find the driver's lpfc_dmabuf object
11239 * corresponding to the dma_address. The function returns the
11240 * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
11241 * This function is called by the ct and els unsolicited event
11242 * handlers to get the buffer associated with the unsolicited
11245 * This function is called with no lock held.
11247 struct lpfc_dmabuf *
11248 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11251 struct lpfc_dmabuf *mp, *next_mp;
11252 struct list_head *slp = &pring->postbufq;
11254 /* Search postbufq, from the beginning, looking for a match on phys */
11255 spin_lock_irq(&phba->hbalock);
11256 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11257 if (mp->phys == phys) {
11258 list_del_init(&mp->list);
11259 pring->postbufq_cnt--;
11260 spin_unlock_irq(&phba->hbalock);
11265 spin_unlock_irq(&phba->hbalock);
11266 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11267 "0410 Cannot find virtual addr for mapped buf on "
11268 "ring %d Data x%llx x%px x%px x%x\n",
11269 pring->ringno, (unsigned long long)phys,
11270 slp->next, slp->prev, pring->postbufq_cnt);
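/*
 * Illustrative sketch (editorial note, not driver code): an unsolicited
 * event handler typically rebuilds the DMA address from the iocb and
 * looks the buffer up with it; "icmd" is a hypothetical IOCB_t pointer
 * and getPaddr() is assumed to be the driver's address-combining helper:
 *
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring,
 *				      getPaddr(icmd->un.cont64[0].addrHigh,
 *					       icmd->un.cont64[0].addrLow));
 */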
11275 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
11276 * @phba: Pointer to HBA context object.
11277 * @cmdiocb: Pointer to driver command iocb object.
11278 * @rspiocb: Pointer to driver response iocb object.
11280 * This function is the completion handler for the abort iocbs for
11281 * ELS commands. This function is called from the ELS ring event
11282 * handler with no lock held. This function frees memory resources
11283 * associated with the abort iocb.
11286 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11287 struct lpfc_iocbq *rspiocb)
11289 IOCB_t *irsp = &rspiocb->iocb;
11290 uint16_t abort_iotag, abort_context;
11291 struct lpfc_iocbq *abort_iocb = NULL;
11293 if (irsp->ulpStatus) {
11296 * Assume that the port already completed and returned, or
11297 * will return the iocb. Just log the message.
11299 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11300 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11302 spin_lock_irq(&phba->hbalock);
11303 if (phba->sli_rev < LPFC_SLI_REV4) {
11304 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11305 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11306 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11307 spin_unlock_irq(&phba->hbalock);
11310 if (abort_iotag != 0 &&
11311 abort_iotag <= phba->sli.last_iotag)
11313 phba->sli.iocbq_lookup[abort_iotag];
11315 /* For sli4 the abort_tag is the XRI,
11316 * so the abort routine puts the iotag of the iocb
11317 * being aborted in the context field of the abort
11320 abort_iocb = phba->sli.iocbq_lookup[abort_context];
11322 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11323 "0327 Cannot abort els iocb x%px "
11324 "with tag %x context %x, abort status %x, "
11326 abort_iocb, abort_iotag, abort_context,
11327 irsp->ulpStatus, irsp->un.ulpWord[4]);
11329 spin_unlock_irq(&phba->hbalock);
11332 lpfc_sli_release_iocbq(phba, cmdiocb);
11337 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
11338 * @phba: Pointer to HBA context object.
11339 * @cmdiocb: Pointer to driver command iocb object.
11340 * @rspiocb: Pointer to driver response iocb object.
11342 * The function is called from SLI ring event handler with no
11343 * lock held. This function is the completion handler for ELS commands
11344 * which are aborted. The function frees memory resources used for
11345 * the aborted ELS commands.
11348 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11349 struct lpfc_iocbq *rspiocb)
11351 IOCB_t *irsp = &rspiocb->iocb;
11353 /* ELS cmd tag <ulpIoTag> completes */
11354 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11355 "0139 Ignoring ELS cmd tag x%x completion Data: "
11357 irsp->ulpIoTag, irsp->ulpStatus,
11358 irsp->un.ulpWord[4], irsp->ulpTimeout);
11359 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11360 lpfc_ct_free_iocb(phba, cmdiocb);
11362 lpfc_els_free_iocb(phba, cmdiocb);
11367 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
11368 * @phba: Pointer to HBA context object.
11369 * @pring: Pointer to driver SLI ring object.
11370 * @cmdiocb: Pointer to driver command iocb object.
11372 * This function issues an abort iocb for the provided command iocb down to
11373 * the port. Unless the outstanding command iocb is itself an abort
11374 * request, this function issues the abort unconditionally. This function is
11375 * called with hbalock held. The function returns 0 when it fails due to
11376 * memory allocation failure or when the command iocb is an abort request.
11377 * The hbalock is asserted held in the code path calling this routine.
11380 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11381 struct lpfc_iocbq *cmdiocb)
11383 struct lpfc_vport *vport = cmdiocb->vport;
11384 struct lpfc_iocbq *abtsiocbp;
11385 IOCB_t *icmd = NULL;
11386 IOCB_t *iabt = NULL;
11388 unsigned long iflags;
11389 struct lpfc_nodelist *ndlp;
11392 * There are certain command types we don't want to abort. And we
11393 * don't want to abort commands that are already in the process of
11396 icmd = &cmdiocb->iocb;
11397 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11398 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11399 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11402 /* issue ABTS for this IOCB based on iotag */
11403 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11404 if (abtsiocbp == NULL)
11407 /* This signals the response to set the correct status
11408 * before calling the completion handler
11410 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11412 iabt = &abtsiocbp->iocb;
11413 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11414 iabt->un.acxri.abortContextTag = icmd->ulpContext;
11415 if (phba->sli_rev == LPFC_SLI_REV4) {
11416 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11417 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11419 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11420 if (pring->ringno == LPFC_ELS_RING) {
11421 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11422 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11426 iabt->ulpClass = icmd->ulpClass;
11428 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11429 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11430 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11431 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11432 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11433 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11435 if (phba->link_state >= LPFC_LINK_UP)
11436 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11438 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11440 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11441 abtsiocbp->vport = vport;
11443 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11444 "0339 Abort xri x%x, original iotag x%x, "
11445 "abort cmd iotag x%x\n",
11446 iabt->un.acxri.abortIoTag,
11447 iabt->un.acxri.abortContextTag,
11450 if (phba->sli_rev == LPFC_SLI_REV4) {
11451 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11452 if (unlikely(pring == NULL))
11454 /* Note: both hbalock and ring_lock need to be held here */
11455 spin_lock_irqsave(&pring->ring_lock, iflags);
11456 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11458 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11460 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11465 __lpfc_sli_release_iocbq(phba, abtsiocbp);
11468 * Caller to this routine should check for IOCB_ERROR
11469 * and handle it properly. This routine no longer removes
11470 * iocb off txcmplq and call compl in case of IOCB_ERROR.
11476 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11477 * @phba: Pointer to HBA context object.
11478 * @pring: Pointer to driver SLI ring object.
11479 * @cmdiocb: Pointer to driver command iocb object.
11481 * This function issues an abort iocb for the provided command iocb. In case
11482 * of unloading, the abort iocb will not be issued to commands on the ELS
11483 * ring. Instead, the callback function of those commands is changed so
11484 * that nothing happens when they finish. This function is called with
11485 * hbalock held. The function returns 0 when the command iocb is an abort
11489 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11490 struct lpfc_iocbq *cmdiocb)
11492 struct lpfc_vport *vport = cmdiocb->vport;
11493 int retval = IOCB_ERROR;
11494 IOCB_t *icmd = NULL;
11496 lockdep_assert_held(&phba->hbalock);
11499 * There are certain command types we don't want to abort. And we
11500 * don't want to abort commands that are already in the process of
11503 icmd = &cmdiocb->iocb;
11504 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11505 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11506 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11510 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11511 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11513 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11514 goto abort_iotag_exit;
11518 * If we're unloading, don't abort iocb on the ELS ring, but change
11519 * the callback so that nothing happens when it finishes.
11521 if ((vport->load_flag & FC_UNLOADING) &&
11522 (pring->ringno == LPFC_ELS_RING)) {
11523 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11524 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11526 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11527 goto abort_iotag_exit;
11530 /* Now, try to issue the abort for the cmdiocb */
11531 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11535 * Caller to this routine should check for IOCB_ERROR
11536 * and handle it properly. This routine no longer removes
11537 * iocb off txcmplq and call compl in case of IOCB_ERROR.
11543 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11544 * @phba: pointer to lpfc HBA data structure.
11546 * This routine will abort all pending and outstanding iocbs to an HBA.
11549 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11551 struct lpfc_sli *psli = &phba->sli;
11552 struct lpfc_sli_ring *pring;
11553 struct lpfc_queue *qp = NULL;
11556 if (phba->sli_rev != LPFC_SLI_REV4) {
11557 for (i = 0; i < psli->num_rings; i++) {
11558 pring = &psli->sli3_ring[i];
11559 lpfc_sli_abort_iocb_ring(phba, pring);
11563 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11567 lpfc_sli_abort_iocb_ring(phba, pring);
11572 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
11573 * @iocbq: Pointer to driver iocb object.
11574 * @vport: Pointer to driver virtual port object.
11575 * @tgt_id: SCSI ID of the target.
11576 * @lun_id: LUN ID of the scsi device.
11577 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11579 * This function acts as an iocb filter for functions which abort or count
11580 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11581 * 0 if the filtering criteria are met for the given iocb and will return
11582 * 1 if the filtering criteria are not met.
11583 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11584 * given iocb is for the SCSI device specified by vport, tgt_id and
11585 * lun_id parameters.
11586 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11587 * given iocb is for the SCSI target specified by vport and tgt_id
11589 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11590 * given iocb is for the SCSI host associated with the given vport.
11591 * This function is called with no locks held.
11594 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11595 uint16_t tgt_id, uint64_t lun_id,
11596 lpfc_ctx_cmd ctx_cmd)
11598 struct lpfc_io_buf *lpfc_cmd;
11599 IOCB_t *icmd = NULL;
11602 if (iocbq->vport != vport)
11605 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11606 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
11607 iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11610 icmd = &iocbq->iocb;
11611 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11612 icmd->ulpCommand == CMD_CLOSE_XRI_CN)
11615 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11617 if (lpfc_cmd->pCmd == NULL)
11622 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11623 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11624 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11628 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11629 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11632 case LPFC_CTX_HOST:
11636 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11637 __func__, ctx_cmd);
11645 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
11646 * @vport: Pointer to virtual port.
11647 * @tgt_id: SCSI ID of the target.
11648 * @lun_id: LUN ID of the scsi device.
11649 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11651 * This function returns the number of FCP commands pending for the vport.
11652 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
11653 * commands pending on the vport associated with SCSI device specified
11654 * by tgt_id and lun_id parameters.
11655 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
11656 * commands pending on the vport associated with SCSI target specified
11657 * by tgt_id parameter.
11658 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
11659 * commands pending on the vport.
11660 * This function returns the number of iocbs which satisfy the filter.
11661 * This function is called without any lock held.
11664 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11665 lpfc_ctx_cmd ctx_cmd)
11667 struct lpfc_hba *phba = vport->phba;
11668 struct lpfc_iocbq *iocbq;
11671 spin_lock_irq(&phba->hbalock);
11672 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11673 iocbq = phba->sli.iocbq_lookup[i];
11675 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
11679 spin_unlock_irq(&phba->hbalock);
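/*
 * Illustrative sketch (editorial note, not driver code): counting the
 * FCP commands still pending against a single LUN, e.g. before deciding
 * whether a reset completed:
 *
 *	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
 *
 * a nonzero return means outstanding I/O remains on that LUN.
 */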
11685 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11686 * @phba: Pointer to HBA context object
11687 * @cmdiocb: Pointer to command iocb object.
11688 * @rspiocb: Pointer to response iocb object.
11690 * This function is called when an aborted FCP iocb completes. This
11691 * function is called by the ring event handler with no lock held.
11692 * This function frees the iocb.
11695 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11696 struct lpfc_iocbq *rspiocb)
11698 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11699 "3096 ABORT_XRI_CN completing on rpi x%x "
11700 "original iotag x%x, abort cmd iotag x%x "
11701 "status 0x%x, reason 0x%x\n",
11702 cmdiocb->iocb.un.acxri.abortContextTag,
11703 cmdiocb->iocb.un.acxri.abortIoTag,
11704 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11705 rspiocb->iocb.un.ulpWord[4]);
11706 lpfc_sli_release_iocbq(phba, cmdiocb);
11711 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
11712 * @vport: Pointer to virtual port.
11713 * @pring: Pointer to driver SLI ring object.
11714 * @tgt_id: SCSI ID of the target.
11715 * @lun_id: LUN ID of the scsi device.
11716 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11718 * This function sends an abort command for every SCSI command
11719 * associated with the given virtual port pending on the ring
11720 * filtered by lpfc_sli_validate_fcp_iocb function.
11721 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
11722 * FCP iocbs associated with lun specified by tgt_id and lun_id
11724 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
11725 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11726 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
11727 * FCP iocbs associated with virtual port.
11728 * This function returns the number of iocbs it failed to abort.
11729 * This function is called with no locks held.
11732 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11733 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11735 struct lpfc_hba *phba = vport->phba;
11736 struct lpfc_iocbq *iocbq;
11737 struct lpfc_iocbq *abtsiocb;
11738 struct lpfc_sli_ring *pring_s4;
11739 IOCB_t *cmd = NULL;
11740 int errcnt = 0, ret_val = 0;
11743 /* all I/Os are in the process of being flushed */
11744 if (phba->hba_flag & HBA_IOQ_FLUSH)
11747 for (i = 1; i <= phba->sli.last_iotag; i++) {
11748 iocbq = phba->sli.iocbq_lookup[i];
11750 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11755 * If the iocbq is already being aborted, don't take a second
11756 * action, but do count it.
11758 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11761 /* issue ABTS for this IOCB based on iotag */
11762 abtsiocb = lpfc_sli_get_iocbq(phba);
11763 if (abtsiocb == NULL) {
11768 /* indicate the IO is being aborted by the driver. */
11769 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11771 cmd = &iocbq->iocb;
11772 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11773 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11774 if (phba->sli_rev == LPFC_SLI_REV4)
11775 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11777 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11778 abtsiocb->iocb.ulpLe = 1;
11779 abtsiocb->iocb.ulpClass = cmd->ulpClass;
11780 abtsiocb->vport = vport;
11782 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11783 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11784 if (iocbq->iocb_flag & LPFC_IO_FCP)
11785 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11786 if (iocbq->iocb_flag & LPFC_IO_FOF)
11787 abtsiocb->iocb_flag |= LPFC_IO_FOF;
11789 if (lpfc_is_link_up(phba))
11790 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11792 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11794 /* Setup callback routine and issue the command. */
11795 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11796 if (phba->sli_rev == LPFC_SLI_REV4) {
11797 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11800 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11803 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11805 if (ret_val == IOCB_ERROR) {
11806 lpfc_sli_release_iocbq(phba, abtsiocb);
11816 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11817 * @vport: Pointer to virtual port.
11818 * @pring: Pointer to driver SLI ring object.
11819 * @tgt_id: SCSI ID of the target.
11820 * @lun_id: LUN ID of the scsi device.
11821 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11823 * This function sends an abort command for every SCSI command
11824 * associated with the given virtual port pending on the ring
11825 * filtered by the lpfc_sli_validate_fcp_iocb function.
11826 * When cmd == LPFC_CTX_LUN, the function sends an abort only to the
11827 * FCP iocbs associated with the LUN specified by the tgt_id and lun_id
11829 * parameters. When cmd == LPFC_CTX_TGT, it sends an abort only to the
11830 * FCP iocbs associated with the SCSI target specified by the tgt_id
11831 * parameter. When cmd == LPFC_CTX_HOST, it sends an abort to all FCP
11832 * iocbs associated with the virtual port.
11833 * This function returns the number of iocbs it aborted.
11834 * This function is called with no locks held, right after a task management command is sent.
11838 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11839 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11841 struct lpfc_hba *phba = vport->phba;
11842 struct lpfc_io_buf *lpfc_cmd;
11843 struct lpfc_iocbq *abtsiocbq;
11844 struct lpfc_nodelist *ndlp;
11845 struct lpfc_iocbq *iocbq;
11847 int sum, i, ret_val;
11848 unsigned long iflags;
11849 struct lpfc_sli_ring *pring_s4 = NULL;
11851 spin_lock_irqsave(&phba->hbalock, iflags);
11853 /* all I/Os are in the process of being flushed */
11854 if (phba->hba_flag & HBA_IOQ_FLUSH) {
11855 spin_unlock_irqrestore(&phba->hbalock, iflags);
11860 for (i = 1; i <= phba->sli.last_iotag; i++) {
11861 iocbq = phba->sli.iocbq_lookup[i];
11863 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11867 /* Guard against IO completion being called at the same time */
11868 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11869 spin_lock(&lpfc_cmd->buf_lock);
11871 if (!lpfc_cmd->pCmd) {
11872 spin_unlock(&lpfc_cmd->buf_lock);
11876 if (phba->sli_rev == LPFC_SLI_REV4) {
11878 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
11880 spin_unlock(&lpfc_cmd->buf_lock);
11883 /* Note: both hbalock and ring_lock must be held here */
11884 spin_lock(&pring_s4->ring_lock);
11888 * If the iocbq is already being aborted, don't take a second
11889 * action, but do count it.
11891 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
11892 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
11893 if (phba->sli_rev == LPFC_SLI_REV4)
11894 spin_unlock(&pring_s4->ring_lock);
11895 spin_unlock(&lpfc_cmd->buf_lock);
11899 /* issue ABTS for this IOCB based on iotag */
11900 abtsiocbq = __lpfc_sli_get_iocbq(phba);
11902 if (phba->sli_rev == LPFC_SLI_REV4)
11903 spin_unlock(&pring_s4->ring_lock);
11904 spin_unlock(&lpfc_cmd->buf_lock);
11908 icmd = &iocbq->iocb;
11909 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11910 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11911 if (phba->sli_rev == LPFC_SLI_REV4)
11912 abtsiocbq->iocb.un.acxri.abortIoTag =
11913 iocbq->sli4_xritag;
11915 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11916 abtsiocbq->iocb.ulpLe = 1;
11917 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11918 abtsiocbq->vport = vport;
11920 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11921 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
11922 if (iocbq->iocb_flag & LPFC_IO_FCP)
11923 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
11924 if (iocbq->iocb_flag & LPFC_IO_FOF)
11925 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
11927 ndlp = lpfc_cmd->rdata->pnode;
11929 if (lpfc_is_link_up(phba) &&
11930 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
11931 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11933 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11935 /* Setup callback routine and issue the command. */
11936 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11939 * Indicate the IO is being aborted by the driver by setting the
11940 * LPFC_DRIVER_ABORTED flag in the aborted IO.
11942 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11944 if (phba->sli_rev == LPFC_SLI_REV4) {
11945 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11947 spin_unlock(&pring_s4->ring_lock);
11949 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11953 spin_unlock(&lpfc_cmd->buf_lock);
11955 if (ret_val == IOCB_ERROR)
11956 __lpfc_sli_release_iocbq(phba, abtsiocbq);
11960 spin_unlock_irqrestore(&phba->hbalock, iflags);
11965 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
11966 * @phba: Pointer to HBA context object.
11967 * @cmdiocbq: Pointer to command iocb.
11968 * @rspiocbq: Pointer to response iocb.
11970 * This function is the completion handler for iocbs issued using
11971 * the lpfc_sli_issue_iocb_wait function. It is called by the
11972 * ring event handler function without any lock held. It
11973 * can be called from both worker thread context and interrupt
11974 * context, as well as from other threads that clean
11975 * up the SLI layer objects.
11976 * This function copies the contents of the response iocb to the
11977 * response iocb memory object provided by the caller of
11978 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11979 * sleeps for the iocb completion.
11982 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11983 struct lpfc_iocbq *cmdiocbq,
11984 struct lpfc_iocbq *rspiocbq)
11986 wait_queue_head_t *pdone_q;
11987 unsigned long iflags;
11988 struct lpfc_io_buf *lpfc_cmd;
11990 spin_lock_irqsave(&phba->hbalock, iflags);
11991 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11994 * A time out has occurred for the iocb. If a time out
11995 * completion handler has been supplied, call it. Otherwise,
11996 * just free the iocbq.
11999 spin_unlock_irqrestore(&phba->hbalock, iflags);
12000 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
12001 cmdiocbq->wait_iocb_cmpl = NULL;
12002 if (cmdiocbq->iocb_cmpl)
12003 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
12005 lpfc_sli_release_iocbq(phba, cmdiocbq);
12009 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
12010 if (cmdiocbq->context2 && rspiocbq)
12011 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
12012 &rspiocbq->iocb, sizeof(IOCB_t));
12014 /* Set the exchange busy flag for task management commands */
12015 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
12016 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
12017 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
12019 if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
12020 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
12022 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
12025 pdone_q = cmdiocbq->context_un.wait_queue;
12028 spin_unlock_irqrestore(&phba->hbalock, iflags);
12033 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
12034 * @phba: Pointer to HBA context object..
12035 * @piocbq: Pointer to command iocb.
12036 * @flag: Flag to test.
12038 * This routine grabs the hbalock and then tests the iocb_flag to
12039 * see if the passed-in flag is set. Returns:
12041 * 1 if flag is set.
12042 * 0 if flag is not set.
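 *
 * This routine is intended as a wait_event_timeout() condition, mirroring
 * its use in lpfc_sli_issue_iocb_wait() below:
 *
 *	wait_event_timeout(done_q,
 *			   lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
 *			   timeout_req);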
12045 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
12046 struct lpfc_iocbq *piocbq, uint32_t flag)
12048 unsigned long iflags;
12051 spin_lock_irqsave(&phba->hbalock, iflags);
12052 ret = piocbq->iocb_flag & flag;
12053 spin_unlock_irqrestore(&phba->hbalock, iflags);
12059 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
12060 * @phba: Pointer to HBA context object..
12061 * @ring_number: Ring number
12062 * @piocb: Pointer to command iocb.
12063 * @prspiocbq: Pointer to response iocb.
12064 * @timeout: Timeout in number of seconds.
12066 * This function issues the iocb to firmware and waits for the
12067 * iocb to complete. The iocb_cmpl field of the iocb shall be used
12068 * to handle iocbs which time out. If the field is NULL, the
12069 * function shall free the iocbq structure. If more clean up is
12070 * needed, the caller is expected to provide a completion function
12071 * that will provide the needed clean up. If the iocb command is
12072 * not completed within timeout seconds, the function will either
12073 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
12074 * completion function set in the iocb_cmpl field and then return
12075 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
12076 * resources if this function returns IOCB_TIMEDOUT.
12077 * The function waits for the iocb completion using a
12078 * non-interruptible wait.
12079 * This function will sleep while waiting for iocb completion.
12080 * So, this function should not be called from any context which
12081 * does not allow sleeping. For the same reason, this function
12082 * cannot be called with interrupts disabled.
12083 * This function assumes that the iocb completions occur while
12084 * this function sleeps. So, this function cannot be called from
12085 * the thread which processes iocb completions for this ring.
12086 * This function clears the iocb_flag of the iocb object before
12087 * issuing the iocb and the iocb completion handler sets this
12088 * flag and wakes this thread when the iocb completes.
12089 * The contents of the response iocb will be copied to prspiocbq
12090 * by the completion handler when the command completes.
12091 * This function returns IOCB_SUCCESS on success.
12092 * This function is called with no lock held.
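 *
 * Usage sketch (hedged; the caller context and cleanup shown are
 * assumptions, not code from this file). The IOCB_TIMEDOUT ownership rule
 * described above is the key point:
 *
 *	retval = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 *					  rspiocbq, timeout);
 *	if (retval == IOCB_TIMEDOUT)
 *		return;         (resources are now owned by iocb_cmpl)
 *	if (retval == IOCB_SUCCESS)
 *		inspect rspiocbq->iocb.ulpStatus for the command result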
12095 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
12096 uint32_t ring_number,
12097 struct lpfc_iocbq *piocb,
12098 struct lpfc_iocbq *prspiocbq,
12101 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
12102 long timeleft, timeout_req = 0;
12103 int retval = IOCB_SUCCESS;
12105 struct lpfc_iocbq *iocb;
12107 int txcmplq_cnt = 0;
12108 struct lpfc_sli_ring *pring;
12109 unsigned long iflags;
12110 bool iocb_completed = true;
12112 if (phba->sli_rev >= LPFC_SLI_REV4)
12113 pring = lpfc_sli4_calc_ring(phba, piocb);
12115 pring = &phba->sli.sli3_ring[ring_number];
12117 * If the caller has provided a response iocbq buffer, then context2
12118 * must be NULL; otherwise it is an error.
12121 if (piocb->context2)
12123 piocb->context2 = prspiocbq;
12126 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
12127 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
12128 piocb->context_un.wait_queue = &done_q;
12129 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
12131 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12132 if (lpfc_readl(phba->HCregaddr, &creg_val))
12134 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
12135 writel(creg_val, phba->HCregaddr);
12136 readl(phba->HCregaddr); /* flush */
12139 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
12140 SLI_IOCB_RET_IOCB);
12141 if (retval == IOCB_SUCCESS) {
12142 timeout_req = msecs_to_jiffies(timeout * 1000);
12143 timeleft = wait_event_timeout(done_q,
12144 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
12146 spin_lock_irqsave(&phba->hbalock, iflags);
12147 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
12150 * IOCB timed out. Inform the wake iocb wait
12151 * completion function and set the local completion status.
12154 iocb_completed = false;
12155 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
12157 spin_unlock_irqrestore(&phba->hbalock, iflags);
12158 if (iocb_completed) {
12159 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12160 "0331 IOCB wake signaled\n");
12161 /* Note: we are not indicating if the IOCB has a success
12162 * status or not - that's for the caller to check.
12163 * IOCB_SUCCESS means only that the command was sent and
12164 * completed, not that it completed successfully.
12166 } else if (timeleft == 0) {
12167 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12168 "0338 IOCB wait timeout error - no "
12169 "wake response Data x%x\n", timeout);
12170 retval = IOCB_TIMEDOUT;
12172 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12173 "0330 IOCB wake NOT set, "
12175 timeout, (timeleft / jiffies));
12176 retval = IOCB_TIMEDOUT;
12178 } else if (retval == IOCB_BUSY) {
12179 if (phba->cfg_log_verbose & LOG_SLI) {
12180 list_for_each_entry(iocb, &pring->txq, list) {
12183 list_for_each_entry(iocb, &pring->txcmplq, list) {
12186 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12187 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
12188 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12192 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12193 "0332 IOCB wait issue failed, Data x%x\n",
12195 retval = IOCB_ERROR;
12198 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12199 if (lpfc_readl(phba->HCregaddr, &creg_val))
12201 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12202 writel(creg_val, phba->HCregaddr);
12203 readl(phba->HCregaddr); /* flush */
12207 piocb->context2 = NULL;
12209 piocb->context_un.wait_queue = NULL;
12210 piocb->iocb_cmpl = NULL;
12215 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
12216 * @phba: Pointer to HBA context object.
12217 * @pmboxq: Pointer to driver mailbox object.
12218 * @timeout: Timeout in number of seconds.
12220 * This function issues the mailbox to firmware and waits for the
12221 * mailbox command to complete. If the mailbox command is not
12222 * completed within timeout seconds, it returns MBX_TIMEOUT.
12223 * The function waits for the mailbox completion using an
12224 * interruptible wait. If the thread is woken up due to a
12225 * signal, an MBX_TIMEOUT error is returned to the caller. The caller
12226 * should not free the mailbox resources if this function returns MBX_TIMEOUT.
12228 * This function will sleep while waiting for mailbox completion.
12229 * So, this function should not be called from any context which
12230 * does not allow sleeping. For the same reason, this function
12231 * cannot be called with interrupts disabled.
12232 * This function assumes that the mailbox completion occurs while
12233 * this function sleeps. So, this function cannot be called from
12234 * the worker thread which processes mailbox completions.
12235 * This function is called in the context of HBA management applications.
12237 * This function returns MBX_SUCCESS when successful.
12238 * This function is called with no lock held.
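 *
 * Usage sketch (hedged; the mailbox setup call shown is an illustrative
 * assumption). Note the MBX_TIMEOUT ownership rule above:
 *
 *	lpfc_read_rev(phba, pmboxq);
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmboxq, phba->mbox_mem_pool);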
12241 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
12244 struct completion mbox_done;
12246 unsigned long flag;
12248 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12249 /* set up the wake routine as the mailbox completion callback */
12250 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12252 /* set up the context3 field to pass the completion to the wake function */
12253 init_completion(&mbox_done);
12254 pmboxq->context3 = &mbox_done;
12255 /* now issue the command */
12256 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12257 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
12258 wait_for_completion_timeout(&mbox_done,
12259 msecs_to_jiffies(timeout * 1000));
12261 spin_lock_irqsave(&phba->hbalock, flag);
12262 pmboxq->context3 = NULL;
12264 * if the LPFC_MBX_WAKE flag is set, the mailbox completed;
12265 * otherwise do not free the resources.
12267 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12268 retval = MBX_SUCCESS;
12270 retval = MBX_TIMEOUT;
12271 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12273 spin_unlock_irqrestore(&phba->hbalock, flag);
12279 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
12280 * @phba: Pointer to HBA context.
12281 * @mbx_action: Mailbox shutdown options.
12283 * This function is called to shutdown the driver's mailbox sub-system.
12284 * It first marks the mailbox sub-system as blocked to prevent
12285 * asynchronous mailbox commands from being issued off the pending
12286 * mailbox command queue. If the mailbox command sub-system shutdown is due to
12287 * HBA error conditions such as EEH or ERATT, this routine shall invoke
12288 * the mailbox sub-system flush routine to forcefully bring down the
12289 * mailbox sub-system. Otherwise, if it is due to normal condition (such
12290 * as with offline or HBA function reset), this routine will wait for the
12291 * outstanding mailbox command to complete before invoking the mailbox
12292 * sub-system flush routine to gracefully bring down mailbox sub-system.
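 *
 * A hedged example of the two modes (the callers named are assumptions):
 * an EEH or ERATT error path would pass LPFC_MBX_NO_WAIT to flush
 * immediately, while a normal offline would pass LPFC_MBX_WAIT, e.g.
 *
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);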
12295 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12297 struct lpfc_sli *psli = &phba->sli;
12298 unsigned long timeout;
12300 if (mbx_action == LPFC_MBX_NO_WAIT) {
12301 /* delay 100ms for port state */
12303 lpfc_sli_mbox_sys_flush(phba);
12306 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
12308 /* Disable softirqs, including timers, from obtaining phba->hbalock */
12309 local_bh_disable();
12311 spin_lock_irq(&phba->hbalock);
12312 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12314 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
12315 /* Determine how long we might wait for the active mailbox
12316 * command to be gracefully completed by firmware.
12318 if (phba->sli.mbox_active)
12319 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12320 phba->sli.mbox_active) *
12322 spin_unlock_irq(&phba->hbalock);
12324 /* Enable softirqs again, done with phba->hbalock */
12327 while (phba->sli.mbox_active) {
12328 /* Check active mailbox complete status every 2ms */
12330 if (time_after(jiffies, timeout))
12331 /* Timed out, let the mailbox flush routine
12332 * forcefully release the active mailbox command
12337 spin_unlock_irq(&phba->hbalock);
12339 /* Enable softirqs again, done with phba->hbalock */
12343 lpfc_sli_mbox_sys_flush(phba);
12347 * lpfc_sli_eratt_read - read sli-3 error attention events
12348 * @phba: Pointer to HBA context.
12350 * This function is called to read the SLI3 device error attention registers
12351 * for possible error attention events. The caller must hold the hbalock
12352 * with spin_lock_irq().
12354 * This function returns 1 when there is Error Attention in the Host Attention
12355 * Register and returns 0 otherwise.
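 *
 * Locking sketch per the rule above (the work handling shown is
 * illustrative, not code from this file):
 *
 *	spin_lock_irq(&phba->hbalock);
 *	if (lpfc_sli_eratt_read(phba))
 *		queue or handle the error attention work here
 *	spin_unlock_irq(&phba->hbalock);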
12358 lpfc_sli_eratt_read(struct lpfc_hba *phba)
12362 /* Read chip Host Attention (HA) register */
12363 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12366 if (ha_copy & HA_ERATT) {
12367 /* Read host status register to retrieve error event */
12368 if (lpfc_sli_read_hs(phba))
12371 /* Check if a deferred error condition is active */
12372 if ((HS_FFER1 & phba->work_hs) &&
12373 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12374 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
12375 phba->hba_flag |= DEFER_ERATT;
12376 /* Clear all interrupt enable conditions */
12377 writel(0, phba->HCregaddr);
12378 readl(phba->HCregaddr);
12381 /* Set the driver HA work bitmap */
12382 phba->work_ha |= HA_ERATT;
12383 /* Indicate polling handles this ERATT */
12384 phba->hba_flag |= HBA_ERATT_HANDLED;
12390 /* Set the driver HS work bitmap */
12391 phba->work_hs |= UNPLUG_ERR;
12392 /* Set the driver HA work bitmap */
12393 phba->work_ha |= HA_ERATT;
12394 /* Indicate polling handles this ERATT */
12395 phba->hba_flag |= HBA_ERATT_HANDLED;
12400 * lpfc_sli4_eratt_read - read sli-4 error attention events
12401 * @phba: Pointer to HBA context.
12403 * This function is called to read the SLI4 device error attention registers
12404 * for possible error attention events. The caller must hold the hbalock
12405 * with spin_lock_irq().
12407 * This function returns 1 when there is Error Attention in the Host Attention
12408 * Register and returns 0 otherwise.
12411 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12413 uint32_t uerr_sta_hi, uerr_sta_lo;
12414 uint32_t if_type, portsmphr;
12415 struct lpfc_register portstat_reg;
12419 * For now, use the SLI4 device internal unrecoverable error
12420 * registers for error attention. This can be changed later.
12422 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12424 case LPFC_SLI_INTF_IF_TYPE_0:
12425 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12427 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12429 phba->work_hs |= UNPLUG_ERR;
12430 phba->work_ha |= HA_ERATT;
12431 phba->hba_flag |= HBA_ERATT_HANDLED;
12434 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12435 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12436 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12437 "1423 HBA Unrecoverable error: "
12438 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12439 "ue_mask_lo_reg=0x%x, "
12440 "ue_mask_hi_reg=0x%x\n",
12441 uerr_sta_lo, uerr_sta_hi,
12442 phba->sli4_hba.ue_mask_lo,
12443 phba->sli4_hba.ue_mask_hi);
12444 phba->work_status[0] = uerr_sta_lo;
12445 phba->work_status[1] = uerr_sta_hi;
12446 phba->work_ha |= HA_ERATT;
12447 phba->hba_flag |= HBA_ERATT_HANDLED;
12451 case LPFC_SLI_INTF_IF_TYPE_2:
12452 case LPFC_SLI_INTF_IF_TYPE_6:
12453 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12454 &portstat_reg.word0) ||
12455 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12457 phba->work_hs |= UNPLUG_ERR;
12458 phba->work_ha |= HA_ERATT;
12459 phba->hba_flag |= HBA_ERATT_HANDLED;
12462 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12463 phba->work_status[0] =
12464 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12465 phba->work_status[1] =
12466 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12467 logmask = LOG_TRACE_EVENT;
12468 if (phba->work_status[0] ==
12469 SLIPORT_ERR1_REG_ERR_CODE_2 &&
12470 phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
12472 lpfc_printf_log(phba, KERN_ERR, logmask,
12473 "2885 Port Status Event: "
12474 "port status reg 0x%x, "
12475 "port smphr reg 0x%x, "
12476 "error 1=0x%x, error 2=0x%x\n",
12477 portstat_reg.word0,
12479 phba->work_status[0],
12480 phba->work_status[1]);
12481 phba->work_ha |= HA_ERATT;
12482 phba->hba_flag |= HBA_ERATT_HANDLED;
12486 case LPFC_SLI_INTF_IF_TYPE_1:
12488 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12489 "2886 HBA Error Attention on unsupported "
12490 "if type %d.", if_type);
12498 * lpfc_sli_check_eratt - check error attention events
12499 * @phba: Pointer to HBA context.
12501 * This function is called from timer soft interrupt context to check HBA's
12502 * error attention register bit for error attention events.
12504 * This function returns 1 when there is Error Attention in the Host Attention
12505 * Register and returns 0 otherwise.
12508 lpfc_sli_check_eratt(struct lpfc_hba *phba)
12512 /* If somebody is waiting to handle an eratt, don't process it
12513 * here. The brdkill function will do this.
12515 if (phba->link_flag & LS_IGNORE_ERATT)
12518 /* Check if interrupt handler handles this ERATT */
12519 spin_lock_irq(&phba->hbalock);
12520 if (phba->hba_flag & HBA_ERATT_HANDLED) {
12521 /* Interrupt handler has handled ERATT */
12522 spin_unlock_irq(&phba->hbalock);
12527 * If there is deferred error attention, do not check for error attention here.
12530 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12531 spin_unlock_irq(&phba->hbalock);
12535 /* If PCI channel is offline, don't process it */
12536 if (unlikely(pci_channel_offline(phba->pcidev))) {
12537 spin_unlock_irq(&phba->hbalock);
12541 switch (phba->sli_rev) {
12542 case LPFC_SLI_REV2:
12543 case LPFC_SLI_REV3:
12544 /* Read chip Host Attention (HA) register */
12545 ha_copy = lpfc_sli_eratt_read(phba);
12547 case LPFC_SLI_REV4:
12548 /* Read device Unrecoverable Error (UERR) registers */
12549 ha_copy = lpfc_sli4_eratt_read(phba);
12552 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12553 "0299 Invalid SLI revision (%d)\n",
12558 spin_unlock_irq(&phba->hbalock);
12564 * lpfc_intr_state_check - Check device state for interrupt handling
12565 * @phba: Pointer to HBA context.
12567 * This inline routine checks whether the device or its PCI slot is in a
12568 * state in which the interrupt should be handled.
12570 * This function returns 0 if the device or the PCI slot is in a state in
12571 * which the interrupt should be handled, otherwise -EIO.
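 *
 * Typical use at the top of an interrupt handler, mirroring the SLI-3
 * handlers below:
 *
 *	if (lpfc_intr_state_check(phba))
 *		return IRQ_NONE;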
12574 lpfc_intr_state_check(struct lpfc_hba *phba)
12576 /* If the pci channel is offline, ignore all the interrupts */
12577 if (unlikely(pci_channel_offline(phba->pcidev)))
12580 /* Update device level interrupt statistics */
12581 phba->sli.slistat.sli_intr++;
12583 /* Ignore all interrupts during initialization. */
12584 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12591 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
12592 * @irq: Interrupt number.
12593 * @dev_id: The device context pointer.
12595 * This function is directly called from the PCI layer as an interrupt
12596 * service routine when device with SLI-3 interface spec is enabled with
12597 * MSI-X multi-message interrupt mode and there are slow-path events in
12598 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12599 * interrupt mode, this function is called as part of the device-level
12600 * interrupt handler. When the PCI slot is in error recovery or the HBA
12601 * is undergoing initialization, the interrupt handler will not process
12602 * the interrupt. The link attention and ELS ring attention events are
12603 * handled by the worker thread. The interrupt handler signals the worker
12604 * thread and returns for these events. This function is called without
12605 * any lock held. It gets the hbalock to access and update SLI data structures.
12608 * This function returns IRQ_HANDLED when interrupt is handled else it
12609 * returns IRQ_NONE.
12612 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12614 struct lpfc_hba *phba;
12615 uint32_t ha_copy, hc_copy;
12616 uint32_t work_ha_copy;
12617 unsigned long status;
12618 unsigned long iflag;
12621 MAILBOX_t *mbox, *pmbox;
12622 struct lpfc_vport *vport;
12623 struct lpfc_nodelist *ndlp;
12624 struct lpfc_dmabuf *mp;
12629 * Get the driver's phba structure from the dev_id and
12630 * assume the HBA is not interrupting.
12632 phba = (struct lpfc_hba *)dev_id;
12634 if (unlikely(!phba))
12638 * Extra care is needed when this function is invoked as an
12639 * individual interrupt handler in MSI-X multi-message interrupt mode.
12641 if (phba->intr_type == MSIX) {
12642 /* Check device state for handling interrupt */
12643 if (lpfc_intr_state_check(phba))
12645 /* Need to read HA REG for slow-path events */
12646 spin_lock_irqsave(&phba->hbalock, iflag);
12647 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12649 /* If somebody is waiting to handle an eratt don't process it
12650 * here. The brdkill function will do this.
12652 if (phba->link_flag & LS_IGNORE_ERATT)
12653 ha_copy &= ~HA_ERATT;
12654 /* Check the need for handling ERATT in interrupt handler */
12655 if (ha_copy & HA_ERATT) {
12656 if (phba->hba_flag & HBA_ERATT_HANDLED)
12657 /* ERATT polling has handled ERATT */
12658 ha_copy &= ~HA_ERATT;
12660 /* Indicate interrupt handler handles ERATT */
12661 phba->hba_flag |= HBA_ERATT_HANDLED;
12665 * If there is deferred error attention, do not check for any interrupt.
12668 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12669 spin_unlock_irqrestore(&phba->hbalock, iflag);
12673 /* Clear only the attention sources related to the slow path */
12674 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12677 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12678 HC_LAINT_ENA | HC_ERINT_ENA),
12680 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12682 writel(hc_copy, phba->HCregaddr);
12683 readl(phba->HAregaddr); /* flush */
12684 spin_unlock_irqrestore(&phba->hbalock, iflag);
12686 ha_copy = phba->ha_copy;
12688 work_ha_copy = ha_copy & phba->work_ha_mask;
12690 if (work_ha_copy) {
12691 if (work_ha_copy & HA_LATT) {
12692 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12694 * Turn off Link Attention interrupts
12695 * until CLEAR_LA done
12697 spin_lock_irqsave(&phba->hbalock, iflag);
12698 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12699 if (lpfc_readl(phba->HCregaddr, &control))
12701 control &= ~HC_LAINT_ENA;
12702 writel(control, phba->HCregaddr);
12703 readl(phba->HCregaddr); /* flush */
12704 spin_unlock_irqrestore(&phba->hbalock, iflag);
12707 work_ha_copy &= ~HA_LATT;
12710 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
12712 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
12713 * the only slow ring.
12715 status = (work_ha_copy &
12716 (HA_RXMASK << (4*LPFC_ELS_RING)));
12717 status >>= (4*LPFC_ELS_RING);
12718 if (status & HA_RXMASK) {
12719 spin_lock_irqsave(&phba->hbalock, iflag);
12720 if (lpfc_readl(phba->HCregaddr, &control))
12723 lpfc_debugfs_slow_ring_trc(phba,
12724 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12726 (uint32_t)phba->sli.slistat.sli_intr);
12728 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12729 lpfc_debugfs_slow_ring_trc(phba,
12730 "ISR Disable ring:"
12731 "pwork:x%x hawork:x%x wait:x%x",
12732 phba->work_ha, work_ha_copy,
12733 (uint32_t)((unsigned long)
12734 &phba->work_waitq));
12737 ~(HC_R0INT_ENA << LPFC_ELS_RING);
12738 writel(control, phba->HCregaddr);
12739 readl(phba->HCregaddr); /* flush */
12742 lpfc_debugfs_slow_ring_trc(phba,
12743 "ISR slow ring: pwork:"
12744 "x%x hawork:x%x wait:x%x",
12745 phba->work_ha, work_ha_copy,
12746 (uint32_t)((unsigned long)
12747 &phba->work_waitq));
12749 spin_unlock_irqrestore(&phba->hbalock, iflag);
12752 spin_lock_irqsave(&phba->hbalock, iflag);
12753 if (work_ha_copy & HA_ERATT) {
12754 if (lpfc_sli_read_hs(phba))
12757 * Check if a deferred error condition is active
12760 if ((HS_FFER1 & phba->work_hs) &&
12761 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12762 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12764 phba->hba_flag |= DEFER_ERATT;
12765 /* Clear all interrupt enable conditions */
12766 writel(0, phba->HCregaddr);
12767 readl(phba->HCregaddr);
12771 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12772 pmb = phba->sli.mbox_active;
12773 pmbox = &pmb->u.mb;
12775 vport = pmb->vport;
12777 /* First check out the status word */
12778 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12779 if (pmbox->mbxOwner != OWN_HOST) {
12780 spin_unlock_irqrestore(&phba->hbalock, iflag);
12782 * Stray Mailbox Interrupt, mbxCommand <cmd>
12783 * mbxStatus <status>
12785 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12786 "(%d):0304 Stray Mailbox "
12787 "Interrupt mbxCommand x%x "
12789 (vport ? vport->vpi : 0),
12792 /* clear mailbox attention bit */
12793 work_ha_copy &= ~HA_MBATT;
12795 phba->sli.mbox_active = NULL;
12796 spin_unlock_irqrestore(&phba->hbalock, iflag);
12797 phba->last_completion_time = jiffies;
12798 del_timer(&phba->sli.mbox_tmo);
12799 if (pmb->mbox_cmpl) {
12800 lpfc_sli_pcimem_bcopy(mbox, pmbox,
12802 if (pmb->out_ext_byte_len &&
12804 lpfc_sli_pcimem_bcopy(
12807 pmb->out_ext_byte_len);
12809 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12810 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12812 lpfc_debugfs_disc_trc(vport,
12813 LPFC_DISC_TRC_MBOX_VPORT,
12814 "MBOX dflt rpi: : "
12815 "status:x%x rpi:x%x",
12816 (uint32_t)pmbox->mbxStatus,
12817 pmbox->un.varWords[0], 0);
12819 if (!pmbox->mbxStatus) {
12820 mp = (struct lpfc_dmabuf *)
12822 ndlp = (struct lpfc_nodelist *)
12825 /* Reg_LOGIN of dflt RPI was
12826 * successful. Now let's get
12827 * rid of the RPI using the
12828 * same mbox buffer.
12830 lpfc_unreg_login(phba,
12832 pmbox->un.varWords[0],
12835 lpfc_mbx_cmpl_dflt_rpi;
12837 pmb->ctx_ndlp = ndlp;
12838 pmb->vport = vport;
12839 rc = lpfc_sli_issue_mbox(phba,
12842 if (rc != MBX_BUSY)
12843 lpfc_printf_log(phba,
12846 "0350 rc should have"
12847 "been MBX_BUSY\n");
12848 if (rc != MBX_NOT_FINISHED)
12849 goto send_current_mbox;
12853 &phba->pport->work_port_lock,
12855 phba->pport->work_port_events &=
12857 spin_unlock_irqrestore(
12858 &phba->pport->work_port_lock,
12860 lpfc_mbox_cmpl_put(phba, pmb);
12863 spin_unlock_irqrestore(&phba->hbalock, iflag);
12865 if ((work_ha_copy & HA_MBATT) &&
12866 (phba->sli.mbox_active == NULL)) {
12868 /* Process next mailbox command if there is one */
12870 rc = lpfc_sli_issue_mbox(phba, NULL,
12872 } while (rc == MBX_NOT_FINISHED);
12873 if (rc != MBX_SUCCESS)
12874 lpfc_printf_log(phba, KERN_ERR,
12876 "0349 rc should be "
12880 spin_lock_irqsave(&phba->hbalock, iflag);
12881 phba->work_ha |= work_ha_copy;
12882 spin_unlock_irqrestore(&phba->hbalock, iflag);
12883 lpfc_worker_wake_up(phba);
12885 return IRQ_HANDLED;
12887 spin_unlock_irqrestore(&phba->hbalock, iflag);
12888 return IRQ_HANDLED;
12890 } /* lpfc_sli_sp_intr_handler */
12893 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
12894 * @irq: Interrupt number.
12895 * @dev_id: The device context pointer.
12897 * This function is directly called from the PCI layer as an interrupt
12898 * service routine when device with SLI-3 interface spec is enabled with
12899 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12900 * ring event in the HBA. However, when the device is enabled with either
12901 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12902 * device-level interrupt handler. When the PCI slot is in error recovery
12903 * or the HBA is undergoing initialization, the interrupt handler will not
12904 * process the interrupt. The SCSI FCP fast-path ring events are handled in
12905 * the interrupt context. This function is called without any lock held.
12906 * It gets the hbalock to access and update SLI data structures.
12908 * This function returns IRQ_HANDLED when interrupt is handled else it
12909 * returns IRQ_NONE.
12912 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12914 struct lpfc_hba *phba;
12916 unsigned long status;
12917 unsigned long iflag;
12918 struct lpfc_sli_ring *pring;
12920 /* Get the driver's phba structure from the dev_id and
12921 * assume the HBA is not interrupting.
12923 phba = (struct lpfc_hba *) dev_id;
12925 if (unlikely(!phba))
12929 * Extra care is needed when this function is invoked as an
12930 * individual interrupt handler in MSI-X multi-message interrupt mode.
12932 if (phba->intr_type == MSIX) {
12933 /* Check device state for handling interrupt */
12934 if (lpfc_intr_state_check(phba))
12936 /* Need to read HA REG for FCP ring and other ring events */
12937 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12938 return IRQ_HANDLED;
12939 /* Clear only the attention sources related to the fast path */
12940 spin_lock_irqsave(&phba->hbalock, iflag);
12942 * If there is deferred error attention, do not check for
12945 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12946 spin_unlock_irqrestore(&phba->hbalock, iflag);
12949 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12951 readl(phba->HAregaddr); /* flush */
12952 spin_unlock_irqrestore(&phba->hbalock, iflag);
12954 ha_copy = phba->ha_copy;
12957 * Process all events on FCP ring. Take the optimized path for FCP IO.
12959 ha_copy &= ~(phba->work_ha_mask);
12961 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12962 status >>= (4*LPFC_FCP_RING);
12963 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12964 if (status & HA_RXMASK)
12965 lpfc_sli_handle_fast_ring_event(phba, pring, status);
12967 if (phba->cfg_multi_ring_support == 2) {
12969 * Process all events on extra ring. Take the optimized path
12970 * for extra ring IO.
12972 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12973 status >>= (4*LPFC_EXTRA_RING);
12974 if (status & HA_RXMASK) {
12975 lpfc_sli_handle_fast_ring_event(phba,
12976 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
12980 return IRQ_HANDLED;
12981 } /* lpfc_sli_fp_intr_handler */
12984 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
12985 * @irq: Interrupt number.
12986 * @dev_id: The device context pointer.
12988 * This function is the HBA device-level interrupt handler to device with
12989 * SLI-3 interface spec, called from the PCI layer when either MSI or
12990 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12991 * requires driver attention. This function invokes the slow-path interrupt
12992 * attention handling function and fast-path interrupt attention handling
12993 * function in turn to process the relevant HBA attention events. This
12994 * function is called without any lock held. It gets the hbalock to access
12995 * and update SLI data structures.
12997 * This function returns IRQ_HANDLED when interrupt is handled, else it
12998 * returns IRQ_NONE.
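 *
 * The HA register packs four attention bits per ring, so ring N's Rx
 * status nibble is extracted as in the body below:
 *
 *	status = (phba->ha_copy & (HA_RXMASK << (4 * N)));
 *	status >>= (4 * N);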
13001 lpfc_sli_intr_handler(int irq, void *dev_id)
13003 struct lpfc_hba *phba;
13004 irqreturn_t sp_irq_rc, fp_irq_rc;
13005 unsigned long status1, status2;
13009 * Get the driver's phba structure from the dev_id and
13010 * assume the HBA is not interrupting.
13012 phba = (struct lpfc_hba *) dev_id;
13014 if (unlikely(!phba))
13017 /* Check device state for handling interrupt */
13018 if (lpfc_intr_state_check(phba))
13021 spin_lock(&phba->hbalock);
13022 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
13023 spin_unlock(&phba->hbalock);
13024 return IRQ_HANDLED;
13027 if (unlikely(!phba->ha_copy)) {
13028 spin_unlock(&phba->hbalock);
13030 } else if (phba->ha_copy & HA_ERATT) {
13031 if (phba->hba_flag & HBA_ERATT_HANDLED)
13032 /* ERATT polling has handled ERATT */
13033 phba->ha_copy &= ~HA_ERATT;
13035 /* Indicate interrupt handler handles ERATT */
13036 phba->hba_flag |= HBA_ERATT_HANDLED;
13040 * If there is deferred error attention, do not check for any interrupt.
13042 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13043 spin_unlock(&phba->hbalock);
13047 /* Clear attention sources except link and error attentions */
13048 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
13049 spin_unlock(&phba->hbalock);
13050 return IRQ_HANDLED;
13052 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
13053 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
13055 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
13056 writel(hc_copy, phba->HCregaddr);
13057 readl(phba->HAregaddr); /* flush */
13058 spin_unlock(&phba->hbalock);
13061 * Invokes slow-path host attention interrupt handling as appropriate.
13064 /* status of events with mailbox and link attention */
13065 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
13067 /* status of events with ELS ring */
13068 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
13069 status2 >>= (4*LPFC_ELS_RING);
13071 if (status1 || (status2 & HA_RXMASK))
13072 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
13074 sp_irq_rc = IRQ_NONE;
13077 * Invoke fast-path host attention interrupt handling as appropriate.
13080 /* status of events with FCP ring */
13081 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13082 status1 >>= (4*LPFC_FCP_RING);
13084 /* status of events with extra ring */
13085 if (phba->cfg_multi_ring_support == 2) {
13086 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13087 status2 >>= (4*LPFC_EXTRA_RING);
13091 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
13092 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
13094 fp_irq_rc = IRQ_NONE;
13096 /* Return device-level interrupt handling status */
13097 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
13098 } /* lpfc_sli_intr_handler */
13101 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
13102 * @phba: pointer to lpfc hba data structure.
13104 * This routine is invoked by the worker thread to process all the pending
13105 * SLI4 els abort xri events.
13107 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
13109 struct lpfc_cq_event *cq_event;
13110 unsigned long iflags;
13112 /* First, declare the els xri abort event has been handled */
13113 spin_lock_irqsave(&phba->hbalock, iflags);
13114 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
13115 spin_unlock_irqrestore(&phba->hbalock, iflags);
13117 /* Now, handle all the els xri abort events */
13118 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
13119 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
13120 /* Get the first event from the head of the event queue */
13121 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
13122 cq_event, struct lpfc_cq_event, list);
13123 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
13125 /* Notify aborted XRI for ELS work queue */
13126 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
13128 /* Free the event processed back to the free pool */
13129 lpfc_sli4_cq_event_release(phba, cq_event);
13130 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
13133 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
13137 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
13138 * @phba: pointer to lpfc hba data structure
13139 * @pIocbIn: pointer to the rspiocbq
13140 * @pIocbOut: pointer to the cmdiocbq
13141 * @wcqe: pointer to the complete wcqe
13143 * This routine transfers the fields of a command iocbq to a response iocbq
13144 * by copying all the IOCB fields from the command iocbq and transferring
13145 * the completion status information from the completion wcqe.
13148 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
13149 struct lpfc_iocbq *pIocbIn,
13150 struct lpfc_iocbq *pIocbOut,
13151 struct lpfc_wcqe_complete *wcqe)
13154 unsigned long iflags;
13155 uint32_t status, max_response;
13156 struct lpfc_dmabuf *dmabuf;
13157 struct ulp_bde64 *bpl, bde;
13158 size_t offset = offsetof(struct lpfc_iocbq, iocb);
13160 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
13161 sizeof(struct lpfc_iocbq) - offset);
13162 /* Map WCQE parameters into irspiocb parameters */
13163 status = bf_get(lpfc_wcqe_c_status, wcqe);
13164 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
13165 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
13166 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
13167 pIocbIn->iocb.un.fcpi.fcpi_parm =
13168 pIocbOut->iocb.un.fcpi.fcpi_parm -
13169 wcqe->total_data_placed;
13171 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13173 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13174 switch (pIocbOut->iocb.ulpCommand) {
13175 case CMD_ELS_REQUEST64_CR:
13176 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13177 bpl = (struct ulp_bde64 *)dmabuf->virt;
13178 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
13179 max_response = bde.tus.f.bdeSize;
13181 case CMD_GEN_REQUEST64_CR:
13183 if (!pIocbOut->context3)
13185 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
13186 sizeof(struct ulp_bde64);
13187 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13188 bpl = (struct ulp_bde64 *)dmabuf->virt;
13189 for (i = 0; i < numBdes; i++) {
13190 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
13191 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
13192 max_response += bde.tus.f.bdeSize;
13196 max_response = wcqe->total_data_placed;
13199 if (max_response < wcqe->total_data_placed)
13200 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
13202 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
13203 wcqe->total_data_placed;
13206 /* Convert BG errors for completion status */
13207 if (status == CQE_STATUS_DI_ERROR) {
13208 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
13210 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
13211 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
13213 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
13215 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
13216 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
13217 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13218 BGS_GUARD_ERR_MASK;
13219 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
13220 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13221 BGS_APPTAG_ERR_MASK;
13222 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
13223 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13224 BGS_REFTAG_ERR_MASK;
13226 /* Check to see if there was any good data before the error */
13227 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
13228 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13229 BGS_HI_WATER_MARK_PRESENT_MASK;
13230 pIocbIn->iocb.unsli3.sli3_bg.bghm =
13231 wcqe->total_data_placed;
13235 * Set ALL the error bits to indicate we don't know what
13236 * type of error it is.
13238 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
13239 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13240 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
13241 BGS_GUARD_ERR_MASK);
13244 /* Pick up HBA exchange busy condition */
13245 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13246 spin_lock_irqsave(&phba->hbalock, iflags);
13247 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13248 spin_unlock_irqrestore(&phba->hbalock, iflags);
13253 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
13254 * @phba: Pointer to HBA context object.
13255 * @irspiocbq: Pointer to work-queue completion queue entry.
13257 * This routine handles an ELS work-queue completion event and constructs
13258 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
13259 * discovery engine to handle.
13261 * Return: Pointer to the receive IOCBQ, NULL otherwise.
13263 static struct lpfc_iocbq *
13264 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13265 struct lpfc_iocbq *irspiocbq)
13267 struct lpfc_sli_ring *pring;
13268 struct lpfc_iocbq *cmdiocbq;
13269 struct lpfc_wcqe_complete *wcqe;
13270 unsigned long iflags;
13272 pring = lpfc_phba_elsring(phba);
13273 if (unlikely(!pring))
13276 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13277 pring->stats.iocb_event++;
13278 /* Look up the ELS command IOCB and create pseudo response IOCB */
13279 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13280 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13281 if (unlikely(!cmdiocbq)) {
13282 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13283 "0386 ELS complete with no corresponding "
13284 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13285 wcqe->word0, wcqe->total_data_placed,
13286 wcqe->parameter, wcqe->word3);
13287 lpfc_sli_release_iocbq(phba, irspiocbq);
13291 spin_lock_irqsave(&pring->ring_lock, iflags);
13292 /* Put the iocb back on the txcmplq */
13293 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13294 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13296 /* Fake the irspiocbq and copy necessary response information */
13297 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
13302 inline struct lpfc_cq_event *
13303 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13305 struct lpfc_cq_event *cq_event;
13307 /* Allocate a new internal CQ_EVENT entry */
13308 cq_event = lpfc_sli4_cq_event_alloc(phba);
13310 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13311 "0602 Failed to alloc CQ_EVENT entry\n");
13315 /* Move the CQE into the event */
13316 memcpy(&cq_event->cqe, entry, size);
13321 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
13322 * @phba: Pointer to HBA context object.
13323 * @mcqe: Pointer to mailbox completion queue entry.
13325 * This routine processes a mailbox completion queue entry with an asynchronous event.
13328 * Return: true if work posted to worker thread, otherwise false.
13331 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13333 struct lpfc_cq_event *cq_event;
13334 unsigned long iflags;
13336 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13337 "0392 Async Event: word0:x%x, word1:x%x, "
13338 "word2:x%x, word3:x%x\n", mcqe->word0,
13339 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13341 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13345 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
13346 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13347 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
13349 /* Set the async event flag */
13350 spin_lock_irqsave(&phba->hbalock, iflags);
13351 phba->hba_flag |= ASYNC_EVENT;
13352 spin_unlock_irqrestore(&phba->hbalock, iflags);
13358 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13359 * @phba: Pointer to HBA context object.
13360 * @mcqe: Pointer to mailbox completion queue entry.
13362 * This routine processes a mailbox completion queue entry with a mailbox
13363 * completion event.
13365 * Return: true if work posted to worker thread, otherwise false.
13368 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13370 uint32_t mcqe_status;
13371 MAILBOX_t *mbox, *pmbox;
13372 struct lpfc_mqe *mqe;
13373 struct lpfc_vport *vport;
13374 struct lpfc_nodelist *ndlp;
13375 struct lpfc_dmabuf *mp;
13376 unsigned long iflags;
13378 bool workposted = false;
13381 /* If not a mailbox-complete MCQE, bail out after checking whether the mailbox was consumed */
13382 if (!bf_get(lpfc_trailer_completed, mcqe))
13383 goto out_no_mqe_complete;
13385 /* Get the reference to the active mbox command */
13386 spin_lock_irqsave(&phba->hbalock, iflags);
13387 pmb = phba->sli.mbox_active;
13388 if (unlikely(!pmb)) {
13389 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13390 "1832 No pending MBOX command to handle\n");
13391 spin_unlock_irqrestore(&phba->hbalock, iflags);
13392 goto out_no_mqe_complete;
13394 spin_unlock_irqrestore(&phba->hbalock, iflags);
13396 pmbox = (MAILBOX_t *)&pmb->u.mqe;
13398 vport = pmb->vport;
13400 /* Reset heartbeat timer */
13401 phba->last_completion_time = jiffies;
13402 del_timer(&phba->sli.mbox_tmo);
13404 /* Move mbox data to caller's mailbox region, do endian swapping */
13405 if (pmb->mbox_cmpl && mbox)
13406 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13409 * For mcqe errors, conditionally move a modified error code to
13410 * the mbox so that the error will not be missed.
13412 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13413 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13414 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13415 bf_set(lpfc_mqe_status, mqe,
13416 (LPFC_MBX_ERROR_RANGE | mcqe_status));
13418 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13419 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13420 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13421 "MBOX dflt rpi: status:x%x rpi:x%x",
13423 pmbox->un.varWords[0], 0);
13424 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13425 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13426 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
13427 /* Reg_LOGIN of dflt RPI was successful. Now lets get
13428 * rid of the RPI using the same mbox buffer.
13430 lpfc_unreg_login(phba, vport->vpi,
13431 pmbox->un.varWords[0], pmb);
13432 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
13434 pmb->ctx_ndlp = ndlp;
13435 pmb->vport = vport;
13436 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13437 if (rc != MBX_BUSY)
13438 lpfc_printf_log(phba, KERN_ERR,
13441 "have been MBX_BUSY\n");
13442 if (rc != MBX_NOT_FINISHED)
13443 goto send_current_mbox;
13446 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13447 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13448 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13450 /* There is mailbox completion work to do */
13451 spin_lock_irqsave(&phba->hbalock, iflags);
13452 __lpfc_mbox_cmpl_put(phba, pmb);
13453 phba->work_ha |= HA_MBATT;
13454 spin_unlock_irqrestore(&phba->hbalock, iflags);
13458 spin_lock_irqsave(&phba->hbalock, iflags);
13459 /* Release the mailbox command posting token */
13460 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13461 /* Setting the active mailbox pointer must be in sync with clearing the flag */
13462 phba->sli.mbox_active = NULL;
13463 if (bf_get(lpfc_trailer_consumed, mcqe))
13464 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13465 spin_unlock_irqrestore(&phba->hbalock, iflags);
13466 /* Wake up worker thread to post the next pending mailbox command */
13467 lpfc_worker_wake_up(phba);
13470 out_no_mqe_complete:
13471 spin_lock_irqsave(&phba->hbalock, iflags);
13472 if (bf_get(lpfc_trailer_consumed, mcqe))
13473 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13474 spin_unlock_irqrestore(&phba->hbalock, iflags);
13479 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
13480 * @phba: Pointer to HBA context object.
13481 * @cq: Pointer to associated CQ
13482 * @cqe: Pointer to mailbox completion queue entry.
13484 * This routine processes a mailbox completion queue entry; it invokes the
13485 * proper mailbox complete handling or asynchronous event handling routine
13486 * according to the MCQE's async bit.
13488 * Return: true if work posted to worker thread, otherwise false.
13491 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13492 struct lpfc_cqe *cqe)
13494 struct lpfc_mcqe mcqe;
13499 /* Copy the mailbox MCQE and convert endian order as needed */
13500 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
13502 /* Invoke the proper event handling routine */
13503 if (!bf_get(lpfc_trailer_async, &mcqe))
13504 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13506 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13511 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
13512 * @phba: Pointer to HBA context object.
13513 * @cq: Pointer to associated CQ
13514 * @wcqe: Pointer to work-queue completion queue entry.
13516 * This routine handles an ELS work-queue completion event.
13518 * Return: true if work posted to worker thread, otherwise false.
13521 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13522 struct lpfc_wcqe_complete *wcqe)
13524 struct lpfc_iocbq *irspiocbq;
13525 unsigned long iflags;
13526 struct lpfc_sli_ring *pring = cq->pring;
13528 int txcmplq_cnt = 0;
13530 /* Check for response status */
13531 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13532 /* Log the error status */
13533 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13534 "0357 ELS CQE error: status=x%x: "
13535 "CQE: %08x %08x %08x %08x\n",
13536 bf_get(lpfc_wcqe_c_status, wcqe),
13537 wcqe->word0, wcqe->total_data_placed,
13538 wcqe->parameter, wcqe->word3);
13541 /* Get an irspiocbq for later ELS response processing use */
13542 irspiocbq = lpfc_sli_get_iocbq(phba);
13544 if (!list_empty(&pring->txq))
13546 if (!list_empty(&pring->txcmplq))
13548 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13549 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13550 "els_txcmplq_cnt=%d\n",
13551 txq_cnt, phba->iocb_cnt,
13556 /* Save off the slow-path queue event for work thread to process */
13557 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
13558 spin_lock_irqsave(&phba->hbalock, iflags);
13559 list_add_tail(&irspiocbq->cq_event.list,
13560 &phba->sli4_hba.sp_queue_event);
13561 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13562 spin_unlock_irqrestore(&phba->hbalock, iflags);
13568 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13569 * @phba: Pointer to HBA context object.
13570 * @wcqe: Pointer to work-queue completion queue entry.
13572 * This routine handles a slow-path WQ entry consumed event by invoking the
13573 * proper WQ release routine on the slow-path WQ.
13576 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13577 struct lpfc_wcqe_release *wcqe)
13579 /* sanity check on queue memory */
13580 if (unlikely(!phba->sli4_hba.els_wq))
13582 /* Check for the slow-path ELS work queue */
13583 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13584 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13585 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13587 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13588 "2579 Slow-path wqe consume event carries "
13589 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13590 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13591 phba->sli4_hba.els_wq->queue_id);
13595 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
13596 * @phba: Pointer to HBA context object.
13597 * @cq: Pointer to a WQ completion queue.
13598 * @wcqe: Pointer to work-queue completion queue entry.
13600 * This routine handles an XRI abort event.
13602 * Return: true if work posted to worker thread, otherwise false.
13605 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13606 struct lpfc_queue *cq,
13607 struct sli4_wcqe_xri_aborted *wcqe)
13609 bool workposted = false;
13610 struct lpfc_cq_event *cq_event;
13611 unsigned long iflags;
13613 switch (cq->subtype) {
13615 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
13616 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13617 /* Notify aborted XRI for NVME work queue */
13618 if (phba->nvmet_support)
13619 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13621 workposted = false;
13623 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
13625 cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
13627 workposted = false;
13630 cq_event->hdwq = cq->hdwq;
13631 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
13633 list_add_tail(&cq_event->list,
13634 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13635 /* Set the els xri abort event flag */
13636 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13637 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
13642 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13643 "0603 Invalid CQ subtype %d: "
13644 "%08x %08x %08x %08x\n",
13645 cq->subtype, wcqe->word0, wcqe->parameter,
13646 wcqe->word2, wcqe->word3);
13647 workposted = false;
13653 #define FC_RCTL_MDS_DIAGS 0xF4
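/*
 * R_CTL value carried by MDS diagnostic (loopback) frames. It is not
 * among the R_CTL values defined in fc_fs.h, so it is defined locally
 * for the receive path to recognize these frames.
 */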
13656 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13657 * @phba: Pointer to HBA context object.
13658 * @rcqe: Pointer to receive-queue completion queue entry.
13660 * This routine processes a receive-queue completion queue entry.
13662 * Return: true if work posted to worker thread, otherwise false.
13665 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13667 bool workposted = false;
13668 struct fc_frame_header *fc_hdr;
13669 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13670 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13671 struct lpfc_nvmet_tgtport *tgtp;
13672 struct hbq_dmabuf *dma_buf;
13673 uint32_t status, rq_id;
13674 unsigned long iflags;
13676 /* sanity check on queue memory */
13677 if (unlikely(!hrq) || unlikely(!drq))
13680 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13681 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13683 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13684 if (rq_id != hrq->queue_id)
13687 status = bf_get(lpfc_rcqe_status, rcqe);
13689 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13690 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13691 "2537 Receive Frame Truncated!!\n");
13693 case FC_STATUS_RQ_SUCCESS:
13694 spin_lock_irqsave(&phba->hbalock, iflags);
13695 lpfc_sli4_rq_release(hrq, drq);
13696 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13698 hrq->RQ_no_buf_found++;
13699 spin_unlock_irqrestore(&phba->hbalock, iflags);
13703 hrq->RQ_buf_posted--;
13704 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13706 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13708 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13709 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13710 spin_unlock_irqrestore(&phba->hbalock, iflags);
13711 /* Handle MDS Loopback frames */
13712 if (!(phba->pport->load_flag & FC_UNLOADING))
13713 lpfc_sli4_handle_mds_loopback(phba->pport,
13716 lpfc_in_buf_free(phba, &dma_buf->dbuf);
13720 /* save off the frame for the work thread to process */
13721 list_add_tail(&dma_buf->cq_event.list,
13722 &phba->sli4_hba.sp_queue_event);
13723 /* Frame received */
13724 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13725 spin_unlock_irqrestore(&phba->hbalock, iflags);
13728 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13729 if (phba->nvmet_support) {
13730 tgtp = phba->targetport->private;
13731 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13732 "6402 RQE Error x%x, posted %d err_cnt "
13734 status, hrq->RQ_buf_posted,
13735 hrq->RQ_no_posted_buf,
13736 atomic_read(&tgtp->rcv_fcp_cmd_in),
13737 atomic_read(&tgtp->rcv_fcp_cmd_out),
13738 atomic_read(&tgtp->xmt_fcp_release));
13742 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13743 hrq->RQ_no_posted_buf++;
13744 /* Post more buffers if possible */
13745 spin_lock_irqsave(&phba->hbalock, iflags);
13746 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13747 spin_unlock_irqrestore(&phba->hbalock, iflags);
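/*
 * Buffer replenishment is deferred on purpose: the worker thread acts
 * on HBA_POST_RECEIVE_BUFFER and reposts RQ buffers from process
 * context instead of doing it here in the completion path.
 */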
13756 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13757 * @phba: Pointer to HBA context object.
13758 * @cq: Pointer to the completion queue.
13759 * @cqe: Pointer to a completion queue entry.
13761 * This routine processes a slow-path work-queue or receive-queue completion queue entry.
13764 * Return: true if work posted to worker thread, otherwise false.
13767 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13768 struct lpfc_cqe *cqe)
13770 struct lpfc_cqe cqevt;
13771 bool workposted = false;
13773 /* Copy the work queue CQE and convert endian order if needed */
13774 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13776 /* Check the WCQE type and dispatch to the proper handler */
13777 switch (bf_get(lpfc_cqe_code, &cqevt)) {
13778 case CQE_CODE_COMPL_WQE:
13779 /* Process the WQ/RQ complete event */
13780 phba->last_completion_time = jiffies;
13781 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13782 (struct lpfc_wcqe_complete *)&cqevt);
13784 case CQE_CODE_RELEASE_WQE:
13785 /* Process the WQ release event */
13786 lpfc_sli4_sp_handle_rel_wcqe(phba,
13787 (struct lpfc_wcqe_release *)&cqevt);
13789 case CQE_CODE_XRI_ABORTED:
13790 /* Process the WQ XRI abort event */
13791 phba->last_completion_time = jiffies;
13792 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13793 (struct sli4_wcqe_xri_aborted *)&cqevt);
13795 case CQE_CODE_RECEIVE:
13796 case CQE_CODE_RECEIVE_V1:
13797 /* Process the RQ event */
13798 phba->last_completion_time = jiffies;
13799 workposted = lpfc_sli4_sp_handle_rcqe(phba,
13800 (struct lpfc_rcqe *)&cqevt);
13803 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13804 "0388 Not a valid WCQE code: x%x\n",
13805 bf_get(lpfc_cqe_code, &cqevt));
13812 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
13813 * @phba: Pointer to HBA context object.
13814 * @eqe: Pointer to fast-path event queue entry.
13815 * @speq: Pointer to slow-path event queue.
13817 * This routine processes an event queue entry from the slow-path event queue.
13818 * It checks the MajorCode and MinorCode to determine whether this is a
13819 * completion event on a completion queue; if not, an error is logged and the
13820 * routine returns. Otherwise, it looks up the corresponding completion
13821 * queue, processes all the entries on that completion queue, rearms the
13822 * completion queue, and then returns.
13826 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13827 struct lpfc_queue *speq)
13829 struct lpfc_queue *cq = NULL, *childq;
13833 /* Get the reference to the corresponding CQ */
13834 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13836 list_for_each_entry(childq, &speq->child_list, list) {
13837 if (childq->queue_id == cqid) {
13842 if (unlikely(!cq)) {
13843 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13844 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13845 "0365 Slow-path CQ identifier "
13846 "(%d) does not exist\n", cqid);
13850 /* Save EQ associated with this CQ */
13851 cq->assoc_qp = speq;
13853 if (is_kdump_kernel())
13854 ret = queue_work(phba->wq, &cq->spwork);
13856 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
13859 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13860 "0390 Cannot schedule queue work "
13861 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13862 cqid, cq->queue_id, raw_smp_processor_id());
13866 * __lpfc_sli4_process_cq - Process elements of a CQ
13867 * @phba: Pointer to HBA context object.
13868 * @cq: Pointer to CQ to be processed
13869 * @handler: Routine to process each cqe
13870 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
13871 * @poll_mode: Polling mode we were called from
13873 * This routine processes completion queue entries in a CQ. While a valid
13874 * queue element is found, the handler is called. During processing checks
13875 * are made for periodic doorbell writes to let the hardware know of
13876 * element consumption.
13878 * If the max limit on cqes to process is hit, or there are no more valid
13879 * entries, the loop stops. If we processed a sufficient number of elements,
13880 * meaning there is sufficient load, rather than rearming and generating
13881 * another interrupt, a cq rescheduling delay will be set. A delay of 0
13882 * indicates no rescheduling.
13884 * Returns true if work was scheduled, false otherwise.
13887 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
13888 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
13889 struct lpfc_cqe *), unsigned long *delay,
13890 enum lpfc_poll_mode poll_mode)
13892 struct lpfc_cqe *cqe;
13893 bool workposted = false;
13894 int count = 0, consumed = 0;
13897 /* default - no reschedule */
13900 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
13901 goto rearm_and_exit;
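/*
 * queue_claimed is a simple ownership flag: the cmpxchg() above claims
 * the CQ (0 -> 1) so that the interrupt path and a (delayed) worker
 * never walk the same CQ concurrently; the xchg() in the exit path
 * releases it. If another context already holds the claim, just rearm
 * and leave - the current owner will consume the pending entries.
 */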
13903 /* Process all the entries on the CQ */
13905 cqe = lpfc_sli4_cq_get(cq);
13907 workposted |= handler(phba, cq, cqe);
13908 __lpfc_sli4_consume_cqe(phba, cq, cqe);
13911 if (!(++count % cq->max_proc_limit))
13914 if (!(count % cq->notify_interval)) {
13915 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13918 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
13921 if (count == LPFC_NVMET_CQ_NOTIFY)
13922 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
13924 cqe = lpfc_sli4_cq_get(cq);
13926 if (count >= phba->cfg_cq_poll_threshold) {
13931 /* Note: complete the irq_poll softirq before rearming CQ */
13932 if (poll_mode == LPFC_IRQ_POLL)
13933 irq_poll_complete(&cq->iop);
13935 /* Track the max number of CQEs processed in 1 EQ */
13936 if (count > cq->CQ_max_cqe)
13937 cq->CQ_max_cqe = count;
13939 cq->assoc_qp->EQ_cqe_cnt += count;
13941 /* Catch the no cq entry condition */
13942 if (unlikely(count == 0))
13943 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13944 "0369 No entry from completion queue "
13945 "qid=%d\n", cq->queue_id);
13947 xchg(&cq->queue_claimed, 0);
13950 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13951 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
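/*
 * The consume/acknowledge pattern above, sketched for illustration
 * (field and helper names follow the real code):
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq))) {
 *		workposted |= handler(phba, cq, cqe);
 *		__lpfc_sli4_consume_cqe(phba, cq, cqe);
 *		consumed++;
 *		if (!(++count % cq->notify_interval)) {
 *			phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
 *							LPFC_QUEUE_NOARM);
 *			consumed = 0;
 *		}
 *	}
 *	phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
 *			arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
 *
 * Ringing the doorbell every notify_interval entries hands consumed
 * CQEs back to the port in batches instead of one write per entry;
 * only the final write may rearm the CQ.
 */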
13957 * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
13958 * @cq: pointer to CQ to process
13960 * This routine calls the cq processing routine with a handler specific
13961 * to the type of queue bound to it.
13963 * The CQ routine returns two values: the first is the calling status,
13964 * which indicates whether work was queued to the background discovery
13965 * thread. If true, the routine should wake up the discovery thread;
13966 * the second is the delay parameter. If non-zero, rather than rearming
13967 * the CQ and taking yet another interrupt, the CQ handler should be queued so
13968 * that it is processed in a subsequent polling action. The value of
13969 * the delay indicates when to reschedule it.
13972 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
13974 struct lpfc_hba *phba = cq->phba;
13975 unsigned long delay;
13976 bool workposted = false;
13979 /* Process and rearm the CQ */
13980 switch (cq->type) {
13982 workposted |= __lpfc_sli4_process_cq(phba, cq,
13983 lpfc_sli4_sp_handle_mcqe,
13984 &delay, LPFC_QUEUE_WORK);
13987 if (cq->subtype == LPFC_IO)
13988 workposted |= __lpfc_sli4_process_cq(phba, cq,
13989 lpfc_sli4_fp_handle_cqe,
13990 &delay, LPFC_QUEUE_WORK);
13992 workposted |= __lpfc_sli4_process_cq(phba, cq,
13993 lpfc_sli4_sp_handle_cqe,
13994 &delay, LPFC_QUEUE_WORK);
13997 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13998 "0370 Invalid completion queue type (%d)\n",
14004 if (is_kdump_kernel())
14005 ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
14008 ret = queue_delayed_work_on(cq->chann, phba->wq,
14009 &cq->sched_spwork, delay);
14011 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14012 "0394 Cannot schedule queue work "
14013 "for cqid=%d on CPU %d\n",
14014 cq->queue_id, cq->chann);
14017 /* wake up worker thread if there is work to be done */
14019 lpfc_worker_wake_up(phba);
14023 * lpfc_sli4_sp_process_cq - slow-path work handler when started by worker thread
14025 * @work: pointer to work element
14027 * Translates from the work element and calls the slow-path handler.
14030 lpfc_sli4_sp_process_cq(struct work_struct *work)
14032 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
14034 __lpfc_sli4_sp_process_cq(cq);
14038 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
14039 * @work: pointer to work element
14041 * Translates from the work element and calls the slow-path handler.
14044 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
14046 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14047 struct lpfc_queue, sched_spwork);
14049 __lpfc_sli4_sp_process_cq(cq);
14053 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
14054 * @phba: Pointer to HBA context object.
14055 * @cq: Pointer to associated CQ
14056 * @wcqe: Pointer to work-queue completion queue entry.
14058 * This routine processes a fast-path work queue completion entry from the
14059 * fast-path event queue for FCP command response completion.
14062 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14063 struct lpfc_wcqe_complete *wcqe)
14065 struct lpfc_sli_ring *pring = cq->pring;
14066 struct lpfc_iocbq *cmdiocbq;
14067 struct lpfc_iocbq irspiocbq;
14068 unsigned long iflags;
14070 /* Check for response status */
14071 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14072 /* If resource errors reported from HBA, reduce queue
14073 * depth of the SCSI device.
14075 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
14076 IOSTAT_LOCAL_REJECT)) &&
14077 ((wcqe->parameter & IOERR_PARAM_MASK) ==
14078 IOERR_NO_RESOURCES))
14079 phba->lpfc_rampdown_queue_depth(phba);
14081 /* Log the cmpl status */
14082 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14083 "0373 FCP CQE cmpl: status=x%x: "
14084 "CQE: %08x %08x %08x %08x\n",
14085 bf_get(lpfc_wcqe_c_status, wcqe),
14086 wcqe->word0, wcqe->total_data_placed,
14087 wcqe->parameter, wcqe->word3);
14090 /* Look up the FCP command IOCB and create pseudo response IOCB */
14091 spin_lock_irqsave(&pring->ring_lock, iflags);
14092 pring->stats.iocb_event++;
14093 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14094 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14095 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14096 if (unlikely(!cmdiocbq)) {
14097 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14098 "0374 FCP complete with no corresponding "
14099 "cmdiocb: iotag (%d)\n",
14100 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14103 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
14104 cmdiocbq->isr_timestamp = cq->isr_timestamp;
14106 if (cmdiocbq->iocb_cmpl == NULL) {
14107 if (cmdiocbq->wqe_cmpl) {
14108 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
14109 spin_lock_irqsave(&phba->hbalock, iflags);
14110 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
14111 spin_unlock_irqrestore(&phba->hbalock, iflags);
14114 /* Pass the cmd_iocb and the wcqe to the upper layer */
14115 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
14118 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14119 "0375 FCP cmdiocb not callback function "
14121 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14125 /* Fake the irspiocb and copy necessary response information */
14126 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
14128 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
14129 spin_lock_irqsave(&phba->hbalock, iflags);
14130 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
14131 spin_unlock_irqrestore(&phba->hbalock, iflags);
14134 /* Pass the cmd_iocb and the rsp state to the upper layer */
14135 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
14139 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
14140 * @phba: Pointer to HBA context object.
14141 * @cq: Pointer to completion queue.
14142 * @wcqe: Pointer to work-queue completion queue entry.
14144 * This routine handles a fast-path WQ entry consumed event by invoking the
14145 * proper WQ release routine for the fast-path WQ.
14148 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14149 struct lpfc_wcqe_release *wcqe)
14151 struct lpfc_queue *childwq;
14152 bool wqid_matched = false;
14155 /* Check for fast-path FCP work queue release */
14156 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
14157 list_for_each_entry(childwq, &cq->child_list, list) {
14158 if (childwq->queue_id == hba_wqid) {
14159 lpfc_sli4_wq_release(childwq,
14160 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14161 if (childwq->q_flag & HBA_NVMET_WQFULL)
14162 lpfc_nvmet_wqfull_process(phba, childwq);
14163 wqid_matched = true;
14167 /* Report warning log message if no match found */
14168 if (!wqid_matched)
14169 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14170 "2580 Fast-path wqe consume event carries "
14171 "mismatched qid: wcqe-qid=x%x\n", hba_wqid);
14175 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
14176 * @phba: Pointer to HBA context object.
14177 * @cq: Pointer to completion queue.
14178 * @rcqe: Pointer to receive-queue completion queue entry.
14180 * This routine processes a receive-queue completion queue entry.
14182 * Return: true if work posted to worker thread, otherwise false.
14185 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14186 struct lpfc_rcqe *rcqe)
14188 bool workposted = false;
14189 struct lpfc_queue *hrq;
14190 struct lpfc_queue *drq;
14191 struct rqb_dmabuf *dma_buf;
14192 struct fc_frame_header *fc_hdr;
14193 struct lpfc_nvmet_tgtport *tgtp;
14194 uint32_t status, rq_id;
14195 unsigned long iflags;
14196 uint32_t fctl, idx;
14198 if ((phba->nvmet_support == 0) ||
14199 (phba->sli4_hba.nvmet_cqset == NULL))
14202 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
14203 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
14204 drq = phba->sli4_hba.nvmet_mrq_data[idx];
14206 /* sanity check on queue memory */
14207 if (unlikely(!hrq) || unlikely(!drq))
14210 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14211 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14213 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14215 if ((phba->nvmet_support == 0) ||
14216 (rq_id != hrq->queue_id))
14219 status = bf_get(lpfc_rcqe_status, rcqe);
14221 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14222 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14223 "6126 Receive Frame Truncated!!\n");
14225 case FC_STATUS_RQ_SUCCESS:
14226 spin_lock_irqsave(&phba->hbalock, iflags);
14227 lpfc_sli4_rq_release(hrq, drq);
14228 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
14230 hrq->RQ_no_buf_found++;
14231 spin_unlock_irqrestore(&phba->hbalock, iflags);
14234 spin_unlock_irqrestore(&phba->hbalock, iflags);
14236 hrq->RQ_buf_posted--;
14237 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14239 /* Just some basic sanity checks on FCP Command frame */
14240 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
14241 fc_hdr->fh_f_ctl[1] << 8 |
14242 fc_hdr->fh_f_ctl[2]);
14244 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
14245 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
14246 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
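/*
 * F_CTL is a 24-bit field stored as three bytes in the frame header;
 * the fold above reassembles it so one mask test can require FIRST_SEQ,
 * END_SEQ and SEQ_INIT together. Combined with a zero SEQ_CNT, this
 * accepts only complete, single-frame FCP command sequences.
 */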
14249 if (fc_hdr->fh_type == FC_TYPE_FCP) {
14250 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
14251 lpfc_nvmet_unsol_fcp_event(
14252 phba, idx, dma_buf, cq->isr_timestamp,
14253 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
14257 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
14259 case FC_STATUS_INSUFF_BUF_FRM_DISC:
14260 if (phba->nvmet_support) {
14261 tgtp = phba->targetport->private;
14262 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14263 "6401 RQE Error x%x, posted %d err_cnt "
14265 status, hrq->RQ_buf_posted,
14266 hrq->RQ_no_posted_buf,
14267 atomic_read(&tgtp->rcv_fcp_cmd_in),
14268 atomic_read(&tgtp->rcv_fcp_cmd_out),
14269 atomic_read(&tgtp->xmt_fcp_release));
14273 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14274 hrq->RQ_no_posted_buf++;
14275 /* Post more buffers if possible */
14283 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
14284 * @phba: adapter with cq
14285 * @cq: Pointer to the completion queue.
14286 * @cqe: Pointer to fast-path completion queue entry.
14288 * This routine processes a fast-path work queue completion entry from the
14289 * fast-path event queue for FCP command response completion.
14291 * Return: true if work posted to worker thread, otherwise false.
14294 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14295 struct lpfc_cqe *cqe)
14297 struct lpfc_wcqe_release wcqe;
14298 bool workposted = false;
14300 /* Copy the work queue CQE and convert endian order if needed */
14301 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
14303 /* Check the WCQE type and dispatch to the proper handler */
14304 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14305 case CQE_CODE_COMPL_WQE:
14306 case CQE_CODE_NVME_ERSP:
14308 /* Process the WQ complete event */
14309 phba->last_completion_time = jiffies;
14310 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
14311 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14312 (struct lpfc_wcqe_complete *)&wcqe);
14314 case CQE_CODE_RELEASE_WQE:
14315 cq->CQ_release_wqe++;
14316 /* Process the WQ release event */
14317 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14318 (struct lpfc_wcqe_release *)&wcqe);
14320 case CQE_CODE_XRI_ABORTED:
14321 cq->CQ_xri_aborted++;
14322 /* Process the WQ XRI abort event */
14323 phba->last_completion_time = jiffies;
14324 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14325 (struct sli4_wcqe_xri_aborted *)&wcqe);
14327 case CQE_CODE_RECEIVE_V1:
14328 case CQE_CODE_RECEIVE:
14329 phba->last_completion_time = jiffies;
14330 if (cq->subtype == LPFC_NVMET) {
14331 workposted = lpfc_sli4_nvmet_handle_rcqe(
14332 phba, cq, (struct lpfc_rcqe *)&wcqe);
14336 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14337 "0144 Not a valid CQE code: x%x\n",
14338 bf_get(lpfc_wcqe_c_code, &wcqe));
14345 * lpfc_sli4_sched_cq_work - Schedules cq work
14346 * @phba: Pointer to HBA context object.
14347 * @cq: Pointer to CQ
14350 * This routine checks the poll mode of the CQ corresponding to
14351 * cq->chann, then either schedules a softirq or queues work to complete
14354 * the CQ processing. The queue_work path is taken in NVMET mode, or if
14355 * poll_mode is LPFC_QUEUE_WORK; otherwise the softirq path is taken.
14358 static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
14359 struct lpfc_queue *cq, uint16_t cqid)
14363 switch (cq->poll_mode) {
14364 case LPFC_IRQ_POLL:
14365 irq_poll_sched(&cq->iop);
14367 case LPFC_QUEUE_WORK:
14369 if (is_kdump_kernel())
14370 ret = queue_work(phba->wq, &cq->irqwork);
14372 ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
14374 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14375 "0383 Cannot schedule queue work "
14376 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14377 cqid, cq->queue_id,
14378 raw_smp_processor_id());
14383 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
14384 * @phba: Pointer to HBA context object.
14385 * @eq: Pointer to the queue structure.
14386 * @eqe: Pointer to fast-path event queue entry.
14388 * This routine processes an event queue entry from the fast-path event queue.
14389 * It checks the MajorCode and MinorCode to determine whether this is a
14390 * completion event on a completion queue; if not, an error is logged and the
14391 * routine returns. Otherwise, it looks up the corresponding completion
14392 * queue, processes all the entries on the completion queue, rearms the
14393 * completion queue, and then returns.
14396 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
14397 struct lpfc_eqe *eqe)
14399 struct lpfc_queue *cq = NULL;
14400 uint32_t qidx = eq->hdwq;
14403 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
14404 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14405 "0366 Not a valid completion "
14406 "event: majorcode=x%x, minorcode=x%x\n",
14407 bf_get_le32(lpfc_eqe_major_code, eqe),
14408 bf_get_le32(lpfc_eqe_minor_code, eqe));
14412 /* Get the reference to the corresponding CQ */
14413 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14415 /* Use the fast lookup method first */
14416 if (cqid <= phba->sli4_hba.cq_max) {
14417 cq = phba->sli4_hba.cq_lookup[cqid];
14422 /* Next check for NVMET completion */
14423 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14424 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14425 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14426 /* Process NVMET unsol rcv */
14427 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14432 if (phba->sli4_hba.nvmels_cq &&
14433 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14434 /* Process NVME unsol rcv */
14435 cq = phba->sli4_hba.nvmels_cq;
14438 /* Otherwise this is a Slow path event */
14440 lpfc_sli4_sp_handle_eqe(phba, eqe,
14441 phba->sli4_hba.hdwq[qidx].hba_eq);
14446 if (unlikely(cqid != cq->queue_id)) {
14447 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14448 "0368 Miss-matched fast-path completion "
14449 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14450 cqid, cq->queue_id);
14455 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
14456 if (phba->ktime_on)
14457 cq->isr_timestamp = ktime_get_ns();
14459 cq->isr_timestamp = 0;
14461 lpfc_sli4_sched_cq_work(phba, cq, cqid);
14465 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
14466 * @cq: Pointer to CQ to be processed
14467 * @poll_mode: Enum lpfc_poll_state to determine poll mode
14469 * This routine calls the cq processing routine with the handler for
14472 * The CQ routine returns two values: the first is the calling status,
14473 * which indicates whether work was queued to the background discovery
14474 * thread. If true, the routine should wakeup the discovery thread;
14475 * the second is the delay parameter. If non-zero, rather than rearming
14476 * the CQ and yet another interrupt, the CQ handler should be queued so
14477 * that it is processed in a subsequent polling action. The value of
14478 * the delay indicates when to reschedule it.
14481 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
14482 enum lpfc_poll_mode poll_mode)
14484 struct lpfc_hba *phba = cq->phba;
14485 unsigned long delay;
14486 bool workposted = false;
14489 /* process and rearm the CQ */
14490 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14491 &delay, poll_mode);
14494 if (is_kdump_kernel())
14495 ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
14498 ret = queue_delayed_work_on(cq->chann, phba->wq,
14499 &cq->sched_irqwork, delay);
14501 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14502 "0367 Cannot schedule queue work "
14503 "for cqid=%d on CPU %d\n",
14504 cq->queue_id, cq->chann);
14507 /* wake up worker thread if there is work to be done */
14509 lpfc_worker_wake_up(phba);
14513 * lpfc_sli4_hba_process_cq - fast-path work handler when started by worker thread
14515 * @work: pointer to work element
14517 * Translates from the work element and calls the fast-path handler.
14520 lpfc_sli4_hba_process_cq(struct work_struct *work)
14522 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14524 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
14528 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
14529 * @work: pointer to work element
14531 * Translates from the work element and calls the fast-path handler.
14534 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14536 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14537 struct lpfc_queue, sched_irqwork);
14539 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
14543 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
14544 * @irq: Interrupt number.
14545 * @dev_id: The device context pointer.
14547 * This function is directly called from the PCI layer as an interrupt
14548 * service routine when device with SLI-4 interface spec is enabled with
14549 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14550 * ring event in the HBA. However, when the device is enabled with either
14551 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14552 * device-level interrupt handler. When the PCI slot is in error recovery
14553 * or the HBA is undergoing initialization, the interrupt handler will not
14554 * process the interrupt. The SCSI FCP fast-path ring events are handled in
14555 * the interrupt context. This function is called without any lock held.
14556 * It gets the hbalock to access and update SLI data structures. Note that
14557 * the FCP EQs map one-to-one to the FCP CQs, so the FCP EQ index is
14558 * equal to the FCP CQ index.
14560 * The link attention and ELS ring attention events are handled
14561 * by the worker thread. The interrupt handler signals the worker thread
14562 * and returns for these events. This function is called without any lock
14563 * held. It gets the hbalock to access and update SLI data structures.
14565 * This function returns IRQ_HANDLED when interrupt is handled else it
14566 * returns IRQ_NONE.
14569 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
14571 struct lpfc_hba *phba;
14572 struct lpfc_hba_eq_hdl *hba_eq_hdl;
14573 struct lpfc_queue *fpeq;
14574 unsigned long iflag;
14577 struct lpfc_eq_intr_info *eqi;
14579 /* Get the driver's phba structure from the dev_id */
14580 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14581 phba = hba_eq_hdl->phba;
14582 hba_eqidx = hba_eq_hdl->idx;
14584 if (unlikely(!phba))
14586 if (unlikely(!phba->sli4_hba.hdwq))
14589 /* Get to the EQ struct associated with this vector */
14590 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
14591 if (unlikely(!fpeq))
14594 /* Check device state for handling interrupt */
14595 if (unlikely(lpfc_intr_state_check(phba))) {
14596 /* Check again for link_state with lock held */
14597 spin_lock_irqsave(&phba->hbalock, iflag);
14598 if (phba->link_state < LPFC_LINK_DOWN)
14599 /* Flush, clear interrupt, and rearm the EQ */
14600 lpfc_sli4_eqcq_flush(phba, fpeq);
14601 spin_unlock_irqrestore(&phba->hbalock, iflag);
14605 eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
14608 fpeq->last_cpu = raw_smp_processor_id();
14610 if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
14611 fpeq->q_flag & HBA_EQ_DELAY_CHK &&
14612 phba->cfg_auto_imax &&
14613 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
14614 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
14615 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
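/*
 * Coalescing heuristic: if this vector has fired more than
 * LPFC_EQD_ISR_TRIGGER times since the last check and the port exposes
 * the EQ delay register, jump straight to the maximum EQ delay to
 * throttle the interrupt rate; the periodic EQ-delay management brings
 * it back down when the load eases.
 */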
14617 /* process and rearm the EQ */
14618 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
14620 if (unlikely(ecount == 0)) {
14621 fpeq->EQ_no_entry++;
14622 if (phba->intr_type == MSIX)
14623 /* MSI-X: the vector is not shared, so an interrupt with no EQE is unexpected */
14624 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14625 "0358 MSI-X interrupt with no EQE\n");
14627 /* INTx/MSI: the line may be shared, so an interrupt with no EQE is expected */
14631 return IRQ_HANDLED;
14632 } /* lpfc_sli4_hba_intr_handler */
14635 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14636 * @irq: Interrupt number.
14637 * @dev_id: The device context pointer.
14639 * This function is the device-level interrupt handler to device with SLI-4
14640 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14641 * interrupt mode is enabled and there is an event in the HBA which requires
14642 * driver attention. This function invokes the slow-path interrupt attention
14643 * handling function and fast-path interrupt attention handling function in
14644 * turn to process the relevant HBA attention events. This function is called
14645 * without any lock held. It gets the hbalock to access and update SLI data
14648 * This function returns IRQ_HANDLED when interrupt is handled, else it
14649 * returns IRQ_NONE.
14652 lpfc_sli4_intr_handler(int irq, void *dev_id)
14654 struct lpfc_hba *phba;
14655 irqreturn_t hba_irq_rc;
14656 bool hba_handled = false;
14659 /* Get the driver's phba structure from the dev_id */
14660 phba = (struct lpfc_hba *)dev_id;
14662 if (unlikely(!phba))
14666 * Invoke fast-path host attention interrupt handling as appropriate.
14668 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
14669 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14670 &phba->sli4_hba.hba_eq_hdl[qidx]);
14671 if (hba_irq_rc == IRQ_HANDLED)
14672 hba_handled = true;
14675 return hba_handled ? IRQ_HANDLED : IRQ_NONE;
14676 } /* lpfc_sli4_intr_handler */
14678 void lpfc_sli4_poll_hbtimer(struct timer_list *t)
14680 struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
14681 struct lpfc_queue *eq;
14686 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
14687 i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
14688 if (!list_empty(&phba->poll_list))
14689 mod_timer(&phba->cpuhp_poll_timer,
14690 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
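/*
 * The heartbeat timer above re-arms itself every LPFC_POLL_HB msecs for
 * as long as any EQ remains on poll_list, so polled EQs keep draining
 * even when no new I/O arrives to drive lpfc_sli4_poll_eq() below.
 */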
14695 inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
14697 struct lpfc_hba *phba = eq->phba;
14701 * Unlocking an irq is one of the entry points to check
14702 * for reschedule, but we are fine on the io submission
14703 * path as the midlayer does a get_cpu to glue us in. Flush
14704 * out the invalidate queue so we can see the updated
14709 if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
14710 /* We will likely not get the completion for the caller
14711 * during this iteration, but that is fine.
14712 * Future io's coming on this eq should be able to
14713 * pick it up. As for the case of single io's, they
14714 * will be handled through a sched from the polling timer
14715 * function, which is currently triggered every 1 msec.
14717 i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
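/* NOARM is deliberate here: in polled mode the EQ must stay disarmed
 * so the port never raises an interrupt for it; its entries are reaped
 * only by this polling path and the heartbeat timer above.
 */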
14722 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
14724 struct lpfc_hba *phba = eq->phba;
14726 /* kickstart slowpath processing if needed */
14727 if (list_empty(&phba->poll_list))
14728 mod_timer(&phba->cpuhp_poll_timer,
14729 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14731 list_add_rcu(&eq->_poll_list, &phba->poll_list);
14735 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
14737 struct lpfc_hba *phba = eq->phba;
14739 /* Disable slowpath processing for this eq. Kick start the eq
14740 * by RE-ARMING it ASAP
14742 list_del_rcu(&eq->_poll_list);
14745 if (list_empty(&phba->poll_list))
14746 del_timer_sync(&phba->cpuhp_poll_timer);
14749 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
14751 struct lpfc_queue *eq, *next;
14753 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
14754 list_del(&eq->_poll_list);
14756 INIT_LIST_HEAD(&phba->poll_list);
14761 __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
14763 if (mode == eq->mode)
14766 * Currently this function is only called during a hotplug
14767 * event and the cpu on which this function is executing
14768 * is going offline. By now the hotplug has instructed
14769 * the scheduler to remove this cpu from the cpu active mask.
14770 * So we don't need to worry about being put aside by the
14771 * scheduler for a high priority process. Yes, interrupts
14772 * could still come in, but they are known to retire ASAP.
14775 /* Disable polling in the fastpath */
14776 WRITE_ONCE(eq->mode, mode);
14777 /* flush out the store buffer */
14781 * Add this eq to the polling list and start polling. For
14782 * a grace period both the interrupt handler and the poller
14783 * will try to process the eq _but_ that's fine. We have a
14784 * synchronization mechanism in place (queue_claimed) to
14785 * deal with it. This is just a draining phase for the
14786 * interrupt handler (not eq's) as we have guaranteed, through
14787 * the barrier, that all the CPUs have seen the new CQ_POLLED
14788 * state, which effectively disables the REARMING of
14789 * the EQ. The whole idea is that eq's die off eventually as
14790 * we are not rearming EQ's anymore.
14792 mode ? lpfc_sli4_add_to_poll_list(eq) :
14793 lpfc_sli4_remove_from_poll_list(eq);
14796 void lpfc_sli4_start_polling(struct lpfc_queue *eq)
14798 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
14801 void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
14803 struct lpfc_hba *phba = eq->phba;
14805 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
14807 /* Kick start the pending io's in h/w.
14808 * Once we switch back to interrupt processing on an eq,
14809 * the io completion path will only arm the eq when it
14810 * receives a completion. But since the eq is in the
14811 * disarmed state it doesn't receive a completion. This
14812 * creates a deadlock scenario.
14814 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
14818 * lpfc_sli4_queue_free - free a queue structure and associated memory
14819 * @queue: The queue structure to free.
14821 * This function frees a queue structure and the DMAable memory used for
14822 * the host resident queue. This function must be called after destroying the
14823 * queue on the HBA.
14826 lpfc_sli4_queue_free(struct lpfc_queue *queue)
14828 struct lpfc_dmabuf *dmabuf;
14833 if (!list_empty(&queue->wq_list))
14834 list_del(&queue->wq_list);
14836 while (!list_empty(&queue->page_list)) {
14837 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14839 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
14840 dmabuf->virt, dmabuf->phys);
14844 lpfc_free_rq_buffer(queue->phba, queue);
14845 kfree(queue->rqbp);
14848 if (!list_empty(&queue->cpu_list))
14849 list_del(&queue->cpu_list);
14856 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
14857 * @phba: The HBA that this queue is being created on.
14858 * @page_size: The size of a queue page
14859 * @entry_size: The size of each queue entry for this queue.
14860 * @entry_count: The number of entries that this queue will handle.
14861 * @cpu: The cpu that will primarily utilize this queue.
14863 * This function allocates a queue structure and the DMAable memory used for
14864 * the host resident queue. This function must be called before creating the
14865 * queue on the HBA.
14867 struct lpfc_queue *
14868 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14869 uint32_t entry_size, uint32_t entry_count, int cpu)
14871 struct lpfc_queue *queue;
14872 struct lpfc_dmabuf *dmabuf;
14873 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14876 if (!phba->sli4_hba.pc_sli4_params.supported)
14877 hw_page_size = page_size;
14879 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
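/*
 * Worked example (illustrative): 1024 entries of 64 bytes on a 4096
 * byte page gives ALIGN(65536, 4096) / 4096 = 16 pages; a queue
 * smaller than one page still consumes a full page.
 */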
14881 /* If needed, adjust the page count to match the max the adapter supports */
14882 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
14883 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
14885 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
14886 GFP_KERNEL, cpu_to_node(cpu));
14890 INIT_LIST_HEAD(&queue->list);
14891 INIT_LIST_HEAD(&queue->_poll_list);
14892 INIT_LIST_HEAD(&queue->wq_list);
14893 INIT_LIST_HEAD(&queue->wqfull_list);
14894 INIT_LIST_HEAD(&queue->page_list);
14895 INIT_LIST_HEAD(&queue->child_list);
14896 INIT_LIST_HEAD(&queue->cpu_list);
14898 /* Set queue parameters now. If the system cannot provide memory
14899 * resources, the free routine needs to know what was allocated.
14901 queue->page_count = pgcnt;
14902 queue->q_pgs = (void **)&queue[1];
14903 queue->entry_cnt_per_pg = hw_page_size / entry_size;
14904 queue->entry_size = entry_size;
14905 queue->entry_count = entry_count;
14906 queue->page_size = hw_page_size;
14907 queue->phba = phba;
14909 for (x = 0; x < queue->page_count; x++) {
14910 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
14911 dev_to_node(&phba->pcidev->dev));
14914 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14915 hw_page_size, &dmabuf->phys,
14917 if (!dmabuf->virt) {
14921 dmabuf->buffer_tag = x;
14922 list_add_tail(&dmabuf->list, &queue->page_list);
14923 /* use lpfc_sli4_qe to index a particular entry in this page */
14924 queue->q_pgs[x] = dmabuf->virt;
14926 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14927 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
14928 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
14929 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
14931 /* notify_interval will be set during q creation */
14935 lpfc_sli4_queue_free(queue);
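/*
 * Typical alloc/free pairing, sketched for illustration only (the
 * sizes shown are examples, not the driver's actual configuration):
 *
 *	struct lpfc_queue *cq;
 *
 *	cq = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 *				   phba->sli4_hba.cq_esize,
 *				   phba->sli4_hba.cq_ecount, cpu);
 *	if (!cq)
 *		return -ENOMEM;
 *	... create the queue on the HBA, use it, destroy it on the HBA ...
 *	lpfc_sli4_queue_free(cq);
 */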
14940 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14941 * @phba: HBA structure that indicates port to create a queue on.
14942 * @pci_barset: PCI BAR set flag.
14944 * This function returns the host memory address for the specified PCI BAR
14945 * set, as previously iomapped by the driver. The returned host
14946 * memory address can be NULL.
14948 static void __iomem *
14949 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14954 switch (pci_barset) {
14955 case WQ_PCI_BAR_0_AND_1:
14956 return phba->pci_bar0_memmap_p;
14957 case WQ_PCI_BAR_2_AND_3:
14958 return phba->pci_bar2_memmap_p;
14959 case WQ_PCI_BAR_4_AND_5:
14960 return phba->pci_bar4_memmap_p;
14968 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
14969 * @phba: HBA structure that EQs are on.
14970 * @startq: The starting EQ index to modify
14971 * @numq: The number of EQs (consecutive indexes) to modify
14972 * @usdelay: amount of delay
14974 * This function revises the EQ delay on 1 or more EQs. The EQ delay
14975 * is set either by writing to a register (if supported by the SLI Port)
14976 * or by mailbox command. The mailbox command allows several EQs to be
14979 * The @phba struct is used to send a mailbox command to HBA. The @startq
14980 * is used to get the starting EQ index to change. The @numq value is
14981 * used to specify how many consecutive EQ indexes, starting at EQ index,
14982 * are to be changed. This function is synchronous and waits for any
14983 * mailbox commands to finish before returning.
14985 * On success this function will return a zero. If unable to allocate
14986 * enough memory this function will return -ENOMEM. If a mailbox command
14987 * fails this function will return -ENXIO. Note: on ENXIO, some EQs may
14988 * have had their delay multiplier changed.
14991 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14992 uint32_t numq, uint32_t usdelay)
14994 struct lpfc_mbx_modify_eq_delay *eq_delay;
14995 LPFC_MBOXQ_t *mbox;
14996 struct lpfc_queue *eq;
14997 int cnt = 0, rc, length;
14998 uint32_t shdr_status, shdr_add_status;
15001 union lpfc_sli4_cfg_shdr *shdr;
15003 if (startq >= phba->cfg_irq_chann)
15006 if (usdelay > 0xFFFF) {
15007 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15008 "6429 usdelay %d too large. Scaled down to "
15009 "0xFFFF.\n", usdelay);
15013 /* set values by EQ_DELAY register if supported */
15014 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15015 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15016 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15020 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15028 /* Otherwise, set values by mailbox cmd */
15030 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15032 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15033 "6428 Failed allocating mailbox cmd buffer."
15034 " EQ delay was not set.\n");
15037 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
15038 sizeof(struct lpfc_sli4_cfg_mhdr));
15039 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15040 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
15041 length, LPFC_SLI4_MBX_EMBED);
15042 eq_delay = &mbox->u.mqe.un.eq_delay;
15044 /* Calculate the delay multiplier from the maximum interrupts per second */
15045 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
15048 if (dmult > LPFC_DMULT_MAX)
15049 dmult = LPFC_DMULT_MAX;
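/*
 * Illustrative arithmetic: the mailbox interface expresses delay as a
 * multiplier of the port's base tick, so a requested usdelay maps to
 * dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC, clamped to
 * LPFC_DMULT_MAX; dmult == 0 means no interrupt coalescing delay.
 */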
15051 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15052 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15055 eq->q_mode = usdelay;
15056 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
15057 eq_delay->u.request.eq[cnt].phase = 0;
15058 eq_delay->u.request.eq[cnt].delay_multi = dmult;
15063 eq_delay->u.request.num_eq = cnt;
15065 mbox->vport = phba->pport;
15066 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15067 mbox->ctx_buf = NULL;
15068 mbox->ctx_ndlp = NULL;
15069 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15070 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
15071 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15072 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15073 if (shdr_status || shdr_add_status || rc) {
15074 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15075 "2512 MODIFY_EQ_DELAY mailbox failed with "
15076 "status x%x add_status x%x, mbx status x%x\n",
15077 shdr_status, shdr_add_status, rc);
15079 mempool_free(mbox, phba->mbox_mem_pool);
15084 * lpfc_eq_create - Create an Event Queue on the HBA
15085 * @phba: HBA structure that indicates port to create a queue on.
15086 * @eq: The queue structure to use to create the event queue.
15087 * @imax: The maximum interrupt per second limit.
15089 * This function creates an event queue, as detailed in @eq, on a port,
15090 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
15092 * The @phba struct is used to send mailbox command to HBA. The @eq struct
15093 * is used to get the entry count and entry size that are necessary to
15094 * determine the number of pages to allocate and use for this queue. This
15095 * function will send the EQ_CREATE mailbox command to the HBA to setup the
15096 * event queue. This function is synchronous and waits for the mailbox
15097 * command to finish before continuing.
15099 * On success this function will return a zero. If unable to allocate enough
15100 * memory this function will return -ENOMEM. If the queue create mailbox command
15101 * fails this function will return -ENXIO.
15104 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
15106 struct lpfc_mbx_eq_create *eq_create;
15107 LPFC_MBOXQ_t *mbox;
15108 int rc, length, status = 0;
15109 struct lpfc_dmabuf *dmabuf;
15110 uint32_t shdr_status, shdr_add_status;
15111 union lpfc_sli4_cfg_shdr *shdr;
15113 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15115 /* sanity check on queue memory */
15118 if (!phba->sli4_hba.pc_sli4_params.supported)
15119 hw_page_size = SLI4_PAGE_SIZE;
15121 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15124 length = (sizeof(struct lpfc_mbx_eq_create) -
15125 sizeof(struct lpfc_sli4_cfg_mhdr));
15126 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15127 LPFC_MBOX_OPCODE_EQ_CREATE,
15128 length, LPFC_SLI4_MBX_EMBED);
15129 eq_create = &mbox->u.mqe.un.eq_create;
15130 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
15131 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
15133 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
15135 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
15137 /* Use version 2 of CREATE_EQ if eqav is set */
15138 if (phba->sli4_hba.pc_sli4_params.eqav) {
15139 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15140 LPFC_Q_CREATE_VERSION_2);
15141 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
15142 phba->sli4_hba.pc_sli4_params.eqav);
15145 /* don't setup delay multiplier using EQ_CREATE */
15147 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
15149 switch (eq->entry_count) {
15151 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15152 "0360 Unsupported EQ count. (%d)\n",
15154 if (eq->entry_count < 256) {
15158 fallthrough; /* otherwise default to smallest count */
15160 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15164 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15168 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15172 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15176 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15180 list_for_each_entry(dmabuf, &eq->page_list, list) {
15181 memset(dmabuf->virt, 0, hw_page_size);
15182 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15183 putPaddrLow(dmabuf->phys);
15184 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15185 putPaddrHigh(dmabuf->phys);
15187 mbox->vport = phba->pport;
15188 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15189 mbox->ctx_buf = NULL;
15190 mbox->ctx_ndlp = NULL;
15191 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15192 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15193 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15194 if (shdr_status || shdr_add_status || rc) {
15195 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15196 "2500 EQ_CREATE mailbox failed with "
15197 "status x%x add_status x%x, mbx status x%x\n",
15198 shdr_status, shdr_add_status, rc);
15201 eq->type = LPFC_EQ;
15202 eq->subtype = LPFC_NONE;
15203 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
15204 if (eq->queue_id == 0xFFFF)
15206 eq->host_index = 0;
15207 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
15208 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
15210 mempool_free(mbox, phba->mbox_mem_pool);
15214 static int lpfc_cq_poll_handler(struct irq_poll *iop, int budget)
15216 struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
15218 __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
15224 * lpfc_cq_create - Create a Completion Queue on the HBA
15225 * @phba: HBA structure that indicates port to create a queue on.
15226 * @cq: The queue structure to use to create the completion queue.
15227 * @eq: The event queue to bind this completion queue to.
15228 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
15229 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
15231 * This function creates a completion queue, as detailed in @cq, on a port,
15232 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
15234 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15235 * is used to get the entry count and entry size that are necessary to
15236 * determine the number of pages to allocate and use for this queue. The @eq
15237 * is used to indicate which event queue to bind this completion queue to. This
15238 * function will send the CQ_CREATE mailbox command to the HBA to setup the
15239 * completion queue. This function is synchronous and waits for the mailbox
15240 * command to finish before continuing.
15242 * On success this function will return a zero. If unable to allocate enough
15243 * memory this function will return -ENOMEM. If the queue create mailbox command
15244 * fails this function will return -ENXIO.
15247 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
15248 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
15250 struct lpfc_mbx_cq_create *cq_create;
15251 struct lpfc_dmabuf *dmabuf;
15252 LPFC_MBOXQ_t *mbox;
15253 int rc, length, status = 0;
15254 uint32_t shdr_status, shdr_add_status;
15255 union lpfc_sli4_cfg_shdr *shdr;
15257 /* sanity check on queue memory */
15261 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15264 length = (sizeof(struct lpfc_mbx_cq_create) -
15265 sizeof(struct lpfc_sli4_cfg_mhdr));
15266 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15267 LPFC_MBOX_OPCODE_CQ_CREATE,
15268 length, LPFC_SLI4_MBX_EMBED);
15269 cq_create = &mbox->u.mqe.un.cq_create;
15270 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
15271 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
15273 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
15274 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
15275 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15276 phba->sli4_hba.pc_sli4_params.cqv);
15277 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
15278 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
15279 (cq->page_size / SLI4_PAGE_SIZE));
15280 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
15282 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
15283 phba->sli4_hba.pc_sli4_params.cqav);
15285 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
15288 switch (cq->entry_count) {
15291 if (phba->sli4_hba.pc_sli4_params.cqv ==
15292 LPFC_Q_CREATE_VERSION_2) {
15293 cq_create->u.request.context.lpfc_cq_context_count =
15295 bf_set(lpfc_cq_context_count,
15296 &cq_create->u.request.context,
15297 LPFC_CQ_CNT_WORD7);
15302 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15303 "0361 Unsupported CQ count: "
15304 "entry cnt %d sz %d pg cnt %d\n",
15305 cq->entry_count, cq->entry_size,
15307 if (cq->entry_count < 256) {
15311 fallthrough; /* otherwise default to smallest count */
15313 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15317 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15321 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15325 list_for_each_entry(dmabuf, &cq->page_list, list) {
15326 memset(dmabuf->virt, 0, cq->page_size);
15327 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15328 putPaddrLow(dmabuf->phys);
15329 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15330 putPaddrHigh(dmabuf->phys);
15332 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15334 /* The IOCTL status is embedded in the mailbox subheader. */
15335 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15336 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15337 if (shdr_status || shdr_add_status || rc) {
15338 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15339 "2501 CQ_CREATE mailbox failed with "
15340 "status x%x add_status x%x, mbx status x%x\n",
15341 shdr_status, shdr_add_status, rc);
15345 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15346 if (cq->queue_id == 0xFFFF) {
15350 /* link the cq onto the parent eq child list */
15351 list_add_tail(&cq->list, &eq->child_list);
15352 /* Set up completion queue's type and subtype */
15354 cq->subtype = subtype;
15355 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15356 cq->assoc_qid = eq->queue_id;
15358 cq->host_index = 0;
15359 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15360 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
15362 if (cq->queue_id > phba->sli4_hba.cq_max)
15363 phba->sli4_hba.cq_max = cq->queue_id;
15365 irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_handler);
15367 mempool_free(mbox, phba->mbox_mem_pool);
15372 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
15373 * @phba: HBA structure that indicates port to create a queue on.
15374 * @cqp: The queue structure array to use to create the completion queues.
15375 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
15376 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
15377 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
15379 * This function creates a set of completion queues to support MRQ,
15380 * as detailed in @cqp, on a port,
15381 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
15383 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15384 * is used to get the entry count and entry size that are necessary to
15385 * determine the number of pages to allocate and use for this queue. The @eq
15386 * is used to indicate which event queue to bind this completion queue to. This
15387 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
15388 * completion queue. This function is synchronous and waits for the mailbox
15389 * command to finish before continuing.
15391 * On success this function will return a zero. If unable to allocate enough
15392 * memory this function will return -ENOMEM. If the queue create mailbox command
15393 * fails this function will return -ENXIO.
15396 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15397 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
15400 struct lpfc_queue *cq;
15401 struct lpfc_queue *eq;
15402 struct lpfc_mbx_cq_create_set *cq_set;
15403 struct lpfc_dmabuf *dmabuf;
15404 LPFC_MBOXQ_t *mbox;
15405 int rc, length, alloclen, status = 0;
15406 int cnt, idx, numcq, page_idx = 0;
15407 uint32_t shdr_status, shdr_add_status;
15408 union lpfc_sli4_cfg_shdr *shdr;
15409 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15411 /* sanity check on queue memory */
15412 numcq = phba->cfg_nvmet_mrq;
15413 if (!cqp || !hdwq || !numcq)
15416 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15420 length = sizeof(struct lpfc_mbx_cq_create_set);
15421 length += ((numcq * cqp[0]->page_count) *
15422 sizeof(struct dma_address));
15423 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15424 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
15425 LPFC_SLI4_MBX_NEMBED);
15426 if (alloclen < length) {
15427 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15428 "3098 Allocated DMA memory size (%d) is "
15429 "less than the requested DMA memory size "
15430 "(%d)\n", alloclen, length);
15434 cq_set = mbox->sge_array->addr[0];
15435 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
15436 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
15438 for (idx = 0; idx < numcq; idx++) {
15440 eq = hdwq[idx].hba_eq;
15445 if (!phba->sli4_hba.pc_sli4_params.supported)
15446 hw_page_size = cq->page_size;
15450 bf_set(lpfc_mbx_cq_create_set_page_size,
15451 &cq_set->u.request,
15452 (hw_page_size / SLI4_PAGE_SIZE));
15453 bf_set(lpfc_mbx_cq_create_set_num_pages,
15454 &cq_set->u.request, cq->page_count);
15455 bf_set(lpfc_mbx_cq_create_set_evt,
15456 &cq_set->u.request, 1);
15457 bf_set(lpfc_mbx_cq_create_set_valid,
15458 &cq_set->u.request, 1);
15459 bf_set(lpfc_mbx_cq_create_set_cqe_size,
15460 &cq_set->u.request, 0);
15461 bf_set(lpfc_mbx_cq_create_set_num_cq,
15462 &cq_set->u.request, numcq);
15463 bf_set(lpfc_mbx_cq_create_set_autovalid,
15464 &cq_set->u.request,
15465 phba->sli4_hba.pc_sli4_params.cqav);
15466 switch (cq->entry_count) {
15469 if (phba->sli4_hba.pc_sli4_params.cqv ==
15470 LPFC_Q_CREATE_VERSION_2) {
15471 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15472 &cq_set->u.request,
15474 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15475 &cq_set->u.request,
15476 LPFC_CQ_CNT_WORD7);
15481 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15482 "3118 Bad CQ count. (%d)\n",
15484 if (cq->entry_count < 256) {
15488 fallthrough; /* otherwise default to smallest */
15490 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15491 &cq_set->u.request, LPFC_CQ_CNT_256);
15494 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15495 &cq_set->u.request, LPFC_CQ_CNT_512);
15498 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15499 &cq_set->u.request, LPFC_CQ_CNT_1024);
15502 bf_set(lpfc_mbx_cq_create_set_eq_id0,
15503 &cq_set->u.request, eq->queue_id);
15506 bf_set(lpfc_mbx_cq_create_set_eq_id1,
15507 &cq_set->u.request, eq->queue_id);
15510 bf_set(lpfc_mbx_cq_create_set_eq_id2,
15511 &cq_set->u.request, eq->queue_id);
15514 bf_set(lpfc_mbx_cq_create_set_eq_id3,
15515 &cq_set->u.request, eq->queue_id);
15518 bf_set(lpfc_mbx_cq_create_set_eq_id4,
15519 &cq_set->u.request, eq->queue_id);
15522 bf_set(lpfc_mbx_cq_create_set_eq_id5,
15523 &cq_set->u.request, eq->queue_id);
15526 bf_set(lpfc_mbx_cq_create_set_eq_id6,
15527 &cq_set->u.request, eq->queue_id);
15530 bf_set(lpfc_mbx_cq_create_set_eq_id7,
15531 &cq_set->u.request, eq->queue_id);
15534 bf_set(lpfc_mbx_cq_create_set_eq_id8,
15535 &cq_set->u.request, eq->queue_id);
15538 bf_set(lpfc_mbx_cq_create_set_eq_id9,
15539 &cq_set->u.request, eq->queue_id);
15542 bf_set(lpfc_mbx_cq_create_set_eq_id10,
15543 &cq_set->u.request, eq->queue_id);
15546 bf_set(lpfc_mbx_cq_create_set_eq_id11,
15547 &cq_set->u.request, eq->queue_id);
15550 bf_set(lpfc_mbx_cq_create_set_eq_id12,
15551 &cq_set->u.request, eq->queue_id);
15554 bf_set(lpfc_mbx_cq_create_set_eq_id13,
15555 &cq_set->u.request, eq->queue_id);
15558 bf_set(lpfc_mbx_cq_create_set_eq_id14,
15559 &cq_set->u.request, eq->queue_id);
15562 bf_set(lpfc_mbx_cq_create_set_eq_id15,
15563 &cq_set->u.request, eq->queue_id);
15567 /* link the cq onto the parent eq child list */
15568 list_add_tail(&cq->list, &eq->child_list);
15569 /* Set up completion queue's type and subtype */
15571 cq->subtype = subtype;
15572 cq->assoc_qid = eq->queue_id;
15574 cq->host_index = 0;
15575 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15576 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
15581 list_for_each_entry(dmabuf, &cq->page_list, list) {
15582 memset(dmabuf->virt, 0, hw_page_size);
15583 cnt = page_idx + dmabuf->buffer_tag;
15584 cq_set->u.request.page[cnt].addr_lo =
15585 putPaddrLow(dmabuf->phys);
15586 cq_set->u.request.page[cnt].addr_hi =
15587 putPaddrHigh(dmabuf->phys);
15593 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15595 /* The IOCTL status is embedded in the mailbox subheader. */
15596 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15597 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15598 if (shdr_status || shdr_add_status || rc) {
15599 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15600 "3119 CQ_CREATE_SET mailbox failed with "
15601 "status x%x add_status x%x, mbx status x%x\n",
15602 shdr_status, shdr_add_status, rc);
15606 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15607 if (rc == 0xFFFF) {
15612 for (idx = 0; idx < numcq; idx++) {
15614 cq->queue_id = rc + idx;
15615 if (cq->queue_id > phba->sli4_hba.cq_max)
15616 phba->sli4_hba.cq_max = cq->queue_id;
15620 lpfc_sli4_mbox_cmd_free(phba, mbox);
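/*
 * Worked example (hypothetical helper): the non-embedded CREATE_CQ_SET
 * payload sized exactly as in lpfc_cq_create_set() above. With numcq == 4
 * and page_count == 2, the payload carries 8 struct dma_address entries
 * after the fixed command structure.
 */
static inline int lpfc_example_cq_set_len(int numcq, int page_count)
{
	/* one dma_address per page, for every CQ in the set */
	return sizeof(struct lpfc_mbx_cq_create_set) +
	       (numcq * page_count) * sizeof(struct dma_address);
}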
15625 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
15626 * @phba: HBA structure that indicates port to create a queue on.
15627 * @mq: The queue structure to use to create the mailbox queue.
15628 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
15629 * @cq: The completion queue to associate with this mq.
15631 * This function provides fallback (fb) functionality when
15632 * mq_create_ext fails on older FW generations. Its purpose is identical
15633 * to mq_create_ext otherwise.
15635 * This routine cannot fail as all attributes were previously accessed and
15636 * initialized in mq_create_ext.
15639 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15640 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15642 struct lpfc_mbx_mq_create *mq_create;
15643 struct lpfc_dmabuf *dmabuf;
15646 length = (sizeof(struct lpfc_mbx_mq_create) -
15647 sizeof(struct lpfc_sli4_cfg_mhdr));
15648 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15649 LPFC_MBOX_OPCODE_MQ_CREATE,
15650 length, LPFC_SLI4_MBX_EMBED);
15651 mq_create = &mbox->u.mqe.un.mq_create;
15652 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15654 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15656 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15657 switch (mq->entry_count) {
15659 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15660 LPFC_MQ_RING_SIZE_16);
15663 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15664 LPFC_MQ_RING_SIZE_32);
15667 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15668 LPFC_MQ_RING_SIZE_64);
15671 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15672 LPFC_MQ_RING_SIZE_128);
15675 list_for_each_entry(dmabuf, &mq->page_list, list) {
15676 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15677 putPaddrLow(dmabuf->phys);
15678 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15679 putPaddrHigh(dmabuf->phys);
15684 * lpfc_mq_create - Create a mailbox Queue on the HBA
15685 * @phba: HBA structure that indicates port to create a queue on.
15686 * @mq: The queue structure to use to create the mailbox queue.
15687 * @cq: The completion queue to associate with this mq.
15688 * @subtype: The queue's subtype.
15690 * This function creates a mailbox queue, as detailed in @mq, on a port,
15691 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
15693 * The @phba struct is used to send the mailbox command to the HBA. The @mq
15694 * struct is used to get the entry count and entry size that are necessary to
15695 * determine the number of pages to allocate and use for this queue. This
15696 * function will send the MQ_CREATE mailbox command to the HBA to set up the
15697 * mailbox queue. The mailbox is issued synchronously (MBX_POLL), so the
15698 * function waits for the command to finish before continuing.
15700 * On success this function will return a zero. If unable to allocate enough
15701 * memory this function will return -ENOMEM. If the queue create mailbox command
15702 * fails this function will return -ENXIO.
15705 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15706 struct lpfc_queue *cq, uint32_t subtype)
15708 struct lpfc_mbx_mq_create *mq_create;
15709 struct lpfc_mbx_mq_create_ext *mq_create_ext;
15710 struct lpfc_dmabuf *dmabuf;
15711 LPFC_MBOXQ_t *mbox;
15712 int rc, length, status = 0;
15713 uint32_t shdr_status, shdr_add_status;
15714 union lpfc_sli4_cfg_shdr *shdr;
15715 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15717 /* sanity check on queue memory */
15720 if (!phba->sli4_hba.pc_sli4_params.supported)
15721 hw_page_size = SLI4_PAGE_SIZE;
15723 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15726 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
15727 sizeof(struct lpfc_sli4_cfg_mhdr));
15728 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15729 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
15730 length, LPFC_SLI4_MBX_EMBED);
15732 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
15733 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
15734 bf_set(lpfc_mbx_mq_create_ext_num_pages,
15735 &mq_create_ext->u.request, mq->page_count);
15736 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
15737 &mq_create_ext->u.request, 1);
15738 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
15739 &mq_create_ext->u.request, 1);
15740 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
15741 &mq_create_ext->u.request, 1);
15742 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
15743 &mq_create_ext->u.request, 1);
15744 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
15745 &mq_create_ext->u.request, 1);
15746 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
15747 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15748 phba->sli4_hba.pc_sli4_params.mqv);
15749 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
15750 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
15753 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
15755 switch (mq->entry_count) {
15757 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15758 "0362 Unsupported MQ count. (%d)\n",
15760 if (mq->entry_count < 16) {
15764 fallthrough; /* otherwise default to smallest count */
15766 bf_set(lpfc_mq_context_ring_size,
15767 &mq_create_ext->u.request.context,
15768 LPFC_MQ_RING_SIZE_16);
15771 bf_set(lpfc_mq_context_ring_size,
15772 &mq_create_ext->u.request.context,
15773 LPFC_MQ_RING_SIZE_32);
15776 bf_set(lpfc_mq_context_ring_size,
15777 &mq_create_ext->u.request.context,
15778 LPFC_MQ_RING_SIZE_64);
15781 bf_set(lpfc_mq_context_ring_size,
15782 &mq_create_ext->u.request.context,
15783 LPFC_MQ_RING_SIZE_128);
15786 list_for_each_entry(dmabuf, &mq->page_list, list) {
15787 memset(dmabuf->virt, 0, hw_page_size);
15788 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
15789 putPaddrLow(dmabuf->phys);
15790 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
15791 putPaddrHigh(dmabuf->phys);
15793 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15794 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15795 &mq_create_ext->u.response);
15796 if (rc != MBX_SUCCESS) {
15797 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15798 "2795 MQ_CREATE_EXT failed with "
15799 "status x%x. Failback to MQ_CREATE.\n",
15801 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15802 mq_create = &mbox->u.mqe.un.mq_create;
15803 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15804 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15805 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15806 &mq_create->u.response);
15809 /* The IOCTL status is embedded in the mailbox subheader. */
15810 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15811 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15812 if (shdr_status || shdr_add_status || rc) {
15813 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15814 "2502 MQ_CREATE mailbox failed with "
15815 "status x%x add_status x%x, mbx status x%x\n",
15816 shdr_status, shdr_add_status, rc);
15820 if (mq->queue_id == 0xFFFF) {
15824 mq->type = LPFC_MQ;
15825 mq->assoc_qid = cq->queue_id;
15826 mq->subtype = subtype;
15827 mq->host_index = 0;
15830 /* link the mq onto the parent cq child list */
15831 list_add_tail(&mq->list, &cq->child_list);
15833 mempool_free(mbox, phba->mbox_mem_pool);
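/*
 * Illustrative caller sketch (not driver code): creating the port's mailbox
 * queue once its CQ exists. The mbx_wq/mbx_cq field names mirror the
 * sli4_hba bookkeeping used elsewhere in the driver and are assumptions
 * here; LPFC_MBOX is the subtype the MQ is expected to carry.
 */
static int lpfc_example_setup_mq(struct lpfc_hba *phba)
{
	/* MQ completions are steered to the slow-path mailbox CQ */
	return lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
			      phba->sli4_hba.mbx_cq, LPFC_MBOX);
}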
15838 * lpfc_wq_create - Create a Work Queue on the HBA
15839 * @phba: HBA structure that indicates port to create a queue on.
15840 * @wq: The queue structure to use to create the work queue.
15841 * @cq: The completion queue to bind this work queue to.
15842 * @subtype: The subtype of the work queue indicating its functionality.
15844 * This function creates a work queue, as detailed in @wq, on a port, described
15845 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
15847 * The @phba struct is used to send the mailbox command to the HBA. The @wq
15848 * struct is used to get the entry count and entry size that are necessary to
15849 * determine the number of pages to allocate and use for this queue. The @cq
15850 * is used to indicate which completion queue to bind this work queue to. This
15851 * function will send the WQ_CREATE mailbox command to the HBA to set up the
15852 * work queue. The mailbox is issued synchronously (MBX_POLL), so the
15853 * function waits for the command to finish before continuing.
15855 * On success this function will return a zero. If unable to allocate enough
15856 * memory this function will return -ENOMEM. If the queue create mailbox command
15857 * fails this function will return -ENXIO.
15860 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15861 struct lpfc_queue *cq, uint32_t subtype)
15863 struct lpfc_mbx_wq_create *wq_create;
15864 struct lpfc_dmabuf *dmabuf;
15865 LPFC_MBOXQ_t *mbox;
15866 int rc, length, status = 0;
15867 uint32_t shdr_status, shdr_add_status;
15868 union lpfc_sli4_cfg_shdr *shdr;
15869 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15870 struct dma_address *page;
15871 void __iomem *bar_memmap_p;
15872 uint32_t db_offset;
15873 uint16_t pci_barset;
15874 uint8_t dpp_barset;
15875 uint32_t dpp_offset;
15876 uint8_t wq_create_version;
15878 unsigned long pg_addr;
15881 /* sanity check on queue memory */
15884 if (!phba->sli4_hba.pc_sli4_params.supported)
15885 hw_page_size = wq->page_size;
15887 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15890 length = (sizeof(struct lpfc_mbx_wq_create) -
15891 sizeof(struct lpfc_sli4_cfg_mhdr));
15892 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15893 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15894 length, LPFC_SLI4_MBX_EMBED);
15895 wq_create = &mbox->u.mqe.un.wq_create;
15896 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15897 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15899 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15902 /* wqv is the earliest version supported, NOT the latest */
15903 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15904 phba->sli4_hba.pc_sli4_params.wqv);
15906 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15907 (wq->page_size > SLI4_PAGE_SIZE))
15908 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15910 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15913 if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
15914 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15916 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15918 switch (wq_create_version) {
15919 case LPFC_Q_CREATE_VERSION_1:
15920 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15922 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15923 LPFC_Q_CREATE_VERSION_1);
15925 switch (wq->entry_size) {
15928 bf_set(lpfc_mbx_wq_create_wqe_size,
15929 &wq_create->u.request_1,
15930 LPFC_WQ_WQE_SIZE_64);
15933 bf_set(lpfc_mbx_wq_create_wqe_size,
15934 &wq_create->u.request_1,
15935 LPFC_WQ_WQE_SIZE_128);
15938 /* Request DPP by default */
15939 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15940 bf_set(lpfc_mbx_wq_create_page_size,
15941 &wq_create->u.request_1,
15942 (wq->page_size / SLI4_PAGE_SIZE));
15943 page = wq_create->u.request_1.page;
15946 page = wq_create->u.request.page;
15950 list_for_each_entry(dmabuf, &wq->page_list, list) {
15951 memset(dmabuf->virt, 0, hw_page_size);
15952 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15953 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
15956 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15957 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15959 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15960 /* The IOCTL status is embedded in the mailbox subheader. */
15961 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15962 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15963 if (shdr_status || shdr_add_status || rc) {
15964 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15965 "2503 WQ_CREATE mailbox failed with "
15966 "status x%x add_status x%x, mbx status x%x\n",
15967 shdr_status, shdr_add_status, rc);
15972 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15973 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15974 &wq_create->u.response);
15976 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15977 &wq_create->u.response_1);
15979 if (wq->queue_id == 0xFFFF) {
15984 wq->db_format = LPFC_DB_LIST_FORMAT;
15985 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15986 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15987 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15988 &wq_create->u.response);
15989 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15990 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15991 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15992 "3265 WQ[%d] doorbell format "
15993 "not supported: x%x\n",
15994 wq->queue_id, wq->db_format);
15998 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15999 &wq_create->u.response);
16000 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16002 if (!bar_memmap_p) {
16003 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16004 "3263 WQ[%d] failed to memmap "
16005 "pci barset:x%x\n",
16006 wq->queue_id, pci_barset);
16010 db_offset = wq_create->u.response.doorbell_offset;
16011 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
16012 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
16013 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16014 "3252 WQ[%d] doorbell offset "
16015 "not supported: x%x\n",
16016 wq->queue_id, db_offset);
16020 wq->db_regaddr = bar_memmap_p + db_offset;
16021 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16022 "3264 WQ[%d]: barset:x%x, offset:x%x, "
16023 "format:x%x\n", wq->queue_id,
16024 pci_barset, db_offset, wq->db_format);
16026 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16028 /* Check if DPP was honored by the firmware */
16029 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
16030 &wq_create->u.response_1);
16031 if (wq->dpp_enable) {
16032 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
16033 &wq_create->u.response_1);
16034 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16036 if (!bar_memmap_p) {
16037 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16038 "3267 WQ[%d] failed to memmap "
16039 "pci barset:x%x\n",
16040 wq->queue_id, pci_barset);
16044 db_offset = wq_create->u.response_1.doorbell_offset;
16045 wq->db_regaddr = bar_memmap_p + db_offset;
16046 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
16047 &wq_create->u.response_1);
16048 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
16049 &wq_create->u.response_1);
16050 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16052 if (!bar_memmap_p) {
16053 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16054 "3268 WQ[%d] failed to memmap "
16055 "pci barset:x%x\n",
16056 wq->queue_id, dpp_barset);
16060 dpp_offset = wq_create->u.response_1.dpp_offset;
16061 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
16062 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16063 "3271 WQ[%d]: barset:x%x, offset:x%x, "
16064 "dpp_id:x%x dpp_barset:x%x "
16065 "dpp_offset:x%x\n",
16066 wq->queue_id, pci_barset, db_offset,
16067 wq->dpp_id, dpp_barset, dpp_offset);
16070 /* Enable combined writes for DPP aperture */
16071 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
16072 rc = set_memory_wc(pg_addr, 1);
16074 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16075 "3272 Cannot setup Combined "
16076 "Write on WQ[%d] - disable DPP\n",
16078 phba->cfg_enable_dpp = 0;
16081 phba->cfg_enable_dpp = 0;
16084 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16086 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
16087 if (wq->pring == NULL) {
16091 wq->type = LPFC_WQ;
16092 wq->assoc_qid = cq->queue_id;
16093 wq->subtype = subtype;
16094 wq->host_index = 0;
16096 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
16098 /* link the wq onto the parent cq child list */
16099 list_add_tail(&wq->list, &cq->child_list);
16101 mempool_free(mbox, phba->mbox_mem_pool);
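/*
 * Illustrative sketch (hypothetical helper): the usual bring-up order is
 * EQ -> CQ -> WQ, because lpfc_wq_create() above links @wq onto @cq's
 * child_list and records cq->queue_id in wq->assoc_qid. The
 * lpfc_cq_create() signature is assumed from its body earlier in this file,
 * and LPFC_WCQ/LPFC_IO mirror the type/subtype constants the driver uses
 * for fast-path IO queues. A partial failure is unwound with the destroy
 * routine further down in this file.
 */
static int lpfc_example_create_io_pair(struct lpfc_hba *phba,
				       struct lpfc_queue *eq,
				       struct lpfc_queue *cq,
				       struct lpfc_queue *wq)
{
	int rc;

	rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_IO);
	if (rc)
		return rc;

	rc = lpfc_wq_create(phba, wq, cq, LPFC_IO);
	if (rc)
		lpfc_cq_destroy(phba, cq);	/* unwind the orphaned CQ */
	return rc;
}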
16106 * lpfc_rq_create - Create a Receive Queue on the HBA
16107 * @phba: HBA structure that indicates port to create a queue on.
16108 * @hrq: The queue structure to use to create the header receive queue.
16109 * @drq: The queue structure to use to create the data receive queue.
16110 * @cq: The completion queue to bind these receive queues to.
16111 * @subtype: The subtype of the receive queues indicating their functionality.
16113 * This function creates a receive buffer queue pair, as detailed in @hrq and
16114 * @drq, on a port described by @phba, by sending a RQ_CREATE mailbox command
16117 * The @phba struct is used to send the mailbox command to the HBA. The @drq and
16118 * @hrq structs are used to get the entry count that is necessary to determine
16119 * the number of pages to use for each queue. The @cq is used to indicate which
16120 * completion queue the buffers posted to these queues are bound to.
16121 * This function will send the RQ_CREATE mailbox command to the HBA to set up the
16122 * receive queue pair. The mailbox is issued synchronously (MBX_POLL), so the
16123 * function waits for the command to finish before continuing.
16125 * On success this function will return a zero. If unable to allocate enough
16126 * memory this function will return -ENOMEM. If the queue create mailbox command
16127 * fails this function will return -ENXIO.
16130 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16131 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
16133 struct lpfc_mbx_rq_create *rq_create;
16134 struct lpfc_dmabuf *dmabuf;
16135 LPFC_MBOXQ_t *mbox;
16136 int rc, length, status = 0;
16137 uint32_t shdr_status, shdr_add_status;
16138 union lpfc_sli4_cfg_shdr *shdr;
16139 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16140 void __iomem *bar_memmap_p;
16141 uint32_t db_offset;
16142 uint16_t pci_barset;
16144 /* sanity check on queue memory */
16145 if (!hrq || !drq || !cq)
16147 if (!phba->sli4_hba.pc_sli4_params.supported)
16148 hw_page_size = SLI4_PAGE_SIZE;
16150 if (hrq->entry_count != drq->entry_count)
16152 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16155 length = (sizeof(struct lpfc_mbx_rq_create) -
16156 sizeof(struct lpfc_sli4_cfg_mhdr));
16157 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16158 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16159 length, LPFC_SLI4_MBX_EMBED);
16160 rq_create = &mbox->u.mqe.un.rq_create;
16161 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16162 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16163 phba->sli4_hba.pc_sli4_params.rqv);
16164 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16165 bf_set(lpfc_rq_context_rqe_count_1,
16166 &rq_create->u.request.context,
16168 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
16169 bf_set(lpfc_rq_context_rqe_size,
16170 &rq_create->u.request.context,
16172 bf_set(lpfc_rq_context_page_size,
16173 &rq_create->u.request.context,
16174 LPFC_RQ_PAGE_SIZE_4096);
16176 switch (hrq->entry_count) {
16178 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16179 "2535 Unsupported RQ count. (%d)\n",
16181 if (hrq->entry_count < 512) {
16185 fallthrough; /* otherwise default to smallest count */
16187 bf_set(lpfc_rq_context_rqe_count,
16188 &rq_create->u.request.context,
16189 LPFC_RQ_RING_SIZE_512);
16192 bf_set(lpfc_rq_context_rqe_count,
16193 &rq_create->u.request.context,
16194 LPFC_RQ_RING_SIZE_1024);
16197 bf_set(lpfc_rq_context_rqe_count,
16198 &rq_create->u.request.context,
16199 LPFC_RQ_RING_SIZE_2048);
16202 bf_set(lpfc_rq_context_rqe_count,
16203 &rq_create->u.request.context,
16204 LPFC_RQ_RING_SIZE_4096);
16207 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
16208 LPFC_HDR_BUF_SIZE);
16210 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16212 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16214 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16215 memset(dmabuf->virt, 0, hw_page_size);
16216 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16217 putPaddrLow(dmabuf->phys);
16218 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16219 putPaddrHigh(dmabuf->phys);
16221 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16222 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16224 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16225 /* The IOCTL status is embedded in the mailbox subheader. */
16226 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16227 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16228 if (shdr_status || shdr_add_status || rc) {
16229 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16230 "2504 RQ_CREATE mailbox failed with "
16231 "status x%x add_status x%x, mbx status x%x\n",
16232 shdr_status, shdr_add_status, rc);
16236 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16237 if (hrq->queue_id == 0xFFFF) {
16242 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16243 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
16244 &rq_create->u.response);
16245 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
16246 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
16247 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16248 "3262 RQ [%d] doorbell format not "
16249 "supported: x%x\n", hrq->queue_id,
16255 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
16256 &rq_create->u.response);
16257 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
16258 if (!bar_memmap_p) {
16259 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16260 "3269 RQ[%d] failed to memmap pci "
16261 "barset:x%x\n", hrq->queue_id,
16267 db_offset = rq_create->u.response.doorbell_offset;
16268 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
16269 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
16270 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16271 "3270 RQ[%d] doorbell offset not "
16272 "supported: x%x\n", hrq->queue_id,
16277 hrq->db_regaddr = bar_memmap_p + db_offset;
16278 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16279 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
16280 "format:x%x\n", hrq->queue_id, pci_barset,
16281 db_offset, hrq->db_format);
16283 hrq->db_format = LPFC_DB_RING_FORMAT;
16284 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16286 hrq->type = LPFC_HRQ;
16287 hrq->assoc_qid = cq->queue_id;
16288 hrq->subtype = subtype;
16289 hrq->host_index = 0;
16290 hrq->hba_index = 0;
16291 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16293 /* now create the data queue */
16294 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16295 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16296 length, LPFC_SLI4_MBX_EMBED);
16297 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16298 phba->sli4_hba.pc_sli4_params.rqv);
16299 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16300 bf_set(lpfc_rq_context_rqe_count_1,
16301 &rq_create->u.request.context, hrq->entry_count);
16302 if (subtype == LPFC_NVMET)
16303 rq_create->u.request.context.buffer_size =
16304 LPFC_NVMET_DATA_BUF_SIZE;
16306 rq_create->u.request.context.buffer_size =
16307 LPFC_DATA_BUF_SIZE;
16308 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
16310 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
16311 (PAGE_SIZE/SLI4_PAGE_SIZE));
16313 switch (drq->entry_count) {
16315 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16316 "2536 Unsupported RQ count. (%d)\n",
16318 if (drq->entry_count < 512) {
16322 fallthrough; /* otherwise default to smallest count */
16324 bf_set(lpfc_rq_context_rqe_count,
16325 &rq_create->u.request.context,
16326 LPFC_RQ_RING_SIZE_512);
16329 bf_set(lpfc_rq_context_rqe_count,
16330 &rq_create->u.request.context,
16331 LPFC_RQ_RING_SIZE_1024);
16334 bf_set(lpfc_rq_context_rqe_count,
16335 &rq_create->u.request.context,
16336 LPFC_RQ_RING_SIZE_2048);
16339 bf_set(lpfc_rq_context_rqe_count,
16340 &rq_create->u.request.context,
16341 LPFC_RQ_RING_SIZE_4096);
16344 if (subtype == LPFC_NVMET)
16345 bf_set(lpfc_rq_context_buf_size,
16346 &rq_create->u.request.context,
16347 LPFC_NVMET_DATA_BUF_SIZE);
16349 bf_set(lpfc_rq_context_buf_size,
16350 &rq_create->u.request.context,
16351 LPFC_DATA_BUF_SIZE);
16353 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16355 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16357 list_for_each_entry(dmabuf, &drq->page_list, list) {
16358 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16359 putPaddrLow(dmabuf->phys);
16360 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16361 putPaddrHigh(dmabuf->phys);
16363 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16364 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16365 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16366 /* The IOCTL status is embedded in the mailbox subheader. */
16367 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16368 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16369 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16370 if (shdr_status || shdr_add_status || rc) {
16374 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16375 if (drq->queue_id == 0xFFFF) {
16379 drq->type = LPFC_DRQ;
16380 drq->assoc_qid = cq->queue_id;
16381 drq->subtype = subtype;
16382 drq->host_index = 0;
16383 drq->hba_index = 0;
16384 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16386 /* link the header and data RQs onto the parent cq child list */
16387 list_add_tail(&hrq->list, &cq->child_list);
16388 list_add_tail(&drq->list, &cq->child_list);
16391 mempool_free(mbox, phba->mbox_mem_pool);
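/*
 * Illustrative sketch (hypothetical helper): lpfc_rq_create() above rejects
 * asymmetric pairs (see the hrq/drq entry_count check near its top), so a
 * caller validates that before issuing the create. LPFC_USOL is the subtype
 * the driver uses for the unsolicited ELS receive pair; it is assumed here.
 */
static int lpfc_example_create_els_rq(struct lpfc_hba *phba,
				      struct lpfc_queue *hrq,
				      struct lpfc_queue *drq,
				      struct lpfc_queue *cq)
{
	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;		/* the pair must be symmetric */

	return lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
}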
16396 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
16397 * @phba: HBA structure that indicates port to create a queue on.
16398 * @hrqp: The queue structure array to use to create the header receive queues.
16399 * @drqp: The queue structure array to use to create the data receive queues.
16400 * @cqp: The completion queue array to bind these receive queues to.
16401 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16403 * This function creates receive buffer queue pairs, as detailed in @hrqp and
16404 * @drqp, on a port described by @phba, by sending a RQ_CREATE mailbox command
16407 * The @phba struct is used to send the mailbox command to the HBA. The @drqp and
16408 * @hrqp structs are used to get the entry counts that are necessary to determine
16409 * the number of pages to use for each queue. The @cqp array indicates which
16410 * completion queues the buffers posted to these queues are bound to.
16411 * This function will send the RQ_CREATE mailbox command to the HBA to set up the
16412 * receive queue pairs. The mailbox is issued synchronously (MBX_POLL), so the
16413 * function waits for the command to finish before continuing.
16415 * On success this function will return a zero. If unable to allocate enough
16416 * memory this function will return -ENOMEM. If the queue create mailbox command
16417 * fails this function will return -ENXIO.
16420 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
16421 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
16424 struct lpfc_queue *hrq, *drq, *cq;
16425 struct lpfc_mbx_rq_create_v2 *rq_create;
16426 struct lpfc_dmabuf *dmabuf;
16427 LPFC_MBOXQ_t *mbox;
16428 int rc, length, alloclen, status = 0;
16429 int cnt, idx, numrq, page_idx = 0;
16430 uint32_t shdr_status, shdr_add_status;
16431 union lpfc_sli4_cfg_shdr *shdr;
16432 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16434 numrq = phba->cfg_nvmet_mrq;
16435 /* sanity check on array memory */
16436 if (!hrqp || !drqp || !cqp || !numrq)
16438 if (!phba->sli4_hba.pc_sli4_params.supported)
16439 hw_page_size = SLI4_PAGE_SIZE;
16441 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16445 length = sizeof(struct lpfc_mbx_rq_create_v2);
16446 length += ((2 * numrq * hrqp[0]->page_count) *
16447 sizeof(struct dma_address));
16449 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16450 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
16451 LPFC_SLI4_MBX_NEMBED);
16452 if (alloclen < length) {
16453 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16454 "3099 Allocated DMA memory size (%d) is "
16455 "less than the requested DMA memory size "
16456 "(%d)\n", alloclen, length);
16463 rq_create = mbox->sge_array->addr[0];
16464 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
16466 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
16469 for (idx = 0; idx < numrq; idx++) {
16474 /* sanity check on queue memory */
16475 if (!hrq || !drq || !cq) {
16480 if (hrq->entry_count != drq->entry_count) {
16486 bf_set(lpfc_mbx_rq_create_num_pages,
16487 &rq_create->u.request,
16489 bf_set(lpfc_mbx_rq_create_rq_cnt,
16490 &rq_create->u.request, (numrq * 2));
16491 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
16493 bf_set(lpfc_rq_context_base_cq,
16494 &rq_create->u.request.context,
16496 bf_set(lpfc_rq_context_data_size,
16497 &rq_create->u.request.context,
16498 LPFC_NVMET_DATA_BUF_SIZE);
16499 bf_set(lpfc_rq_context_hdr_size,
16500 &rq_create->u.request.context,
16501 LPFC_HDR_BUF_SIZE);
16502 bf_set(lpfc_rq_context_rqe_count_1,
16503 &rq_create->u.request.context,
16505 bf_set(lpfc_rq_context_rqe_size,
16506 &rq_create->u.request.context,
16508 bf_set(lpfc_rq_context_page_size,
16509 &rq_create->u.request.context,
16510 (PAGE_SIZE/SLI4_PAGE_SIZE));
16513 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16514 memset(dmabuf->virt, 0, hw_page_size);
16515 cnt = page_idx + dmabuf->buffer_tag;
16516 rq_create->u.request.page[cnt].addr_lo =
16517 putPaddrLow(dmabuf->phys);
16518 rq_create->u.request.page[cnt].addr_hi =
16519 putPaddrHigh(dmabuf->phys);
16525 list_for_each_entry(dmabuf, &drq->page_list, list) {
16526 memset(dmabuf->virt, 0, hw_page_size);
16527 cnt = page_idx + dmabuf->buffer_tag;
16528 rq_create->u.request.page[cnt].addr_lo =
16529 putPaddrLow(dmabuf->phys);
16530 rq_create->u.request.page[cnt].addr_hi =
16531 putPaddrHigh(dmabuf->phys);
16536 hrq->db_format = LPFC_DB_RING_FORMAT;
16537 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16538 hrq->type = LPFC_HRQ;
16539 hrq->assoc_qid = cq->queue_id;
16540 hrq->subtype = subtype;
16541 hrq->host_index = 0;
16542 hrq->hba_index = 0;
16543 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16545 drq->db_format = LPFC_DB_RING_FORMAT;
16546 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16547 drq->type = LPFC_DRQ;
16548 drq->assoc_qid = cq->queue_id;
16549 drq->subtype = subtype;
16550 drq->host_index = 0;
16551 drq->hba_index = 0;
16552 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16554 list_add_tail(&hrq->list, &cq->child_list);
16555 list_add_tail(&drq->list, &cq->child_list);
16558 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16559 /* The IOCTL status is embedded in the mailbox subheader. */
16560 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16561 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16562 if (shdr_status || shdr_add_status || rc) {
16563 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16564 "3120 RQ_CREATE mailbox failed with "
16565 "status x%x add_status x%x, mbx status x%x\n",
16566 shdr_status, shdr_add_status, rc);
16570 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16571 if (rc == 0xFFFF) {
16576 /* Initialize all RQs with associated queue id */
16577 for (idx = 0; idx < numrq; idx++) {
16579 hrq->queue_id = rc + (2 * idx);
16581 drq->queue_id = rc + (2 * idx) + 1;
16585 lpfc_sli4_mbox_cmd_free(phba, mbox);
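/*
 * Worked example (hypothetical helper): the CREATE_RQ_SET response above
 * carries a single base queue id for all 2 * numrq queues, and the final
 * loop derives every id from it. With base id 100 and idx 1, the pair is
 * hrq id 102 and drq id 103, i.e. each header/data pair occupies adjacent
 * ids.
 */
static inline void lpfc_example_mrq_ids(uint16_t base, int idx,
					uint16_t *hrq_id, uint16_t *drq_id)
{
	*hrq_id = base + (2 * idx);		/* header RQ of pair @idx */
	*drq_id = base + (2 * idx) + 1;		/* matching data RQ */
}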
16590 * lpfc_eq_destroy - Destroy an event Queue on the HBA
16591 * @phba: HBA structure that indicates port to destroy a queue on.
16592 * @eq: The queue structure associated with the queue to destroy.
16594 * This function destroys a queue, as detailed in @eq, by sending a mailbox
16595 * command, specific to the type of queue, to the HBA.
16597 * The @eq struct is used to get the queue ID of the queue to destroy.
16599 * On success this function will return a zero. If the queue destroy mailbox
16600 * command fails this function will return -ENXIO.
16603 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16605 LPFC_MBOXQ_t *mbox;
16606 int rc, length, status = 0;
16607 uint32_t shdr_status, shdr_add_status;
16608 union lpfc_sli4_cfg_shdr *shdr;
16610 /* sanity check on queue memory */
16614 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16617 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16618 sizeof(struct lpfc_sli4_cfg_mhdr));
16619 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16620 LPFC_MBOX_OPCODE_EQ_DESTROY,
16621 length, LPFC_SLI4_MBX_EMBED);
16622 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16624 mbox->vport = eq->phba->pport;
16625 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16627 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16628 /* The IOCTL status is embedded in the mailbox subheader. */
16629 shdr = (union lpfc_sli4_cfg_shdr *)
16630 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16631 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16632 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16633 if (shdr_status || shdr_add_status || rc) {
16634 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16635 "2505 EQ_DESTROY mailbox failed with "
16636 "status x%x add_status x%x, mbx status x%x\n",
16637 shdr_status, shdr_add_status, rc);
16641 /* Remove eq from any list */
16642 list_del_init(&eq->list);
16643 mempool_free(mbox, eq->phba->mbox_mem_pool);
16648 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
16649 * @phba: HBA structure that indicates port to destroy a queue on.
16650 * @cq: The queue structure associated with the queue to destroy.
16652 * This function destroys a queue, as detailed in @cq, by sending a mailbox
16653 * command, specific to the type of queue, to the HBA.
16655 * The @cq struct is used to get the queue ID of the queue to destroy.
16657 * On success this function will return a zero. If the queue destroy mailbox
16658 * command fails this function will return -ENXIO.
16661 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16663 LPFC_MBOXQ_t *mbox;
16664 int rc, length, status = 0;
16665 uint32_t shdr_status, shdr_add_status;
16666 union lpfc_sli4_cfg_shdr *shdr;
16668 /* sanity check on queue memory */
16671 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16674 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16675 sizeof(struct lpfc_sli4_cfg_mhdr));
16676 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16677 LPFC_MBOX_OPCODE_CQ_DESTROY,
16678 length, LPFC_SLI4_MBX_EMBED);
16679 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16681 mbox->vport = cq->phba->pport;
16682 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16683 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16684 /* The IOCTL status is embedded in the mailbox subheader. */
16685 shdr = (union lpfc_sli4_cfg_shdr *)
16686 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
16687 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16688 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16689 if (shdr_status || shdr_add_status || rc) {
16690 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16691 "2506 CQ_DESTROY mailbox failed with "
16692 "status x%x add_status x%x, mbx status x%x\n",
16693 shdr_status, shdr_add_status, rc);
16696 /* Remove cq from any list */
16697 list_del_init(&cq->list);
16698 mempool_free(mbox, cq->phba->mbox_mem_pool);
16703 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
16704 * @phba: HBA structure that indicates port to destroy a queue on.
16705 * @mq: The queue structure associated with the queue to destroy.
16707 * This function destroys a queue, as detailed in @mq, by sending a mailbox
16708 * command, specific to the type of queue, to the HBA.
16710 * The @mq struct is used to get the queue ID of the queue to destroy.
16712 * On success this function will return a zero. If the queue destroy mailbox
16713 * command fails this function will return -ENXIO.
16716 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16718 LPFC_MBOXQ_t *mbox;
16719 int rc, length, status = 0;
16720 uint32_t shdr_status, shdr_add_status;
16721 union lpfc_sli4_cfg_shdr *shdr;
16723 /* sanity check on queue memory */
16726 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16729 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16730 sizeof(struct lpfc_sli4_cfg_mhdr));
16731 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16732 LPFC_MBOX_OPCODE_MQ_DESTROY,
16733 length, LPFC_SLI4_MBX_EMBED);
16734 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16736 mbox->vport = mq->phba->pport;
16737 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16738 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16739 /* The IOCTL status is embedded in the mailbox subheader. */
16740 shdr = (union lpfc_sli4_cfg_shdr *)
16741 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16742 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16743 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16744 if (shdr_status || shdr_add_status || rc) {
16745 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16746 "2507 MQ_DESTROY mailbox failed with "
16747 "status x%x add_status x%x, mbx status x%x\n",
16748 shdr_status, shdr_add_status, rc);
16751 /* Remove mq from any list */
16752 list_del_init(&mq->list);
16753 mempool_free(mbox, mq->phba->mbox_mem_pool);
16758 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
16759 * @phba: HBA structure that indicates port to destroy a queue on.
16760 * @wq: The queue structure associated with the queue to destroy.
16762 * This function destroys a queue, as detailed in @wq, by sending a mailbox
16763 * command, specific to the type of queue, to the HBA.
16765 * The @wq struct is used to get the queue ID of the queue to destroy.
16767 * On success this function will return a zero. If the queue destroy mailbox
16768 * command fails this function will return -ENXIO.
16771 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16773 LPFC_MBOXQ_t *mbox;
16774 int rc, length, status = 0;
16775 uint32_t shdr_status, shdr_add_status;
16776 union lpfc_sli4_cfg_shdr *shdr;
16778 /* sanity check on queue memory */
16781 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16784 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16785 sizeof(struct lpfc_sli4_cfg_mhdr));
16786 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16787 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16788 length, LPFC_SLI4_MBX_EMBED);
16789 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16791 mbox->vport = wq->phba->pport;
16792 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16793 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16794 shdr = (union lpfc_sli4_cfg_shdr *)
16795 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16796 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16797 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16798 if (shdr_status || shdr_add_status || rc) {
16799 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16800 "2508 WQ_DESTROY mailbox failed with "
16801 "status x%x add_status x%x, mbx status x%x\n",
16802 shdr_status, shdr_add_status, rc);
16805 /* Remove wq from any list */
16806 list_del_init(&wq->list);
16809 mempool_free(mbox, wq->phba->mbox_mem_pool);
16814 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
16815 * @phba: HBA structure that indicates port to destroy a queue on.
16816 * @hrq: The queue structure associated with the queue to destroy.
16817 * @drq: The queue structure associated with the queue to destroy.
16819 * This function destroys a receive queue pair, as detailed in @hrq and @drq,
16820 * by sending a mailbox command, specific to the type of queue, to the HBA.
16822 * The @hrq and @drq structs are used to get the queue IDs of the queues to destroy.
16824 * On success this function will return a zero. If the queue destroy mailbox
16825 * command fails this function will return -ENXIO.
16828 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16829 struct lpfc_queue *drq)
16831 LPFC_MBOXQ_t *mbox;
16832 int rc, length, status = 0;
16833 uint32_t shdr_status, shdr_add_status;
16834 union lpfc_sli4_cfg_shdr *shdr;
16836 /* sanity check on queue memory */
16839 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16842 length = (sizeof(struct lpfc_mbx_rq_destroy) -
16843 sizeof(struct lpfc_sli4_cfg_mhdr));
16844 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16845 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16846 length, LPFC_SLI4_MBX_EMBED);
16847 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16849 mbox->vport = hrq->phba->pport;
16850 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16851 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16852 /* The IOCTL status is embedded in the mailbox subheader. */
16853 shdr = (union lpfc_sli4_cfg_shdr *)
16854 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16855 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16856 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16857 if (shdr_status || shdr_add_status || rc) {
16858 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16859 "2509 RQ_DESTROY mailbox failed with "
16860 "status x%x add_status x%x, mbx status x%x\n",
16861 shdr_status, shdr_add_status, rc);
16862 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16865 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16867 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16868 shdr = (union lpfc_sli4_cfg_shdr *)
16869 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16870 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16871 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16872 if (shdr_status || shdr_add_status || rc) {
16873 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16874 "2510 RQ_DESTROY mailbox failed with "
16875 "status x%x add_status x%x, mbx status x%x\n",
16876 shdr_status, shdr_add_status, rc);
16879 list_del_init(&hrq->list);
16880 list_del_init(&drq->list);
16881 mempool_free(mbox, hrq->phba->mbox_mem_pool);
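/*
 * Illustrative teardown sketch (hypothetical helper, not driver code): each
 * create routine links the new queue onto its parent's child_list, so
 * destruction runs in the reverse order of creation -- WQs and RQ pairs
 * first, then their CQ, then the parent EQ -- using the destroy routines
 * above. Each of them returns -ENXIO if its destroy mailbox fails.
 */
static void lpfc_example_teardown(struct lpfc_hba *phba,
				  struct lpfc_queue *wq,
				  struct lpfc_queue *cq,
				  struct lpfc_queue *eq)
{
	lpfc_wq_destroy(phba, wq);	/* leaf queue first */
	lpfc_cq_destroy(phba, cq);	/* then the parent CQ */
	lpfc_eq_destroy(phba, eq);	/* EQ last, once its child_list is empty */
}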
16886 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16887 * @phba: pointer to lpfc hba data structure.
16888 * @pdma_phys_addr0: Physical address of the 1st SGL page.
16889 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16890 * @xritag: the xritag that ties this io to the SGL pages.
16892 * This routine will post the sgl pages for the IO that has the xritag
16893 * that is in the iocbq structure. The xritag is assigned during iocbq
16894 * creation and persists for as long as the driver is loaded.
16895 * If the caller has fewer than 256 scatter gather segments to map then
16896 * pdma_phys_addr1 should be 0.
16897 * If the caller needs to map more than 256 scatter gather segments then
16898 * pdma_phys_addr1 should be a valid physical address.
16899 * Physical addresses for SGLs must be 64 byte aligned.
16900 * If two SGLs are mapped then the first one must have 256 entries and
16901 * the second can have between 1 and 256 entries.
16905 * -ENXIO, -ENOMEM - Failure
16908 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16909 dma_addr_t pdma_phys_addr0,
16910 dma_addr_t pdma_phys_addr1,
16913 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16914 LPFC_MBOXQ_t *mbox;
16916 uint32_t shdr_status, shdr_add_status;
16918 union lpfc_sli4_cfg_shdr *shdr;
16920 if (xritag == NO_XRI) {
16921 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16922 "0364 Invalid param:\n");
16926 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16930 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16931 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16932 sizeof(struct lpfc_mbx_post_sgl_pages) -
16933 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16935 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16936 &mbox->u.mqe.un.post_sgl_pages;
16937 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16938 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16940 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16941 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16942 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16943 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16945 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16946 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16947 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16948 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16949 if (!phba->sli4_hba.intr_enable)
16950 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16952 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16953 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16955 /* The IOCTL status is embedded in the mailbox subheader. */
16956 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16957 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16958 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16959 if (!phba->sli4_hba.intr_enable)
16960 mempool_free(mbox, phba->mbox_mem_pool);
16961 else if (rc != MBX_TIMEOUT)
16962 mempool_free(mbox, phba->mbox_mem_pool);
16963 if (shdr_status || shdr_add_status || rc) {
16964 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16965 "2511 POST_SGL mailbox failed with "
16966 "status x%x add_status x%x, mbx status x%x\n",
16967 shdr_status, shdr_add_status, rc);
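/*
 * Illustrative sketch (hypothetical helper): posting a single-page SGL for
 * an xri with lpfc_sli4_post_sgl() above. Per its kernel-doc, the second
 * page address is 0 when the sgl maps at most 256 scatter gather entries.
 * The sglq fields used here match those in lpfc_sli4_post_sgl_list() below.
 */
static int lpfc_example_post_one_sgl(struct lpfc_hba *phba,
				     struct lpfc_sglq *sglq)
{
	/* single-page SGL: no second page to report */
	return lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
}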
16973 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
16974 * @phba: pointer to lpfc hba data structure.
16976 * This routine is invoked to allocate the next available xri from the
16977 * driver's xri bitmask, consistent with the SLI-4 interface spec. The
16978 * index is logical, so the search starts at bit 0 on every call and the
16979 * first free bit found becomes the new xri.
16982 * A valid logical xri (0 <= xri < max_xri) if successful
16983 * NO_XRI if no xris are available.
16986 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16991 * Fetch the next logical xri. Because this index is logical,
16992 * the driver starts at 0 each time.
16994 spin_lock_irq(&phba->hbalock);
16995 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16996 phba->sli4_hba.max_cfg_param.max_xri, 0);
16997 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16998 spin_unlock_irq(&phba->hbalock);
17001 set_bit(xri, phba->sli4_hba.xri_bmask);
17002 phba->sli4_hba.max_cfg_param.xri_used++;
17004 spin_unlock_irq(&phba->hbalock);
17009 * __lpfc_sli4_free_xri - Release an xri for reuse.
17010 * @phba: pointer to lpfc hba data structure.
17011 * @xri: xri to release.
17013 * This routine is invoked to release an xri to the pool of
17014 * available xris maintained by the driver.
17017 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17019 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
17020 phba->sli4_hba.max_cfg_param.xri_used--;
17025 * lpfc_sli4_free_xri - Release an xri for reuse.
17026 * @phba: pointer to lpfc hba data structure.
17027 * @xri: xri to release.
17029 * This routine is invoked to release an xri to the pool of
17030 * available xris maintained by the driver.
17033 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17035 spin_lock_irq(&phba->hbalock);
17036 __lpfc_sli4_free_xri(phba, xri);
17037 spin_unlock_irq(&phba->hbalock);
17041 * lpfc_sli4_next_xritag - Get an xritag for the io
17042 * @phba: Pointer to HBA context object.
17044 * This function gets an xritag for the iocb. If there is no unused xritag
17045 * it will return NO_XRI (0xffff).
17046 * The function returns the allocated xritag if successful, else returns
17047 * NO_XRI. NO_XRI is not a valid xritag.
17048 * The caller is not required to hold any lock.
17051 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
17053 uint16_t xri_index;
17055 xri_index = lpfc_sli4_alloc_xri(phba);
17056 if (xri_index == NO_XRI)
17057 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17058 "2004 Failed to allocate XRI.last XRITAG is %d"
17059 " Max XRI is %d, Used XRI is %d\n",
17061 phba->sli4_hba.max_cfg_param.max_xri,
17062 phba->sli4_hba.max_cfg_param.xri_used);
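/*
 * Illustrative xri lifecycle (hypothetical helper): allocate a tag with
 * lpfc_sli4_next_xritag() above, use it to post an SGL, and return it to
 * the bitmask with lpfc_sli4_free_xri() if the post fails.
 */
static int lpfc_example_xri_cycle(struct lpfc_hba *phba, dma_addr_t sgl_phys)
{
	uint16_t xri = lpfc_sli4_next_xritag(phba);
	int rc;

	if (xri == NO_XRI)
		return -ENOMEM;			/* xri pool exhausted */

	rc = lpfc_sli4_post_sgl(phba, sgl_phys, 0, xri);
	if (rc)
		lpfc_sli4_free_xri(phba, xri);	/* give the tag back */
	return rc;
}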
17067 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
17068 * @phba: pointer to lpfc hba data structure.
17069 * @post_sgl_list: pointer to els sgl entry list.
17070 * @post_cnt: number of els sgl entries on the list.
17072 * This routine is invoked to post a block of the driver's sgl pages to the
17073 * HBA using a non-embedded mailbox command. No lock is held. This routine
17074 * is only called when the driver is loading and after all IO has been stopped.
17078 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
17079 struct list_head *post_sgl_list,
17082 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
17083 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17084 struct sgl_page_pairs *sgl_pg_pairs;
17086 LPFC_MBOXQ_t *mbox;
17087 uint32_t reqlen, alloclen, pg_pairs;
17089 uint16_t xritag_start = 0;
17091 uint32_t shdr_status, shdr_add_status;
17092 union lpfc_sli4_cfg_shdr *shdr;
17094 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
17095 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17096 if (reqlen > SLI4_PAGE_SIZE) {
17097 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17098 "2559 Block sgl registration required DMA "
17099 "size (%d) great than a page\n", reqlen);
17103 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17107 /* Allocate DMA memory and set up the non-embedded mailbox command */
17108 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17109 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
17110 LPFC_SLI4_MBX_NEMBED);
17112 if (alloclen < reqlen) {
17113 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17114 "0285 Allocated DMA memory size (%d) is "
17115 "less than the requested DMA memory "
17116 "size (%d)\n", alloclen, reqlen);
17117 lpfc_sli4_mbox_cmd_free(phba, mbox);
17120 /* Set up the SGL pages in the non-embedded DMA pages */
17121 viraddr = mbox->sge_array->addr[0];
17122 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17123 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17126 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
17127 /* Set up the sge entry */
17128 sgl_pg_pairs->sgl_pg0_addr_lo =
17129 cpu_to_le32(putPaddrLow(sglq_entry->phys));
17130 sgl_pg_pairs->sgl_pg0_addr_hi =
17131 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
17132 sgl_pg_pairs->sgl_pg1_addr_lo =
17133 cpu_to_le32(putPaddrLow(0));
17134 sgl_pg_pairs->sgl_pg1_addr_hi =
17135 cpu_to_le32(putPaddrHigh(0));
17137 /* Keep the first xritag on the list */
17139 xritag_start = sglq_entry->sli4_xritag;
17144 /* Complete initialization and perform endian conversion. */
17145 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17146 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
17147 sgl->word0 = cpu_to_le32(sgl->word0);
17149 if (!phba->sli4_hba.intr_enable)
17150 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17152 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17153 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17155 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
17156 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17157 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17158 if (!phba->sli4_hba.intr_enable)
17159 lpfc_sli4_mbox_cmd_free(phba, mbox);
17160 else if (rc != MBX_TIMEOUT)
17161 lpfc_sli4_mbox_cmd_free(phba, mbox);
17162 if (shdr_status || shdr_add_status || rc) {
17163 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17164 "2513 POST_SGL_BLOCK mailbox command failed "
17165 "status x%x add_status x%x mbx status x%x\n",
17166 shdr_status, shdr_add_status, rc);
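/*
 * Worked capacity example (hypothetical helper): the request built by
 * lpfc_sli4_post_sgl_list() above must fit in one SLI4_PAGE_SIZE
 * non-embedded page, so the largest acceptable post_cnt is bounded as
 * computed below (reqlen = post_cnt * pair + cfg_shdr + trailing word0).
 */
static inline uint32_t lpfc_example_max_sgl_post(void)
{
	return (SLI4_PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr) -
		sizeof(uint32_t)) / sizeof(struct sgl_page_pairs);
}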
17173 * lpfc_sli4_post_io_sgl_block - post a block of IO buffer sgls to firmware
17174 * @phba: pointer to lpfc hba data structure.
17175 * @nblist: pointer to the IO buffer list.
17176 * @count: number of IO buffers on the list.
17178 * This routine is invoked to post a block of @count IO buffer sgl pages from
17179 * the buffer list @nblist to the HBA using a non-embedded mailbox command.
17184 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
17187 struct lpfc_io_buf *lpfc_ncmd;
17188 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17189 struct sgl_page_pairs *sgl_pg_pairs;
17191 LPFC_MBOXQ_t *mbox;
17192 uint32_t reqlen, alloclen, pg_pairs;
17194 uint16_t xritag_start = 0;
17196 uint32_t shdr_status, shdr_add_status;
17197 dma_addr_t pdma_phys_bpl1;
17198 union lpfc_sli4_cfg_shdr *shdr;
17200 /* Calculate the requested length of the dma memory */
17201 reqlen = count * sizeof(struct sgl_page_pairs) +
17202 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17203 if (reqlen > SLI4_PAGE_SIZE) {
17204 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
17205 "6118 Block sgl registration required DMA "
17206 "size (%d) great than a page\n", reqlen);
17209 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17211 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17212 "6119 Failed to allocate mbox cmd memory\n");
17216 /* Allocate DMA memory and set up the non-embedded mailbox command */
17217 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17218 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17219 reqlen, LPFC_SLI4_MBX_NEMBED);
17221 if (alloclen < reqlen) {
17222 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17223 "6120 Allocated DMA memory size (%d) is "
17224 "less than the requested DMA memory "
17225 "size (%d)\n", alloclen, reqlen);
17226 lpfc_sli4_mbox_cmd_free(phba, mbox);
17230 /* Get the first SGE entry from the non-embedded DMA memory */
17231 viraddr = mbox->sge_array->addr[0];
17233 /* Set up the SGL pages in the non-embedded DMA pages */
17234 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17235 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17238 list_for_each_entry(lpfc_ncmd, nblist, list) {
17239 /* Set up the sge entry */
17240 sgl_pg_pairs->sgl_pg0_addr_lo =
17241 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
17242 sgl_pg_pairs->sgl_pg0_addr_hi =
17243 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
17244 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
17245 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
17248 pdma_phys_bpl1 = 0;
17249 sgl_pg_pairs->sgl_pg1_addr_lo =
17250 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
17251 sgl_pg_pairs->sgl_pg1_addr_hi =
17252 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
17253 /* Keep the first xritag on the list */
17255 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
17259 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17260 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
17261 /* Perform endian conversion if necessary */
17262 sgl->word0 = cpu_to_le32(sgl->word0);
17264 if (!phba->sli4_hba.intr_enable) {
17265 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17267 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17268 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17270 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
17271 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17272 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17273 if (!phba->sli4_hba.intr_enable)
17274 lpfc_sli4_mbox_cmd_free(phba, mbox);
17275 else if (rc != MBX_TIMEOUT)
17276 lpfc_sli4_mbox_cmd_free(phba, mbox);
17277 if (shdr_status || shdr_add_status || rc) {
17278 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17279 "6125 POST_SGL_BLOCK mailbox command failed "
17280 "status x%x add_status x%x mbx status x%x\n",
17281 shdr_status, shdr_add_status, rc);
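/*
 * Illustrative sketch (not part of the driver): when cfg_sg_dma_buf_size
 * spans more than one SGL page, the second page-pair address set up above
 * is, assuming the layout the check suggests, the first address advanced
 * by one SGL page; otherwise it is zero. Hypothetical helper name.
 */
static inline dma_addr_t lpfc_example_sgl_pg1_addr(dma_addr_t pg0,
						   uint32_t buf_size)
{
	return (buf_size > SGL_PAGE_SIZE) ? (pg0 + SGL_PAGE_SIZE) : 0;
}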
17288 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
17289 * @phba: pointer to lpfc hba data structure.
17290 * @post_nblist: pointer to the nvme buffer list.
17291 * @sb_count: number of nvme buffers.
17293 * This routine walks a list of nvme buffers that was passed in. It attempts
17294 * to construct blocks of nvme buffer sgls which contain contiguous xris and
17295 * uses the non-embedded SGL block post mailbox commands to post to the port.
17296 * For any single NVME buffer sgl with a non-contiguous xri, it uses the
17297 * embedded SGL post mailbox command for posting. The @post_nblist passed in
17298 * must be a local list, so no lock is needed when manipulating the list.
17300 * Returns: 0 = failure, non-zero number of successfully posted buffers.
17303 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
17304 struct list_head *post_nblist, int sb_count)
17306 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
17307 int status, sgl_size;
17308 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
17309 dma_addr_t pdma_phys_sgl1;
17310 int last_xritag = NO_XRI;
17312 LIST_HEAD(prep_nblist);
17313 LIST_HEAD(blck_nblist);
17314 LIST_HEAD(nvme_nblist);
17320 sgl_size = phba->cfg_sg_dma_buf_size;
17321 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
17322 list_del_init(&lpfc_ncmd->list);
17324 if ((last_xritag != NO_XRI) &&
17325 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
17326 /* a hole in xri block, form a sgl posting block */
17327 list_splice_init(&prep_nblist, &blck_nblist);
17328 post_cnt = block_cnt - 1;
17329 /* prepare list for next posting block */
17330 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17333 /* prepare list for next posting block */
17334 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17335 /* enough sgls for non-embed sgl mbox command */
17336 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
17337 list_splice_init(&prep_nblist, &blck_nblist);
17338 post_cnt = block_cnt;
17343 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17345 /* end of repost sgl list condition for NVME buffers */
17346 if (num_posting == sb_count) {
17347 if (post_cnt == 0) {
17348 /* last sgl posting block */
17349 list_splice_init(&prep_nblist, &blck_nblist);
17350 post_cnt = block_cnt;
17351 } else if (block_cnt == 1) {
17352 /* last single sgl with non-contiguous xri */
17353 if (sgl_size > SGL_PAGE_SIZE)
17355 lpfc_ncmd->dma_phys_sgl +
17358 pdma_phys_sgl1 = 0;
17359 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17360 status = lpfc_sli4_post_sgl(
17361 phba, lpfc_ncmd->dma_phys_sgl,
17362 pdma_phys_sgl1, cur_xritag);
17364 /* Post error. Buffer unavailable. */
17365 lpfc_ncmd->flags |=
17366 LPFC_SBUF_NOT_POSTED;
17368 /* Post success. Buffer available. */
17369 lpfc_ncmd->flags &=
17370 ~LPFC_SBUF_NOT_POSTED;
17371 lpfc_ncmd->status = IOSTAT_SUCCESS;
17374 /* success, put on NVME buffer sgl list */
17375 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17379 /* continue until a nembed page worth of sgls */
17383 /* post block of NVME buffer list sgls */
17384 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
17388 /* don't reset xritag due to hole in xri block */
17388 if (block_cnt == 0)
17389 last_xritag = NO_XRI;
17391 /* reset NVME buffer post count for next round of posting */
17394 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */
17395 while (!list_empty(&blck_nblist)) {
17396 list_remove_head(&blck_nblist, lpfc_ncmd,
17397 struct lpfc_io_buf, list);
17399 /* Post error. Mark buffer unavailable. */
17400 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
17402 /* Post success, Mark buffer available. */
17403 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
17404 lpfc_ncmd->status = IOSTAT_SUCCESS;
17407 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17410 /* Push NVME buffers with sgl posted to the available list */
17411 lpfc_io_buf_replenish(phba, &nvme_nblist);
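/*
 * Illustrative sketch (not part of the driver): the block-building rule
 * used by lpfc_sli4_post_io_sgl_list above. A buffer joins the current
 * posting block only while XRIs stay contiguous; any hole closes the
 * block. Hypothetical helper name.
 */
static inline bool lpfc_example_xri_breaks_block(int last_xritag,
						 uint16_t next_xritag)
{
	/* NO_XRI means no block is open yet, so nothing can break */
	return (last_xritag != NO_XRI) &&
	       (next_xritag != last_xritag + 1);
}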
17417 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
17418 * @phba: pointer to lpfc_hba struct that the frame was received on
17419 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17421 * This function checks the fields in the @fc_hdr to see if the FC frame is a
17422 * valid type of frame that the LPFC driver will handle. This function will
17423 * return zero if the frame is a valid frame or a nonzero value when the
17424 * frame does not pass the check.
17427 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
17429 /* make rctl_names static to save stack space */
17430 struct fc_vft_header *fc_vft_hdr;
17431 uint32_t *header = (uint32_t *) fc_hdr;
17433 #define FC_RCTL_MDS_DIAGS 0xF4
17435 switch (fc_hdr->fh_r_ctl) {
17436 case FC_RCTL_DD_UNCAT: /* uncategorized information */
17437 case FC_RCTL_DD_SOL_DATA: /* solicited data */
17438 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
17439 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
17440 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
17441 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
17442 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
17443 case FC_RCTL_DD_CMD_STATUS: /* command status */
17444 case FC_RCTL_ELS_REQ: /* extended link services request */
17445 case FC_RCTL_ELS_REP: /* extended link services reply */
17446 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
17447 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
17448 case FC_RCTL_BA_ABTS: /* basic link service abort */
17449 case FC_RCTL_BA_RMC: /* remove connection */
17450 case FC_RCTL_BA_ACC: /* basic accept */
17451 case FC_RCTL_BA_RJT: /* basic reject */
17452 case FC_RCTL_BA_PRMT:
17453 case FC_RCTL_ACK_1: /* acknowledge_1 */
17454 case FC_RCTL_ACK_0: /* acknowledge_0 */
17455 case FC_RCTL_P_RJT: /* port reject */
17456 case FC_RCTL_F_RJT: /* fabric reject */
17457 case FC_RCTL_P_BSY: /* port busy */
17458 case FC_RCTL_F_BSY: /* fabric busy to data frame */
17459 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
17460 case FC_RCTL_LCR: /* link credit reset */
17461 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
17462 case FC_RCTL_END: /* end */
17464 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
17465 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17466 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
17467 return lpfc_fc_frame_check(phba, fc_hdr);
17468 case FC_RCTL_BA_NOP: /* basic link service NOP */
17473 switch (fc_hdr->fh_type) {
17486 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
17487 "2538 Received frame rctl:x%x, type:x%x, "
17488 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
17489 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
17490 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
17491 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
17492 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
17493 be32_to_cpu(header[6]));
17496 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
17497 "2539 Dropped frame rctl:x%x type:x%x\n",
17498 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17503 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
17504 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17506 * This function processes the FC header to retrieve the VFI from the VF
17507 * header, if one exists. This function will return the VFI if one exists
17508 * or 0 if no VSAN Header exists.
17511 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17513 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17515 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17517 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17521 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
17522 * @phba: Pointer to the HBA structure to search for the vport on
17523 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17524 * @fcfi: The FC Fabric ID that the frame came from
17525 * @did: Destination ID to match against
17527 * This function searches the @phba for a vport that matches the content of the
17528 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
17529 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
17530 * returns the matching vport pointer or NULL if unable to match the frame to a vport.
17533 static struct lpfc_vport *
17534 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
17535 uint16_t fcfi, uint32_t did)
17537 struct lpfc_vport **vports;
17538 struct lpfc_vport *vport = NULL;
17541 if (did == Fabric_DID)
17542 return phba->pport;
17543 if ((phba->pport->fc_flag & FC_PT2PT) &&
17544 !(phba->link_state == LPFC_HBA_READY))
17545 return phba->pport;
17547 vports = lpfc_create_vport_work_array(phba);
17548 if (vports != NULL) {
17549 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17550 if (phba->fcf.fcfi == fcfi &&
17551 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17552 vports[i]->fc_myDID == did) {
17558 lpfc_destroy_vport_work_array(phba, vports);
17563 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
17564 * @vport: The vport to work on.
17566 * This function updates the receive sequence time stamp for this vport. The
17567 * receive sequence time stamp indicates the time that the last frame of
17568 * the sequence that has been idle for the longest amount of time was received.
17569 * The driver uses this time stamp to indicate if any received sequences have
17573 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17575 struct lpfc_dmabuf *h_buf;
17576 struct hbq_dmabuf *dmabuf = NULL;
17578 /* get the oldest sequence on the rcv list */
17579 h_buf = list_get_first(&vport->rcv_buffer_list,
17580 struct lpfc_dmabuf, list);
17583 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17584 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17588 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17589 * @vport: The vport that the received sequences were sent to.
17591 * This function cleans up all outstanding received sequences. This is called
17592 * by the driver when a link event or user action invalidates all the received
17596 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17598 struct lpfc_dmabuf *h_buf, *hnext;
17599 struct lpfc_dmabuf *d_buf, *dnext;
17600 struct hbq_dmabuf *dmabuf = NULL;
17602 /* start with the oldest sequence on the rcv list */
17603 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17604 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17605 list_del_init(&dmabuf->hbuf.list);
17606 list_for_each_entry_safe(d_buf, dnext,
17607 &dmabuf->dbuf.list, list) {
17608 list_del_init(&d_buf->list);
17609 lpfc_in_buf_free(vport->phba, d_buf);
17611 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17616 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
17617 * @vport: The vport that the received sequences were sent to.
17619 * This function determines whether any received sequences have timed out by
17620 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
17621 * indicates that there is at least one timed out sequence this routine will
17622 * go through the received sequences one at a time from most inactive to most
17623 * active to determine which ones need to be cleaned up. Once it has determined
17624 * that a sequence needs to be cleaned up it will simply free up the resources
17625 * without sending an abort.
17628 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17630 struct lpfc_dmabuf *h_buf, *hnext;
17631 struct lpfc_dmabuf *d_buf, *dnext;
17632 struct hbq_dmabuf *dmabuf = NULL;
17633 unsigned long timeout;
17634 int abort_count = 0;
17636 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17637 vport->rcv_buffer_time_stamp);
17638 if (list_empty(&vport->rcv_buffer_list) ||
17639 time_before(jiffies, timeout))
17641 /* start with the oldest sequence on the rcv list */
17642 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17643 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17644 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17645 dmabuf->time_stamp);
17646 if (time_before(jiffies, timeout))
17649 list_del_init(&dmabuf->hbuf.list);
17650 list_for_each_entry_safe(d_buf, dnext,
17651 &dmabuf->dbuf.list, list) {
17652 list_del_init(&d_buf->list);
17653 lpfc_in_buf_free(vport->phba, d_buf);
17655 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17658 lpfc_update_rcv_time_stamp(vport);
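/*
 * Illustrative sketch (not part of the driver): the E_D_TOV expiry test
 * applied per sequence above, factored out. A sequence is considered
 * timed out once fc_edtov milliseconds have elapsed since its last frame
 * arrived. Hypothetical helper name.
 */
static inline bool lpfc_example_seq_timed_out(struct lpfc_vport *vport,
					      unsigned long time_stamp)
{
	return !time_before(jiffies,
			    time_stamp +
			    msecs_to_jiffies(vport->phba->fc_edtov));
}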
17662 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
17663 * @vport: pointer to a virtual port
17664 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
17666 * This function searches through the existing incomplete sequences that have
17667 * been sent to this @vport. If the frame matches one of the incomplete
17668 * sequences then the dbuf in the @dmabuf is added to the list of frames that
17669 * make up that sequence. If no sequence is found that matches this frame then
17670 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
17671 * This function returns a pointer to the first dmabuf in the sequence list that
17672 * the frame was linked to.
17674 static struct hbq_dmabuf *
17675 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17677 struct fc_frame_header *new_hdr;
17678 struct fc_frame_header *temp_hdr;
17679 struct lpfc_dmabuf *d_buf;
17680 struct lpfc_dmabuf *h_buf;
17681 struct hbq_dmabuf *seq_dmabuf = NULL;
17682 struct hbq_dmabuf *temp_dmabuf = NULL;
17685 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17686 dmabuf->time_stamp = jiffies;
17687 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17689 /* Use the hdr_buf to find the sequence that this frame belongs to */
17690 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17691 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17692 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17693 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17694 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17696 /* found a pending sequence that matches this frame */
17697 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17702 * This indicates first frame received for this sequence.
17703 * Queue the buffer on the vport's rcv_buffer_list.
17705 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17706 lpfc_update_rcv_time_stamp(vport);
17709 temp_hdr = seq_dmabuf->hbuf.virt;
17710 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17711 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17712 list_del_init(&seq_dmabuf->hbuf.list);
17713 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17714 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17715 lpfc_update_rcv_time_stamp(vport);
17718 /* move this sequence to the tail to indicate a young sequence */
17719 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17720 seq_dmabuf->time_stamp = jiffies;
17721 lpfc_update_rcv_time_stamp(vport);
17722 if (list_empty(&seq_dmabuf->dbuf.list)) {
17723 temp_hdr = dmabuf->hbuf.virt;
17724 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17727 /* find the correct place in the sequence to insert this frame */
17728 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17730 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17731 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17733 * If the frame's sequence count is greater than the frame on
17734 * the list then insert the frame right after this frame
17736 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17737 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17738 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
17743 if (&d_buf->list == &seq_dmabuf->dbuf.list)
17745 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
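/*
 * Illustrative sketch (not part of the driver): the three-field match
 * lpfc_fc_frame_add() uses above to decide whether a frame belongs to a
 * pending sequence: same SEQ_ID, same OX_ID, and same 3-byte S_ID.
 * Hypothetical helper name.
 */
static inline bool lpfc_example_same_sequence(struct fc_frame_header *a,
					      struct fc_frame_header *b)
{
	return (a->fh_seq_id == b->fh_seq_id) &&
	       (a->fh_ox_id == b->fh_ox_id) &&
	       !memcmp(&a->fh_s_id, &b->fh_s_id, 3);
}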
17754 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
17755 * @vport: pointer to a virtual port
17756 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17758 * This function tries to abort the partially assembled sequence described
17759 * by the information from the basic abort @dmabuf. It checks to see whether
17760 * such a partially assembled sequence is held by the driver. If so, it frees
17761 * up all the frames from the partially assembled sequence.
17764 * true -- if a matching partially assembled sequence is present and all
17765 * the frames are freed with the sequence;
17766 * false -- if there is no matching partially assembled sequence present, so
17767 * nothing got aborted in the lower layer driver
17770 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17771 struct hbq_dmabuf *dmabuf)
17773 struct fc_frame_header *new_hdr;
17774 struct fc_frame_header *temp_hdr;
17775 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17776 struct hbq_dmabuf *seq_dmabuf = NULL;
17778 /* Use the hdr_buf to find the sequence that matches this frame */
17779 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17780 INIT_LIST_HEAD(&dmabuf->hbuf.list);
17781 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17782 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17783 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17784 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17785 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17786 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17788 /* found a pending sequence that matches this frame */
17789 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17793 /* Free up all the frames from the partially assembled sequence */
17795 list_for_each_entry_safe(d_buf, n_buf,
17796 &seq_dmabuf->dbuf.list, list) {
17797 list_del_init(&d_buf->list);
17798 lpfc_in_buf_free(vport->phba, d_buf);
17806 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
17807 * @vport: pointer to a virtual port
17808 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17810 * This function tries to abort the sequence that was assembled and passed to
17811 * the upper level protocol, described by the information from the basic abort
17812 * @dmabuf. It checks to see whether such a pending context exists at the
17813 * upper level protocol. If so, it cleans up the pending context.
17816 * true -- if a matching pending context of the sequence was cleaned up
17818 * false -- if there is no matching pending context of the sequence present
17822 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17824 struct lpfc_hba *phba = vport->phba;
17827 /* Accepting abort at ulp with SLI4 only */
17828 if (phba->sli_rev < LPFC_SLI_REV4)
17831 /* Register all caring upper level protocols to attend abort */
17832 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17840 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
17841 * @phba: Pointer to HBA context object.
17842 * @cmd_iocbq: pointer to the command iocbq structure.
17843 * @rsp_iocbq: pointer to the response iocbq structure.
17845 * This function handles the sequence abort response iocb command complete
17846 * event. It properly releases the memory allocated to the sequence abort
17850 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
17851 struct lpfc_iocbq *cmd_iocbq,
17852 struct lpfc_iocbq *rsp_iocbq)
17854 struct lpfc_nodelist *ndlp;
17857 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17858 lpfc_nlp_put(ndlp);
17859 lpfc_sli_release_iocbq(phba, cmd_iocbq);
17862 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
17863 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17864 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17865 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
17866 rsp_iocbq->iocb.ulpStatus,
17867 rsp_iocbq->iocb.un.ulpWord[4]);
17871 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
17872 * @phba: Pointer to HBA context object.
17873 * @xri: xri id in transaction.
17875 * This function validates that the xri maps to the known range of XRIs
17876 * allocated and used by the driver.
17879 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17884 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17885 if (xri == phba->sli4_hba.xri_ids[i])
17892 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
17893 * @vport: pointer to a virtual port.
17894 * @fc_hdr: pointer to a FC frame header.
17895 * @aborted: was the partially assembled receive sequence successfully aborted
17897 * This function sends a basic response to a previous unsol sequence abort
17898 * event after aborting the sequence handling.
17901 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17902 struct fc_frame_header *fc_hdr, bool aborted)
17904 struct lpfc_hba *phba = vport->phba;
17905 struct lpfc_iocbq *ctiocb = NULL;
17906 struct lpfc_nodelist *ndlp;
17907 uint16_t oxid, rxid, xri, lxri;
17908 uint32_t sid, fctl;
17912 if (!lpfc_is_link_up(phba))
17915 sid = sli4_sid_from_fc_hdr(fc_hdr);
17916 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
17917 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
17919 ndlp = lpfc_findnode_did(vport, sid);
17921 ndlp = lpfc_nlp_init(vport, sid);
17923 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17924 "1268 Failed to allocate ndlp for "
17925 "oxid:x%x SID:x%x\n", oxid, sid);
17928 /* Put ndlp onto pport node list */
17929 lpfc_enqueue_node(vport, ndlp);
17930 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17931 /* re-setup ndlp without removing from node list */
17932 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17934 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17935 "3275 Failed to active ndlp found "
17936 "for oxid:x%x SID:x%x\n", oxid, sid);
17941 /* Allocate buffer for rsp iocb */
17942 ctiocb = lpfc_sli_get_iocbq(phba);
17946 /* Extract the F_CTL field from FC_HDR */
17947 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17949 icmd = &ctiocb->iocb;
17950 icmd->un.xseq64.bdl.bdeSize = 0;
17951 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
17952 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17953 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17954 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17956 /* Fill in the rest of iocb fields */
17957 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17958 icmd->ulpBdeCount = 0;
17960 icmd->ulpClass = CLASS3;
17961 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
17962 ctiocb->context1 = lpfc_nlp_get(ndlp);
17964 ctiocb->vport = phba->pport;
17965 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
17966 ctiocb->sli4_lxritag = NO_XRI;
17967 ctiocb->sli4_xritag = NO_XRI;
17969 if (fctl & FC_FC_EX_CTX)
17970 /* Exchange responder sent the abort so we
17976 lxri = lpfc_sli4_xri_inrange(phba, xri);
17977 if (lxri != NO_XRI)
17978 lpfc_set_rrq_active(phba, ndlp, lxri,
17979 (xri == oxid) ? rxid : oxid, 0);
17980 /* For BA_ABTS from exchange responder, if the logical xri with
17981 * the oxid maps to the FCP XRI range, the port no longer has
17982 * that exchange context, send a BLS_RJT. Override the IOCB for
17985 if ((fctl & FC_FC_EX_CTX) &&
17986 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
17987 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17988 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17989 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17990 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17993 /* If BA_ABTS failed to abort a partially assembled receive sequence,
17994 * the driver no longer has that exchange, send a BLS_RJT. Override
17995 * the IOCB for a BA_RJT.
17997 if (aborted == false) {
17998 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17999 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
18000 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
18001 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
18004 if (fctl & FC_FC_EX_CTX) {
18005 /* ABTS sent by responder to CT exchange, construction
18006 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
18007 * field and RX_ID from ABTS for RX_ID field.
18009 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
18011 /* ABTS sent by initiator to CT exchange, construction
18012 * of BA_ACC will need to allocate a new XRI as for the
18015 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
18017 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
18018 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
18020 /* Xmit CT abts response on exchange <xid> */
18021 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
18022 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
18023 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
18025 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
18026 if (rc == IOCB_ERROR) {
18027 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
18028 "2925 Failed to issue CT ABTS RSP x%x on "
18029 "xri x%x, Data x%x\n",
18030 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
18032 lpfc_nlp_put(ndlp);
18033 ctiocb->context1 = NULL;
18034 lpfc_sli_release_iocbq(phba, ctiocb);
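/*
 * Illustrative sketch (not part of the driver): the R_CTL chosen for the
 * BLS response above, condensed. A BA_ACC is sent when the abort could be
 * honored; a BA_RJT is substituted when the exchange is unknown or the
 * partial sequence could not be cleaned up. Hypothetical helper name.
 */
static inline uint8_t lpfc_example_bls_rctl(bool aborted)
{
	return aborted ? FC_RCTL_BA_ACC : FC_RCTL_BA_RJT;
}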
18039 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
18040 * @vport: Pointer to the vport on which this sequence was received
18041 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18043 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
18044 * receive sequence is only partially assembled by the driver, it shall abort
18045 * the partially assembled frames for the sequence. Otherwise, if the
18046 * unsolicited receive sequence has been completely assembled and passed to
18047 * the Upper Layer Protocol (ULP), it then marks the per-oxid status for the
18048 * unsolicited sequence as aborted. After that, it issues a basic
18049 * accept (BA_ACC) for the abort.
18052 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
18053 struct hbq_dmabuf *dmabuf)
18055 struct lpfc_hba *phba = vport->phba;
18056 struct fc_frame_header fc_hdr;
18060 /* Make a copy of fc_hdr before the dmabuf being released */
18061 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
18062 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
18064 if (fctl & FC_FC_EX_CTX) {
18065 /* ABTS by responder to exchange, no cleanup needed */
18068 /* ABTS by initiator to exchange, need to do cleanup */
18069 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
18070 if (aborted == false)
18071 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
18073 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18075 if (phba->nvmet_support) {
18076 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
18080 /* Respond with BA_ACC or BA_RJT accordingly */
18081 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
18085 * lpfc_seq_complete - Indicates if a sequence is complete
18086 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18088 * This function checks the sequence, starting with the frame described by
18089 * @dmabuf, to see if all the frames associated with this sequence are present.
18090 * The frames associated with this sequence are linked to the @dmabuf using
18091 * the dbuf list. This function looks for three major things. 1) That the
18092 * first frame has a sequence count of zero. 2) That there is a frame with the
18093 * last-frame-of-sequence bit set. 3) That there are no holes in the sequence count. The function will
18094 * return 1 when the sequence is complete, otherwise it will return 0.
18097 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
18099 struct fc_frame_header *hdr;
18100 struct lpfc_dmabuf *d_buf;
18101 struct hbq_dmabuf *seq_dmabuf;
18105 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18106 /* make sure first frame of sequence has a sequence count of zero */
18107 if (hdr->fh_seq_cnt != seq_count)
18109 fctl = (hdr->fh_f_ctl[0] << 16 |
18110 hdr->fh_f_ctl[1] << 8 |
18112 /* If last frame of sequence we can return success. */
18113 if (fctl & FC_FC_END_SEQ)
18115 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
18116 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18117 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18118 /* If there is a hole in the sequence count then fail. */
18119 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
18121 fctl = (hdr->fh_f_ctl[0] << 16 |
18122 hdr->fh_f_ctl[1] << 8 |
18124 /* If last frame of sequence we can return success. */
18125 if (fctl & FC_FC_END_SEQ)
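/*
 * Illustrative sketch (not part of the driver): fh_f_ctl is a 3-byte
 * big-endian field, so the open-coded shifts above assemble it into a
 * host uint32_t before flags such as FC_FC_END_SEQ are tested.
 * Hypothetical helper name.
 */
static inline uint32_t lpfc_example_fctl(const struct fc_frame_header *hdr)
{
	return (hdr->fh_f_ctl[0] << 16) |
	       (hdr->fh_f_ctl[1] << 8) |
	       hdr->fh_f_ctl[2];
}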
18132 * lpfc_prep_seq - Prep sequence for ULP processing
18133 * @vport: Pointer to the vport on which this sequence was received
18134 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
18136 * This function takes a sequence, described by a list of frames, and creates
18137 * a list of iocbq structures to describe the sequence. This iocbq list will be
18138 * used to issue to the generic unsolicited sequence handler. This routine
18139 * returns a pointer to the first iocbq in the list. If the function is unable
18140 * to allocate an iocbq then it throws out the received frames that were not
18141 * able to be described and returns a pointer to the first iocbq. If unable to
18142 * allocate any iocbqs (including the first) this function will return NULL.
18144 static struct lpfc_iocbq *
18145 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
18147 struct hbq_dmabuf *hbq_buf;
18148 struct lpfc_dmabuf *d_buf, *n_buf;
18149 struct lpfc_iocbq *first_iocbq, *iocbq;
18150 struct fc_frame_header *fc_hdr;
18152 uint32_t len, tot_len;
18153 struct ulp_bde64 *pbde;
18155 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18156 /* remove from receive buffer list */
18157 list_del_init(&seq_dmabuf->hbuf.list);
18158 lpfc_update_rcv_time_stamp(vport);
18159 /* get the Remote Port's SID */
18160 sid = sli4_sid_from_fc_hdr(fc_hdr);
18162 /* Get an iocbq struct to fill in. */
18163 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
18165 /* Initialize the first IOCB. */
18166 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
18167 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
18168 first_iocbq->vport = vport;
18170 /* Check FC Header to see what TYPE of frame we are rcv'ing */
18171 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
18172 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
18173 first_iocbq->iocb.un.rcvels.parmRo =
18174 sli4_did_from_fc_hdr(fc_hdr);
18175 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
18177 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
18178 first_iocbq->iocb.ulpContext = NO_XRI;
18179 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
18180 be16_to_cpu(fc_hdr->fh_ox_id);
18181 /* iocbq is prepped for internal consumption. Physical vpi. */
18182 first_iocbq->iocb.unsli3.rcvsli3.vpi =
18183 vport->phba->vpi_ids[vport->vpi];
18184 /* put the first buffer into the first IOCBq */
18185 tot_len = bf_get(lpfc_rcqe_length,
18186 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
18188 first_iocbq->context2 = &seq_dmabuf->dbuf;
18189 first_iocbq->context3 = NULL;
18190 first_iocbq->iocb.ulpBdeCount = 1;
18191 if (tot_len > LPFC_DATA_BUF_SIZE)
18192 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
18193 LPFC_DATA_BUF_SIZE;
18195 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
18197 first_iocbq->iocb.un.rcvels.remoteID = sid;
18199 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
18201 iocbq = first_iocbq;
18203 * Each IOCBq can have two Buffers assigned, so go through the list
18204 * of buffers for this sequence and save two buffers in each IOCBq
18206 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
18208 lpfc_in_buf_free(vport->phba, d_buf);
18211 if (!iocbq->context3) {
18212 iocbq->context3 = d_buf;
18213 iocbq->iocb.ulpBdeCount++;
18214 /* We need to get the size out of the right CQE */
18215 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18216 len = bf_get(lpfc_rcqe_length,
18217 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18218 pbde = (struct ulp_bde64 *)
18219 &iocbq->iocb.unsli3.sli3Words[4];
18220 if (len > LPFC_DATA_BUF_SIZE)
18221 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
18223 pbde->tus.f.bdeSize = len;
18225 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
18228 iocbq = lpfc_sli_get_iocbq(vport->phba);
18231 first_iocbq->iocb.ulpStatus =
18232 IOSTAT_FCP_RSP_ERROR;
18233 first_iocbq->iocb.un.ulpWord[4] =
18234 IOERR_NO_RESOURCES;
18236 lpfc_in_buf_free(vport->phba, d_buf);
18239 /* We need to get the size out of the right CQE */
18240 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18241 len = bf_get(lpfc_rcqe_length,
18242 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18243 iocbq->context2 = d_buf;
18244 iocbq->context3 = NULL;
18245 iocbq->iocb.ulpBdeCount = 1;
18246 if (len > LPFC_DATA_BUF_SIZE)
18247 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
18248 LPFC_DATA_BUF_SIZE;
18250 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
18253 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
18255 iocbq->iocb.un.rcvels.remoteID = sid;
18256 list_add_tail(&iocbq->list, &first_iocbq->list);
18259 /* Free the sequence's header buffer */
18261 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
18263 return first_iocbq;
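/*
 * Illustrative sketch (not part of the driver): each BDE built in
 * lpfc_prep_seq() above describes at most one receive buffer's worth of
 * data, so per-buffer lengths are clamped to LPFC_DATA_BUF_SIZE.
 * Hypothetical helper name.
 */
static inline uint32_t lpfc_example_bde_size(uint32_t len)
{
	return (len > LPFC_DATA_BUF_SIZE) ? LPFC_DATA_BUF_SIZE : len;
}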
18267 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
18268 struct hbq_dmabuf *seq_dmabuf)
18270 struct fc_frame_header *fc_hdr;
18271 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
18272 struct lpfc_hba *phba = vport->phba;
18274 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18275 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
18277 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18278 "2707 Ring %d handler: Failed to allocate "
18279 "iocb Rctl x%x Type x%x received\n",
18281 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18284 if (!lpfc_complete_unsol_iocb(phba,
18285 phba->sli4_hba.els_wq->pring,
18286 iocbq, fc_hdr->fh_r_ctl,
18287 fc_hdr->fh_type)) {
18288 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18289 "2540 Ring %d handler: unexpected Rctl "
18290 "x%x Type x%x received\n",
18292 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18293 lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
18296 /* Free iocb created in lpfc_prep_seq */
18297 list_for_each_entry_safe(curr_iocb, next_iocb,
18298 &iocbq->list, list) {
18299 list_del_init(&curr_iocb->list);
18300 lpfc_sli_release_iocbq(phba, curr_iocb);
18302 lpfc_sli_release_iocbq(phba, iocbq);
18306 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
18307 struct lpfc_iocbq *rspiocb)
18309 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
18311 if (pcmd && pcmd->virt)
18312 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18314 lpfc_sli_release_iocbq(phba, cmdiocb);
18315 lpfc_drain_txq(phba);
18319 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
18320 struct hbq_dmabuf *dmabuf)
18322 struct fc_frame_header *fc_hdr;
18323 struct lpfc_hba *phba = vport->phba;
18324 struct lpfc_iocbq *iocbq = NULL;
18325 union lpfc_wqe *wqe;
18326 struct lpfc_dmabuf *pcmd = NULL;
18327 uint32_t frame_len;
18329 unsigned long iflags;
18331 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18332 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
18334 /* Send the received frame back */
18335 iocbq = lpfc_sli_get_iocbq(phba);
18337 /* Queue cq event and wakeup worker thread to process it */
18338 spin_lock_irqsave(&phba->hbalock, iflags);
18339 list_add_tail(&dmabuf->cq_event.list,
18340 &phba->sli4_hba.sp_queue_event);
18341 phba->hba_flag |= HBA_SP_QUEUE_EVT;
18342 spin_unlock_irqrestore(&phba->hbalock, iflags);
18343 lpfc_worker_wake_up(phba);
18347 /* Allocate buffer for command payload */
18348 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
18350 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
18352 if (!pcmd || !pcmd->virt)
18355 INIT_LIST_HEAD(&pcmd->list);
18357 /* copyin the payload */
18358 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
18360 /* fill in BDE's for command */
18361 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
18362 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
18363 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
18364 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
18366 iocbq->context2 = pcmd;
18367 iocbq->vport = vport;
18368 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
18369 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
18372 * Setup rest of the iocb as though it were a WQE
18373 * Build the SEND_FRAME WQE
18375 wqe = (union lpfc_wqe *)&iocbq->iocb;
18377 wqe->send_frame.frame_len = frame_len;
18378 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
18379 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
18380 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
18381 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
18382 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
18383 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
18385 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
18386 iocbq->iocb.ulpLe = 1;
18387 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
18388 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
18389 if (rc == IOCB_ERROR)
18392 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18396 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
18397 "2023 Unable to process MDS loopback frame\n");
18398 if (pcmd && pcmd->virt)
18399 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18402 lpfc_sli_release_iocbq(phba, iocbq);
18403 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18407 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
18408 * @phba: Pointer to HBA context object.
18409 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
18411 * This function is called with no lock held. This function processes all
18412 * the received buffers and gives it to upper layers when a received buffer
18413 * indicates that it is the final frame in the sequence. The interrupt
18414 * service routine processes received buffers at interrupt contexts.
18415 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
18416 * appropriate receive function when the final frame in a sequence is received.
18419 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
18420 struct hbq_dmabuf *dmabuf)
18422 struct hbq_dmabuf *seq_dmabuf;
18423 struct fc_frame_header *fc_hdr;
18424 struct lpfc_vport *vport;
18428 /* Process each received buffer */
18429 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18431 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
18432 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
18433 vport = phba->pport;
18434 /* Handle MDS Loopback frames */
18435 if (!(phba->pport->load_flag & FC_UNLOADING))
18436 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18438 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18442 /* check to see if this a valid type of frame */
18443 if (lpfc_fc_frame_check(phba, fc_hdr)) {
18444 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18448 if ((bf_get(lpfc_cqe_code,
18449 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
18450 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
18451 &dmabuf->cq_event.cqe.rcqe_cmpl);
18453 fcfi = bf_get(lpfc_rcqe_fcf_id,
18454 &dmabuf->cq_event.cqe.rcqe_cmpl);
18456 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
18457 vport = phba->pport;
18458 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18459 "2023 MDS Loopback %d bytes\n",
18460 bf_get(lpfc_rcqe_length,
18461 &dmabuf->cq_event.cqe.rcqe_cmpl));
18462 /* Handle MDS Loopback frames */
18463 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18467 /* d_id this frame is directed to */
18468 did = sli4_did_from_fc_hdr(fc_hdr);
18470 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
18472 /* throw out the frame */
18473 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18477 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
18478 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
18479 (did != Fabric_DID)) {
18481 * Throw out the frame if we are not pt2pt.
18482 * The pt2pt protocol allows for discovery frames
18483 * to be received without a registered VPI.
18485 if (!(vport->fc_flag & FC_PT2PT) ||
18486 (phba->link_state == LPFC_HBA_READY)) {
18487 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18492 /* Handle the basic abort sequence (BA_ABTS) event */
18493 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
18494 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
18498 /* Link this frame */
18499 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
18501 /* unable to add frame to vport - throw it out */
18502 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18505 /* If not last frame in sequence continue processing frames. */
18506 if (!lpfc_seq_complete(seq_dmabuf))
18509 /* Send the complete sequence to the upper layer protocol */
18510 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
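/*
 * Illustrative sketch (not part of the driver): V0 and V1 receive CQEs
 * carry the FCF id in different fields, which is why the code above
 * checks the CQE code before extracting fcfi. Hypothetical helper name;
 * it mirrors the bf_get() extraction used in the function above.
 */
static inline uint32_t lpfc_example_rcqe_fcfi(struct lpfc_rcqe *rcqe)
{
	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		return bf_get(lpfc_rcqe_fcf_id_v1, rcqe);
	return bf_get(lpfc_rcqe_fcf_id, rcqe);
}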
18514 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
18515 * @phba: pointer to lpfc hba data structure.
18517 * This routine is invoked to post rpi header templates to the
18518 * HBA consistent with the SLI-4 interface spec. This routine
18519 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
18520 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
18522 * This routine does not require any locks. Its usage is expected
18523 * to be driver load or reset recovery when the driver is
18528 * -EIO - The mailbox failed to complete successfully.
18529 * When this error occurs, the driver is not guaranteed
18530 * to have any rpi regions posted to the device and
18531 * must either attempt to repost the regions or take a
18535 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18537 struct lpfc_rpi_hdr *rpi_page;
18541 /* SLI4 ports that support extents do not require RPI headers. */
18542 if (!phba->sli4_hba.rpi_hdrs_in_use)
18544 if (phba->sli4_hba.extents_in_use)
18547 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
18549 * Assign the rpi headers a physical rpi only if the driver
18550 * has not initialized those resources. A port reset only
18551 * needs the headers posted.
18553 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18555 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18557 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18558 if (rc != MBX_SUCCESS) {
18559 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18560 "2008 Error %d posting all rpi "
18568 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18569 LPFC_RPI_RSRC_RDY);
18574 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18575 * @phba: pointer to lpfc hba data structure.
18576 * @rpi_page: pointer to the rpi memory region.
18578 * This routine is invoked to post a single rpi header to the
18579 * HBA consistent with the SLI-4 interface spec. This memory region
18580 * maps up to 64 rpi context regions.
18584 * -ENOMEM - No available memory
18585 * -EIO - The mailbox failed to complete successfully.
18588 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18590 LPFC_MBOXQ_t *mboxq;
18591 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18593 uint32_t shdr_status, shdr_add_status;
18594 union lpfc_sli4_cfg_shdr *shdr;
18596 /* SLI4 ports that support extents do not require RPI headers. */
18597 if (!phba->sli4_hba.rpi_hdrs_in_use)
18599 if (phba->sli4_hba.extents_in_use)
18602 /* The port is notified of the header region via a mailbox command. */
18603 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18605 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18606 "2001 Unable to allocate memory for issuing "
18607 "SLI_CONFIG_SPECIAL mailbox command\n");
18611 /* Post all rpi memory regions to the port. */
18612 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18613 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18614 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18615 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18616 sizeof(struct lpfc_sli4_cfg_mhdr),
18617 LPFC_SLI4_MBX_EMBED);
18620 /* Post the physical rpi to the port for this rpi header. */
18621 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18622 rpi_page->start_rpi);
18623 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18624 hdr_tmpl, rpi_page->page_count);
18626 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18627 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18628 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18629 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18630 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18631 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18632 mempool_free(mboxq, phba->mbox_mem_pool);
18633 if (shdr_status || shdr_add_status || rc) {
18634 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18635 "2514 POST_RPI_HDR mailbox failed with "
18636 "status x%x add_status x%x, mbx status x%x\n",
18637 shdr_status, shdr_add_status, rc);
18641 * The next_rpi stores the next logical module-64 rpi value used
18642 * to post physical rpis in subsequent rpi postings.
18644 spin_lock_irq(&phba->hbalock);
18645 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18646 spin_unlock_irq(&phba->hbalock);
18652 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18653 * @phba: pointer to lpfc hba data structure.
18655 * This routine is invoked to allocate the next available rpi from the
18656 * driver's rpi bitmask, consistent with the SLI-4 interface spec. It
18657 * may also trigger posting of an additional rpi header region when the
18658 * pool of remaining rpis runs low.
18661 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
18662 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
18665 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18668 uint16_t max_rpi, rpi_limit;
18669 uint16_t rpi_remaining, lrpi = 0;
18670 struct lpfc_rpi_hdr *rpi_hdr;
18671 unsigned long iflag;
18674 * Fetch the next logical rpi. Because this index is logical,
18675 * the driver starts at 0 each time.
18677 spin_lock_irqsave(&phba->hbalock, iflag);
18678 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18679 rpi_limit = phba->sli4_hba.next_rpi;
18681 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18682 if (rpi >= rpi_limit)
18683 rpi = LPFC_RPI_ALLOC_ERROR;
18685 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18686 phba->sli4_hba.max_cfg_param.rpi_used++;
18687 phba->sli4_hba.rpi_count++;
18689 lpfc_printf_log(phba, KERN_INFO,
18690 LOG_NODE | LOG_DISCOVERY,
18691 "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
18692 (int) rpi, max_rpi, rpi_limit);
18695 * Don't try to allocate more rpi header regions if the device limit
18696 * has been exhausted.
18698 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18699 (phba->sli4_hba.rpi_count >= max_rpi)) {
18700 spin_unlock_irqrestore(&phba->hbalock, iflag);
18705 * RPI header postings are not required for SLI4 ports capable of extents.
18708 if (!phba->sli4_hba.rpi_hdrs_in_use) {
18709 spin_unlock_irqrestore(&phba->hbalock, iflag);
18714 * If the driver is running low on rpi resources, allocate another
18715 * page now. Note that the next_rpi value is used because
18716 * it represents how many are actually in use whereas max_rpi notes
18717 * the maximum number supported by the device.
18719 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
18720 spin_unlock_irqrestore(&phba->hbalock, iflag);
18721 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18722 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18724 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18725 "2002 Error Could not grow rpi "
18728 lrpi = rpi_hdr->start_rpi;
18729 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18730 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
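/*
 * Illustrative sketch (not part of the driver): the allocator above is a
 * standard bitmap allocator - find the first clear bit below the current
 * limit, set it, and return it. Hypothetical standalone version with a
 * caller-provided bitmap and limit.
 */
static inline int lpfc_example_bitmap_alloc(unsigned long *bmask,
					    uint16_t limit)
{
	int id = find_next_zero_bit(bmask, limit, 0);

	if (id >= limit)
		return LPFC_RPI_ALLOC_ERROR;
	set_bit(id, bmask);
	return id;
}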
18738 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
18739 * @phba: pointer to lpfc hba data structure.
18740 * @rpi: rpi to free
18742 * This routine is invoked to release an rpi to the pool of
18743 * available rpis maintained by the driver.
18746 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18749 * if the rpi value indicates a prior unreg has already
18750 * been done, skip the unreg.
18752 if (rpi == LPFC_RPI_ALLOC_ERROR)
18755 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18756 phba->sli4_hba.rpi_count--;
18757 phba->sli4_hba.max_cfg_param.rpi_used--;
18759 lpfc_printf_log(phba, KERN_INFO,
18760 LOG_NODE | LOG_DISCOVERY,
18761 "2016 rpi %x not inuse\n",
18767 * lpfc_sli4_free_rpi - Release an rpi for reuse.
18768 * @phba: pointer to lpfc hba data structure.
18769 * @rpi: rpi to free
18771 * This routine is invoked to release an rpi to the pool of
18772 * available rpis maintained by the driver.
18775 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18777 spin_lock_irq(&phba->hbalock);
18778 __lpfc_sli4_free_rpi(phba, rpi);
18779 spin_unlock_irq(&phba->hbalock);
18783 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
18784 * @phba: pointer to lpfc hba data structure.
18786 * This routine is invoked to remove the memory region that
18787 * provided rpi via a bitmask.
18790 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18792 kfree(phba->sli4_hba.rpi_bmask);
18793 kfree(phba->sli4_hba.rpi_ids);
18794 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
18798 * lpfc_sli4_resume_rpi - Resume an rpi on the port
18799 * @ndlp: pointer to lpfc nodelist data structure.
18800 * @cmpl: completion call-back.
18801 * @arg: data to load as MBox 'caller buffer information'
18803 * This routine is invoked to issue a RESUME_RPI mailbox command to the
18804 * port for the rpi associated with @ndlp.
18807 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18808 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
18810 LPFC_MBOXQ_t *mboxq;
18811 struct lpfc_hba *phba = ndlp->phba;
18814 /* The port is notified of the header region via a mailbox command. */
18815 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18819 /* Post all rpi memory regions to the port. */
18820 lpfc_resume_rpi(mboxq, ndlp);
18822 mboxq->mbox_cmpl = cmpl;
18823 mboxq->ctx_buf = arg;
18824 mboxq->ctx_ndlp = ndlp;
18826 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18827 mboxq->vport = ndlp->vport;
18828 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18829 if (rc == MBX_NOT_FINISHED) {
18830 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18831 "2010 Resume RPI Mailbox failed "
18832 "status %d, mbxStatus x%x\n", rc,
18833 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18834 mempool_free(mboxq, phba->mbox_mem_pool);
18841 * lpfc_sli4_init_vpi - Initialize a vpi with the port
18842 * @vport: Pointer to the vport for which the vpi is being initialized
18844 * This routine is invoked to activate a vpi with the port.
18848 * -Evalue otherwise
18851 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
18853 LPFC_MBOXQ_t *mboxq;
18855 int retval = MBX_SUCCESS;
18857 struct lpfc_hba *phba = vport->phba;
18858 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18861 lpfc_init_vpi(phba, mboxq, vport->vpi);
18862 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
18863 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
18864 if (rc != MBX_SUCCESS) {
18865 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
18866 "2022 INIT VPI Mailbox failed "
18867 "status %d, mbxStatus x%x\n", rc,
18868 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18871 if (rc != MBX_TIMEOUT)
18872 mempool_free(mboxq, vport->phba->mbox_mem_pool);
18878 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
18879 * @phba: pointer to lpfc hba data structure.
18880 * @mboxq: Pointer to mailbox object.
18882 * This routine is invoked to manually add a single FCF record. The caller
18883 * must pass a completely initialized FCF_Record. This routine takes
18884 * care of the nonembedded mailbox operations.
18887 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18890 union lpfc_sli4_cfg_shdr *shdr;
18891 uint32_t shdr_status, shdr_add_status;
18893 virt_addr = mboxq->sge_array->addr[0];
18894 /* The IOCTL status is embedded in the mailbox subheader. */
18895 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18896 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18897 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18899 if ((shdr_status || shdr_add_status) &&
18900 (shdr_status != STATUS_FCF_IN_USE))
18901 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18902 "2558 ADD_FCF_RECORD mailbox failed with "
18903 "status x%x add_status x%x\n",
18904 shdr_status, shdr_add_status);
18906 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18910 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
18911 * @phba: pointer to lpfc hba data structure.
18912 * @fcf_record: pointer to the initialized fcf record to add.
18914 * This routine is invoked to manually add a single FCF record. The caller
18915 * must pass a completely initialized FCF_Record. This routine takes
18916 * care of the nonembedded mailbox operations.
18919 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18922 LPFC_MBOXQ_t *mboxq;
18925 struct lpfc_mbx_sge sge;
18926 uint32_t alloc_len, req_len;
18929 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18931 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18932 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18936 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18939 /* Allocate DMA memory and set up the non-embedded mailbox command */
18940 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18941 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18942 req_len, LPFC_SLI4_MBX_NEMBED);
18943 if (alloc_len < req_len) {
18944 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18945 "2523 Allocated DMA memory size (x%x) is "
18946 "less than the requested DMA memory "
18947 "size (x%x)\n", alloc_len, req_len);
18948 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18953 * Get the first SGE entry from the non-embedded DMA memory. This
18954 * routine only uses a single SGE.
18956 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
18957 virt_addr = mboxq->sge_array->addr[0];
18959 * Configure the FCF record for FCFI 0. This is the driver's
18960 * hardcoded default and gets used in nonFIP mode.
18962 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18963 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18964 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18967 * Copy the fcf_index and the FCF Record Data. The data starts after
18968 * the FCoE header plus word10. The data copy needs to be endian correct.
18971 bytep += sizeof(uint32_t);
18972 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18973 mboxq->vport = phba->pport;
18974 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18975 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18976 if (rc == MBX_NOT_FINISHED) {
18977 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18978 "2515 ADD_FCF_RECORD mailbox failed with "
18979 "status 0x%x\n", rc);
18980 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18989 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
18990 * @phba: pointer to lpfc hba data structure.
18991 * @fcf_record: pointer to the fcf record to write the default data.
18992 * @fcf_index: FCF table entry index.
18994 * This routine is invoked to build the driver's default FCF record. The
18995 * values used are hardcoded. This routine handles memory initialization.
18999 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
19000 struct fcf_record *fcf_record,
19001 uint16_t fcf_index)
19003 memset(fcf_record, 0, sizeof(struct fcf_record));
19004 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
19005 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
19006 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
19007 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
19008 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
19009 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
19010 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
19011 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
19012 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
19013 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
19014 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
19015 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
19016 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
19017 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
19018 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
19019 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
19020 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
19021 /* Set the VLAN bit map */
19022 if (phba->valid_vlan) {
19023 fcf_record->vlan_bitmap[phba->vlan_id / 8]
19024 = 1 << (phba->vlan_id % 8);
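/*
 * Illustrative sketch only, not part of the driver: combining the two
 * routines above - build the hardcoded default FCF record for a table
 * index and post it with the non-embedded ADD_FCF mailbox command. The
 * helper name is hypothetical; the int return mirrors the add routine.
 */
static int lpfc_example_add_dflt_fcf(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct fcf_record fcf_record;

	/* Fill in the driver's hardcoded defaults for this index */
	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, fcf_index);
	/* Post the fully initialized record to the port */
	return lpfc_sli4_add_fcf_record(phba, &fcf_record);
}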
19029 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
19030 * @phba: pointer to lpfc hba data structure.
19031 * @fcf_index: FCF table entry offset.
19033 * This routine is invoked to scan the entire FCF table by reading FCF
19034 * records and processing them one at a time, starting from the @fcf_index
19035 * for initial FCF discovery or fast FCF failover rediscovery.
19037 * Return 0 if the mailbox command is submitted successfully, non-zero otherwise.
19041 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19044 LPFC_MBOXQ_t *mboxq;
19046 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
19047 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
19048 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19050 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19051 "2000 Failed to allocate mbox for "
19054 goto fail_fcf_scan;
19056 /* Construct the read FCF record mailbox command */
19057 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19060 goto fail_fcf_scan;
19062 /* Issue the mailbox command asynchronously */
19063 mboxq->vport = phba->pport;
19064 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
19066 spin_lock_irq(&phba->hbalock);
19067 phba->hba_flag |= FCF_TS_INPROG;
19068 spin_unlock_irq(&phba->hbalock);
19070 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19071 if (rc == MBX_NOT_FINISHED)
19074 /* Reset eligible FCF count for new scan */
19075 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
19076 phba->fcf.eligible_fcf_cnt = 0;
19082 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19083 /* FCF scan failed, clear FCF_TS_INPROG flag */
19084 spin_lock_irq(&phba->hbalock);
19085 phba->hba_flag &= ~FCF_TS_INPROG;
19086 spin_unlock_irq(&phba->hbalock);
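/*
 * Illustrative sketch only, not part of the driver: the asynchronous
 * mailbox pattern shared by the FCF-read routines above and below. The
 * command is issued with MBX_NOWAIT plus a completion handler; on
 * MBX_NOT_FINISHED the caller frees the mailbox itself. The helper name
 * and the -ENOMEM/-EIO values are assumptions.
 */
static int lpfc_example_read_fcf_async(struct lpfc_hba *phba,
				       uint16_t fcf_index,
				       void (*cmpl)(struct lpfc_hba *,
						    LPFC_MBOXQ_t *))
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	/* Construct the READ_FCF_RECORD command for the given index */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -EIO;
	}
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = cmpl;	/* invoked when the port completes it */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -EIO;
	}
	return 0;
}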
19092 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
19093 * @phba: pointer to lpfc hba data structure.
19094 * @fcf_index: FCF table entry offset.
19096 * This routine is invoked to read an FCF record indicated by @fcf_index
19097 * and to use it for FLOGI roundrobin FCF failover.
19099 * Return 0 if the mailbox command is submitted successfully, non-zero otherwise.
19103 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19106 LPFC_MBOXQ_t *mboxq;
19108 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19110 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19111 "2763 Failed to allocate mbox for "
19114 goto fail_fcf_read;
19116 /* Construct the read FCF record mailbox command */
19117 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19120 goto fail_fcf_read;
19122 /* Issue the mailbox command asynchronously */
19123 mboxq->vport = phba->pport;
19124 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
19125 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19126 if (rc == MBX_NOT_FINISHED)
19132 if (error && mboxq)
19133 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19138 * lpfc_sli4_read_fcf_rec - Read hba fcf record to update the eligible fcf bmask.
19139 * @phba: pointer to lpfc hba data structure.
19140 * @fcf_index: FCF table entry offset.
19142 * This routine is invoked to read an FCF record indicated by @fcf_index to
19143 * determine whether it's eligible for the FLOGI roundrobin failover list.
19145 * Return 0 if the mailbox command is submitted successfully, non-zero otherwise.
19149 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19152 LPFC_MBOXQ_t *mboxq;
19154 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19156 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19157 "2758 Failed to allocate mbox for "
19160 goto fail_fcf_read;
19162 /* Construct the read FCF record mailbox command */
19163 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19166 goto fail_fcf_read;
19168 /* Issue the mailbox command asynchronously */
19169 mboxq->vport = phba->pport;
19170 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
19171 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19172 if (rc == MBX_NOT_FINISHED)
19178 if (error && mboxq)
19179 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19184 * lpfc_check_next_fcf_pri_level
19185 * @phba: pointer to the lpfc_hba struct for this port.
19186 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
19187 * routine when the rr_bmask is empty. The FCF indices are put into the
19188 * rr_bmask based on their priority level, from the highest priority to the
19189 * lowest. The most likely FCF candidate will be in the highest priority
19190 * group. When this routine is called, it searches the fcf_pri list for the
19191 * next lowest priority group and repopulates the rr_bmask with only those
19194 * 1=success 0=failure
19197 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
19199 uint16_t next_fcf_pri;
19200 uint16_t last_index;
19201 struct lpfc_fcf_pri *fcf_pri;
19205 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
19206 LPFC_SLI4_FCF_TBL_INDX_MAX);
19207 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19208 "3060 Last IDX %d\n", last_index);
19210 /* Verify the priority list has 2 or more entries */
19211 spin_lock_irq(&phba->hbalock);
19212 if (list_empty(&phba->fcf.fcf_pri_list) ||
19213 list_is_singular(&phba->fcf.fcf_pri_list)) {
19214 spin_unlock_irq(&phba->hbalock);
19215 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19216 "3061 Last IDX %d\n", last_index);
19217 return 0; /* Empty rr list */
19219 spin_unlock_irq(&phba->hbalock);
19223 * Clear the rr_bmask and set all of the bits that are at this
19226 memset(phba->fcf.fcf_rr_bmask, 0,
19227 sizeof(*phba->fcf.fcf_rr_bmask));
19228 spin_lock_irq(&phba->hbalock);
19229 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19230 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
19233 * the 1st priority that has not failed FLOGI
19234 * will be the highest.
19237 next_fcf_pri = fcf_pri->fcf_rec.priority;
19238 spin_unlock_irq(&phba->hbalock);
19239 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19240 rc = lpfc_sli4_fcf_rr_index_set(phba,
19241 fcf_pri->fcf_rec.fcf_index);
19245 spin_lock_irq(&phba->hbalock);
19248 * If next_fcf_pri was not set above and the list is not empty, then we
19249 * have failed FLOGIs on all of them. So reset the FLOGI failed flag
19250 * and start at the beginning.
19252 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
19253 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19254 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
19256 * the 1st priority that has not failed FLOGI
19257 * will be the highest.
19260 next_fcf_pri = fcf_pri->fcf_rec.priority;
19261 spin_unlock_irq(&phba->hbalock);
19262 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19263 rc = lpfc_sli4_fcf_rr_index_set(phba,
19264 fcf_pri->fcf_rec.fcf_index);
19268 spin_lock_irq(&phba->hbalock);
19272 spin_unlock_irq(&phba->hbalock);
19277 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
19278 * @phba: pointer to lpfc hba data structure.
19280 * This routine is to get the next eligible FCF record index in a round
19281 * robin fashion. If the next eligible FCF record index equals the
19282 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
19283 * shall be returned, otherwise, the next eligible FCF record's index
19284 * shall be returned.
19287 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
19289 uint16_t next_fcf_index;
19292 /* Search start from next bit of currently registered FCF index */
19293 next_fcf_index = phba->fcf.current_rec.fcf_indx;
19296 /* Determine the next fcf index to check */
19297 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
19298 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19299 LPFC_SLI4_FCF_TBL_INDX_MAX,
19302 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
19303 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19305 * If we have wrapped then we need to clear the bits that
19306 * have been tested so that we can detect when we should
19307 * change the priority level.
19309 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19310 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
19314 /* Check roundrobin failover list empty condition */
19315 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
19316 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
19318 * If the next fcf index is not found, check if there are lower
19319 * priority level fcfs in the fcf_priority list.
19320 * Set up the rr_bmask with all of the available fcf bits
19321 * at that level and continue the selection process.
19323 if (lpfc_check_next_fcf_pri_level(phba))
19324 goto initial_priority;
19325 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
19326 "2844 No roundrobin failover FCF available\n");
19328 return LPFC_FCOE_FCF_NEXT_NONE;
19331 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
19332 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
19333 LPFC_FCF_FLOGI_FAILED) {
19334 if (list_is_singular(&phba->fcf.fcf_pri_list))
19335 return LPFC_FCOE_FCF_NEXT_NONE;
19337 goto next_priority;
19340 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19341 "2845 Get next roundrobin failover FCF (x%x)\n",
19344 return next_fcf_index;
19348 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
19349 * @phba: pointer to lpfc hba data structure.
19350 * @fcf_index: index into the FCF table to 'set'
19352 * This routine sets the FCF record index into the eligible bmask for
19353 * roundrobin failover search. It checks to make sure that the index
19354 * does not go beyond the range of the driver allocated bmask dimension
19355 * before setting the bit.
19357 * Returns 0 if the index bit is successfully set; otherwise, it returns
19361 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
19363 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19364 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19365 "2610 FCF (x%x) reached driver's book "
19366 "keeping dimension:x%x\n",
19367 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19370 /* Set the eligible FCF record index bmask */
19371 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19373 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19374 "2790 Set FCF (x%x) to roundrobin FCF failover "
19375 "bmask\n", fcf_index);
19381 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
19382 * @phba: pointer to lpfc hba data structure.
19383 * @fcf_index: index into the FCF table to 'clear'
19385 * This routine clears the FCF record index from the eligible bmask for
19386 * roundrobin failover search. It checks to make sure that the index
19387 * does not go beyond the range of the driver allocated bmask dimension
19388 * before clearing the bit.
19391 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
19393 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
19394 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19395 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19396 "2762 FCF (x%x) reached driver's book "
19397 "keeping dimension:x%x\n",
19398 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19401 /* Clear the eligible FCF record index bmask */
19402 spin_lock_irq(&phba->hbalock);
19403 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
19405 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
19406 list_del_init(&fcf_pri->list);
19410 spin_unlock_irq(&phba->hbalock);
19411 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19413 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19414 "2791 Clear FCF (x%x) from roundrobin failover "
19415 "bmask\n", fcf_index);
19419 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
19420 * @phba: pointer to lpfc hba data structure.
19421 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
19423 * This routine is the completion routine for the rediscover FCF table mailbox
19424 * command. If the mailbox command returned failure, it will try to stop the
19425 * FCF rediscover wait timer.
19428 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
19430 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19431 uint32_t shdr_status, shdr_add_status;
19433 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19435 shdr_status = bf_get(lpfc_mbox_hdr_status,
19436 &redisc_fcf->header.cfg_shdr.response);
19437 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19438 &redisc_fcf->header.cfg_shdr.response);
19439 if (shdr_status || shdr_add_status) {
19440 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19441 "2746 Requesting for FCF rediscovery failed "
19442 "status x%x add_status x%x\n",
19443 shdr_status, shdr_add_status);
19444 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
19445 spin_lock_irq(&phba->hbalock);
19446 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
19447 spin_unlock_irq(&phba->hbalock);
19449 * CVL event triggered FCF rediscover request failed,
19450 * last resort to re-try current registered FCF entry.
19452 lpfc_retry_pport_discovery(phba);
19454 spin_lock_irq(&phba->hbalock);
19455 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
19456 spin_unlock_irq(&phba->hbalock);
19458 * DEAD FCF event triggered FCF rediscover request
19459 * failed, last resort to fail over as a link down
19460 * to FCF registration.
19462 lpfc_sli4_fcf_dead_failthrough(phba);
19465 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19466 "2775 Start FCF rediscover quiescent timer\n");
19468 * Start the FCF rediscovery wait timer for the pending FCF
19469 * before rescanning the FCF record table.
19471 lpfc_fcf_redisc_wait_start_timer(phba);
19474 mempool_free(mbox, phba->mbox_mem_pool);
19478 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
19479 * @phba: pointer to lpfc hba data structure.
19481 * This routine is invoked to request rediscovery of the entire FCF table
19485 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
19487 LPFC_MBOXQ_t *mbox;
19488 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19491 /* Cancel retry delay timers to all vports before FCF rediscover */
19492 lpfc_cancel_all_vport_retry_delay_timer(phba);
19494 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19496 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19497 "2745 Failed to allocate mbox for "
19498 "requesting FCF rediscover.\n");
19502 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
19503 sizeof(struct lpfc_sli4_cfg_mhdr));
19504 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
19505 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
19506 length, LPFC_SLI4_MBX_EMBED);
19508 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19509 /* Set count to 0 for invalidating the entire FCF database */
19510 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
19512 /* Issue the mailbox command asynchronously */
19513 mbox->vport = phba->pport;
19514 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
19515 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
19517 if (rc == MBX_NOT_FINISHED) {
19518 mempool_free(mbox, phba->mbox_mem_pool);
19525 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
19526 * @phba: pointer to lpfc hba data structure.
19528 * This function is the failover routine as a last resort to the FCF DEAD
19529 * event when the driver failed to perform fast FCF failover.
19532 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
19534 uint32_t link_state;
19537 * Last resort as FCF DEAD event failover will treat this as
19538 * a link down, but save the link state because we don't want
19539 * it to be changed to Link Down unless it is already down.
19541 link_state = phba->link_state;
19542 lpfc_linkdown(phba);
19543 phba->link_state = link_state;
19545 /* Unregister FCF if no devices connected to it */
19546 lpfc_unregister_unused_fcf(phba);
19550 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
19551 * @phba: pointer to lpfc hba data structure.
19552 * @rgn23_data: pointer to the region 23 data buffer.
19554 * This function gets SLI3 port configuration region 23 data through the
19555 * memory dump mailbox command. When it successfully retrieves data, the
19556 * size of the data is returned; otherwise, 0 is returned.
19559 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19561 LPFC_MBOXQ_t *pmb = NULL;
19563 uint32_t offset = 0;
19569 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19571 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19572 "2600 failed to allocate mailbox memory\n");
19578 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19579 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19581 if (rc != MBX_SUCCESS) {
19582 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19583 "2601 failed to read config "
19584 "region 23, rc 0x%x Status 0x%x\n",
19585 rc, mb->mbxStatus);
19586 mb->un.varDmp.word_cnt = 0;
19589 * dump mem may return a zero word count when finished, or we got a
19590 * mailbox error; either way we are done.
19592 if (mb->un.varDmp.word_cnt == 0)
19595 i = mb->un.varDmp.word_cnt * sizeof(uint32_t);
19596 if (offset + i > DMP_RGN23_SIZE)
19597 i = DMP_RGN23_SIZE - offset;
19598 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
19599 rgn23_data + offset, i);
19601 } while (offset < DMP_RGN23_SIZE);
19603 mempool_free(pmb, phba->mbox_mem_pool);
19608 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
19609 * @phba: pointer to lpfc hba data structure.
19610 * @rgn23_data: pointer to the region 23 data buffer.
19612 * This function gets SLI4 port configuration region 23 data through the
19613 * memory dump mailbox command. When it successfully retrieves data, the
19614 * size of the data is returned; otherwise, 0 is returned.
19617 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19619 LPFC_MBOXQ_t *mboxq = NULL;
19620 struct lpfc_dmabuf *mp = NULL;
19621 struct lpfc_mqe *mqe;
19622 uint32_t data_length = 0;
19628 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19630 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19631 "3105 failed to allocate mailbox memory\n");
19635 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19637 mqe = &mboxq->u.mqe;
19638 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
19639 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19642 data_length = mqe->un.mb_words[5];
19643 if (data_length == 0)
19645 if (data_length > DMP_RGN23_SIZE) {
19649 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19651 mempool_free(mboxq, phba->mbox_mem_pool);
19653 lpfc_mbuf_free(phba, mp->virt, mp->phys);
19656 return data_length;
19660 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
19661 * @phba: pointer to lpfc hba data structure.
19663 * This function reads region 23 and parses the TLVs for port status to
19664 * decide if the user disabled the port. If a TLV indicates that the
19665 * port is disabled, the hba_flag is set accordingly.
19668 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19670 uint8_t *rgn23_data = NULL;
19671 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19672 uint32_t offset = 0;
19674 /* Get adapter Region 23 data */
19675 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19679 if (phba->sli_rev < LPFC_SLI_REV4)
19680 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19682 if_type = bf_get(lpfc_sli_intf_if_type,
19683 &phba->sli4_hba.sli_intf);
19684 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19686 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19692 /* Check the region signature first */
19693 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19694 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19695 "2619 Config region 23 has bad signature\n");
19700 /* Check the data structure version */
19701 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19702 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19703 "2620 Config region 23 has bad version\n");
19708 /* Parse TLV entries in the region */
19709 while (offset < data_size) {
19710 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19713 * If the TLV is not a driver-specific TLV or the driver id is
19714 * not the Linux driver id, skip the record.
19716 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19717 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19718 (rgn23_data[offset + 3] != 0)) {
19719 offset += rgn23_data[offset + 1] * 4 + 4;
19723 /* Driver found a driver specific TLV in the config region */
19724 sub_tlv_len = rgn23_data[offset + 1] * 4;
19729 * Search for configured port state sub-TLV.
19731 while ((offset < data_size) &&
19732 (tlv_offset < sub_tlv_len)) {
19733 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19738 if (rgn23_data[offset] != PORT_STE_TYPE) {
19739 offset += rgn23_data[offset + 1] * 4 + 4;
19740 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
19744 /* This HBA contains PORT_STE configured */
19745 if (!rgn23_data[offset + 2])
19746 phba->hba_flag |= LINK_DISABLED;
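/*
 * Illustrative sketch only, not part of the driver: the generic shape of
 * the region 23 TLV walk above. Each record is a type byte, a length in
 * 4-byte words, two more header bytes, then the payload; the walk skips
 * length * 4 + 4 bytes per record and stops at LPFC_REGION23_LAST_REC.
 * The helper name and bool return are illustrative.
 */
static bool lpfc_example_rgn23_find_tlv(uint8_t *rgn23_data, uint32_t data_size,
					uint32_t start, uint8_t wanted_type,
					uint32_t *tlv_offset)
{
	uint32_t offset = start;	/* first TLV after signature/version */

	while (offset < data_size) {
		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
			break;
		if (rgn23_data[offset] == wanted_type) {
			*tlv_offset = offset;
			return true;
		}
		/* record length is in words, plus 4 bytes of header */
		offset += rgn23_data[offset + 1] * 4 + 4;
	}
	return false;
}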
19758 * lpfc_wr_object - write an object to the firmware
19759 * @phba: HBA structure that indicates port to create a queue on.
19760 * @dmabuf_list: list of dmabufs to write to the port.
19761 * @size: the total byte value of the objects to write to the port.
19762 * @offset: the current offset to be used to start the transfer.
19764 * This routine will create a wr_object mailbox command to send to the port.
19765 * The mailbox command will be constructed using the dma buffers described in
19766 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
19767 * BDEs as the embedded mailbox can support. The @offset variable will be
19768 * used to indicate the starting offset of the transfer and will also return
19769 * the offset after the write object mailbox has completed. @size is used to
19770 * determine the end of the object and whether the eof bit should be set.
19772 * Return 0 if successful, and @offset will contain the new offset to use
19773 * for the next write.
19774 * Return negative value for error cases.
19777 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19778 uint32_t size, uint32_t *offset)
19780 struct lpfc_mbx_wr_object *wr_object;
19781 LPFC_MBOXQ_t *mbox;
19783 uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
19785 struct lpfc_dmabuf *dmabuf;
19786 uint32_t written = 0;
19787 bool check_change_status = false;
19789 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19793 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
19794 LPFC_MBOX_OPCODE_WRITE_OBJECT,
19795 sizeof(struct lpfc_mbx_wr_object) -
19796 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
19798 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
19799 wr_object->u.request.write_offset = *offset;
19800 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
19801 wr_object->u.request.object_name[0] =
19802 cpu_to_le32(wr_object->u.request.object_name[0]);
19803 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
19804 list_for_each_entry(dmabuf, dmabuf_list, list) {
19805 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
19807 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
19808 wr_object->u.request.bde[i].addrHigh =
19809 putPaddrHigh(dmabuf->phys);
19810 if (written + SLI4_PAGE_SIZE >= size) {
19811 wr_object->u.request.bde[i].tus.f.bdeSize =
19813 written += (size - written);
19814 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
19815 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
19816 check_change_status = true;
19818 wr_object->u.request.bde[i].tus.f.bdeSize =
19820 written += SLI4_PAGE_SIZE;
19824 wr_object->u.request.bde_count = i;
19825 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19826 if (!phba->sli4_hba.intr_enable)
19827 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19829 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
19830 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19832 /* The IOCTL status is embedded in the mailbox subheader. */
19833 shdr_status = bf_get(lpfc_mbox_hdr_status,
19834 &wr_object->header.cfg_shdr.response);
19835 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19836 &wr_object->header.cfg_shdr.response);
19837 if (check_change_status) {
19838 shdr_change_status = bf_get(lpfc_wr_object_change_status,
19839 &wr_object->u.response);
19841 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
19842 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
19843 shdr_csf = bf_get(lpfc_wr_object_csf,
19844 &wr_object->u.response);
19846 shdr_change_status =
19847 LPFC_CHANGE_STATUS_PCI_RESET;
19850 switch (shdr_change_status) {
19851 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19852 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19853 "3198 Firmware write complete: System "
19854 "reboot required to instantiate\n");
19856 case (LPFC_CHANGE_STATUS_FW_RESET):
19857 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19858 "3199 Firmware write complete: Firmware"
19859 " reset required to instantiate\n");
19861 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19862 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19863 "3200 Firmware write complete: Port "
19864 "Migration or PCI Reset required to "
19867 case (LPFC_CHANGE_STATUS_PCI_RESET):
19868 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19869 "3201 Firmware write complete: PCI "
19870 "Reset required to instantiate\n");
19876 if (!phba->sli4_hba.intr_enable)
19877 mempool_free(mbox, phba->mbox_mem_pool);
19878 else if (rc != MBX_TIMEOUT)
19879 mempool_free(mbox, phba->mbox_mem_pool);
19880 if (shdr_status || shdr_add_status || rc) {
19881 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19882 "3025 Write Object mailbox failed with "
19883 "status x%x add_status x%x, mbx status x%x\n",
19884 shdr_status, shdr_add_status, rc);
19886 *offset = shdr_add_status;
19888 *offset += wr_object->u.response.actual_write_length;
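/*
 * Illustrative sketch only, not part of the driver: one write-object step
 * driven through lpfc_wr_object above. The caller owns @dmabuf_list; on
 * success *offset has been advanced by the bytes the port accepted, so a
 * subsequent call continues from there. The helper name is hypothetical.
 */
static int lpfc_example_wr_one_chunk(struct lpfc_hba *phba,
				     struct list_head *dmabuf_list,
				     uint32_t total_size, uint32_t *offset)
{
	int rc;

	rc = lpfc_wr_object(phba, dmabuf_list, total_size, offset);
	if (rc < 0)
		return rc;	/* negative value on error, per the doc above */
	/* *offset now holds the new offset to use for the next write */
	return 0;
}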
19893 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
19894 * @vport: pointer to vport data structure.
19896 * This function iterates through the mailboxq and cleans up all REG_LOGIN
19897 * and REG_VPI mailbox commands associated with the vport. This function
19898 * is called when the driver wants to restart discovery of the vport due to
19899 * a Clear Virtual Link event.
19902 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19904 struct lpfc_hba *phba = vport->phba;
19905 LPFC_MBOXQ_t *mb, *nextmb;
19906 struct lpfc_dmabuf *mp;
19907 struct lpfc_nodelist *ndlp;
19908 struct lpfc_nodelist *act_mbx_ndlp = NULL;
19909 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
19910 LIST_HEAD(mbox_cmd_list);
19911 uint8_t restart_loop;
19913 /* Clean up internally queued mailbox commands with the vport */
19914 spin_lock_irq(&phba->hbalock);
19915 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
19916 if (mb->vport != vport)
19919 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19920 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19923 list_del(&mb->list);
19924 list_add_tail(&mb->list, &mbox_cmd_list);
19926 /* Clean up active mailbox command with the vport */
19927 mb = phba->sli.mbox_active;
19928 if (mb && (mb->vport == vport)) {
19929 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
19930 (mb->u.mb.mbxCommand == MBX_REG_VPI))
19931 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19932 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19933 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19934 /* Put reference count for delayed processing */
19935 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
19936 /* Unregister the RPI when mailbox complete */
19937 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19940 /* Cleanup any mailbox completions which are not yet processed */
19943 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
19945 * If this mailbox is already processed or it is
19946 * for another vport, ignore it.
19948 if ((mb->vport != vport) ||
19949 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
19952 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19953 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19956 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19957 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19958 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19959 /* Unregister the RPI when mailbox complete */
19960 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19962 spin_unlock_irq(&phba->hbalock);
19963 spin_lock(shost->host_lock);
19964 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19965 spin_unlock(shost->host_lock);
19966 spin_lock_irq(&phba->hbalock);
19970 } while (restart_loop);
19972 spin_unlock_irq(&phba->hbalock);
19974 /* Release the cleaned-up mailbox commands */
19975 while (!list_empty(&mbox_cmd_list)) {
19976 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
19977 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19978 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
19980 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19983 mb->ctx_buf = NULL;
19984 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19985 mb->ctx_ndlp = NULL;
19987 spin_lock(shost->host_lock);
19988 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19989 spin_unlock(shost->host_lock);
19990 lpfc_nlp_put(ndlp);
19993 mempool_free(mb, phba->mbox_mem_pool);
19996 /* Release the ndlp with the cleaned-up active mailbox command */
19997 if (act_mbx_ndlp) {
19998 spin_lock(shost->host_lock);
19999 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20000 spin_unlock(shost->host_lock);
20001 lpfc_nlp_put(act_mbx_ndlp);
20006 * lpfc_drain_txq - Drain the txq
20007 * @phba: Pointer to HBA context object.
20009 * This function attempts to submit IOCBs on the txq
20010 * to the adapter. For SLI4 adapters, the txq contains
20011 * ELS IOCBs that have been deferred because there
20012 * are no SGLs. This congestion can occur with large
20013 * vport counts during node discovery.
20017 lpfc_drain_txq(struct lpfc_hba *phba)
20019 LIST_HEAD(completions);
20020 struct lpfc_sli_ring *pring;
20021 struct lpfc_iocbq *piocbq = NULL;
20022 unsigned long iflags = 0;
20023 char *fail_msg = NULL;
20024 struct lpfc_sglq *sglq;
20025 union lpfc_wqe128 wqe;
20026 uint32_t txq_cnt = 0;
20027 struct lpfc_queue *wq;
20029 if (phba->link_flag & LS_MDS_LOOPBACK) {
20030 /* MDS WQEs are posted only to the first WQ */
20031 wq = phba->sli4_hba.hdwq[0].io_wq;
20036 wq = phba->sli4_hba.els_wq;
20039 pring = lpfc_phba_elsring(phba);
20042 if (unlikely(!pring) || list_empty(&pring->txq))
20045 spin_lock_irqsave(&pring->ring_lock, iflags);
20046 list_for_each_entry(piocbq, &pring->txq, list) {
20050 if (txq_cnt > pring->txq_max)
20051 pring->txq_max = txq_cnt;
20053 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20055 while (!list_empty(&pring->txq)) {
20056 spin_lock_irqsave(&pring->ring_lock, iflags);
20058 piocbq = lpfc_sli_ringtx_get(phba, pring);
20060 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20061 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20062 "2823 txq empty and txq_cnt is %d\n ",
20066 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
20068 __lpfc_sli_ringtx_put(phba, pring, piocbq);
20069 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20074 /* The xri and iocb resources secured,
20075 * attempt to issue request
20077 piocbq->sli4_lxritag = sglq->sli4_lxritag;
20078 piocbq->sli4_xritag = sglq->sli4_xritag;
20079 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
20080 fail_msg = "to convert bpl to sgl";
20081 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
20082 fail_msg = "to convert iocb to wqe";
20083 else if (lpfc_sli4_wq_put(wq, &wqe))
20084 fail_msg = " - Wq is full";
20086 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
20089 /* Failed means we can't issue and need to cancel */
20090 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20091 "2822 IOCB failed %s iotag 0x%x "
20094 piocbq->iotag, piocbq->sli4_xritag);
20095 list_add_tail(&piocbq->list, &completions);
20098 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20101 /* Cancel all the IOCBs that cannot be issued */
20102 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
20103 IOERR_SLI_ABORTED);
20109 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
20110 * @phba: Pointer to HBA context object.
20111 * @pwqeq: Pointer to command WQE.
20112 * @sglq: Pointer to the scatter gather queue object.
20114 * This routine converts the bpl or bde that is in the WQE
20115 * to a sgl list for the sli4 hardware. The physical address
20116 * of the bpl/bde is converted back to a virtual address.
20117 * If the WQE contains a BPL then the list of BDEs is
20118 * converted to sli4_sges. If the WQE contains a single
20119 * BDE then it is converted to a single sli4_sge.
20120 * The WQE is still in cpu endianness so the contents of
20121 * the bpl can be used without byte swapping.
20123 * Returns valid XRI = Success, NO_XRI = Failure.
20126 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
20127 struct lpfc_sglq *sglq)
20129 uint16_t xritag = NO_XRI;
20130 struct ulp_bde64 *bpl = NULL;
20131 struct ulp_bde64 bde;
20132 struct sli4_sge *sgl = NULL;
20133 struct lpfc_dmabuf *dmabuf;
20134 union lpfc_wqe128 *wqe;
20137 uint32_t offset = 0; /* accumulated offset in the sg request list */
20138 int inbound = 0; /* number of sg reply entries inbound from firmware */
20141 if (!pwqeq || !sglq)
20144 sgl = (struct sli4_sge *)sglq->sgl;
20146 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
20148 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
20149 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
20150 return sglq->sli4_xritag;
20151 numBdes = pwqeq->rsvd2;
20153 /* The addrHigh and addrLow fields within the WQE
20154 * have not been byteswapped yet so there is no
20155 * need to swap them back.
20157 if (pwqeq->context3)
20158 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
20162 bpl = (struct ulp_bde64 *)dmabuf->virt;
20166 for (i = 0; i < numBdes; i++) {
20167 /* Should already be byte swapped. */
20168 sgl->addr_hi = bpl->addrHigh;
20169 sgl->addr_lo = bpl->addrLow;
20171 sgl->word2 = le32_to_cpu(sgl->word2);
20172 if ((i+1) == numBdes)
20173 bf_set(lpfc_sli4_sge_last, sgl, 1);
20175 bf_set(lpfc_sli4_sge_last, sgl, 0);
20176 /* swap the size field back to the cpu so we
20177 * can assign it to the sgl.
20179 bde.tus.w = le32_to_cpu(bpl->tus.w);
20180 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
20181 /* The offsets in the sgl need to be accumulated
20182 * separately for the request and reply lists.
20183 * The request is always first, the reply follows.
20186 case CMD_GEN_REQUEST64_WQE:
20187 /* add up the reply sg entries */
20188 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
20190 /* first inbound? reset the offset */
20193 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20194 bf_set(lpfc_sli4_sge_type, sgl,
20195 LPFC_SGE_TYPE_DATA);
20196 offset += bde.tus.f.bdeSize;
20198 case CMD_FCP_TRSP64_WQE:
20199 bf_set(lpfc_sli4_sge_offset, sgl, 0);
20200 bf_set(lpfc_sli4_sge_type, sgl,
20201 LPFC_SGE_TYPE_DATA);
20203 case CMD_FCP_TSEND64_WQE:
20204 case CMD_FCP_TRECEIVE64_WQE:
20205 bf_set(lpfc_sli4_sge_type, sgl,
20206 bpl->tus.f.bdeFlags);
20210 offset += bde.tus.f.bdeSize;
20211 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20214 sgl->word2 = cpu_to_le32(sgl->word2);
20218 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
20219 /* The addrHigh and addrLow fields of the BDE have not
20220 * been byteswapped yet so they need to be swapped
20221 * before putting them in the sgl.
20223 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
20224 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
20225 sgl->word2 = le32_to_cpu(sgl->word2);
20226 bf_set(lpfc_sli4_sge_last, sgl, 1);
20227 sgl->word2 = cpu_to_le32(sgl->word2);
20228 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
20230 return sglq->sli4_xritag;
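/*
 * Illustrative sketch only, not part of the driver: the per-entry
 * endianness handling from lpfc_wqe_bpl2sgl above. BPL addresses are
 * already little-endian and copy straight across; word2 is brought to CPU
 * order for bf_set() and converted back; the BDE size is swapped to CPU
 * order before it can be read. The helper name is hypothetical.
 */
static void lpfc_example_bde_to_sge(struct ulp_bde64 *bpl,
				    struct sli4_sge *sgl, int last)
{
	struct ulp_bde64 bde;

	sgl->addr_hi = bpl->addrHigh;	/* already byteswapped in the BPL */
	sgl->addr_lo = bpl->addrLow;
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, last ? 1 : 0);
	sgl->word2 = cpu_to_le32(sgl->word2);
	bde.tus.w = le32_to_cpu(bpl->tus.w);	/* size field to CPU order */
	sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
}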
20234 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
20235 * @phba: Pointer to HBA context object.
20236 * @qp: Pointer to HDW queue.
20237 * @pwqe: Pointer to command WQE.
20240 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20241 struct lpfc_iocbq *pwqe)
20243 union lpfc_wqe128 *wqe = &pwqe->wqe;
20244 struct lpfc_async_xchg_ctx *ctxp;
20245 struct lpfc_queue *wq;
20246 struct lpfc_sglq *sglq;
20247 struct lpfc_sli_ring *pring;
20248 unsigned long iflags;
20251 /* NVME_LS and NVME_LS ABTS requests. */
20252 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
20253 pring = phba->sli4_hba.nvmels_wq->pring;
20254 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20256 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
20258 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20261 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20262 pwqe->sli4_xritag = sglq->sli4_xritag;
20263 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
20264 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20267 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20268 pwqe->sli4_xritag);
20269 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
20271 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20275 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20276 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20278 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20282 /* NVME_FCREQ and NVME_ABTS requests */
20283 if (pwqe->iocb_flag & LPFC_IO_NVME) {
20284 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
20288 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20290 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20292 ret = lpfc_sli4_wq_put(wq, wqe);
20294 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20297 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20298 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20300 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20304 /* NVMET requests */
20305 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
20306 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
20310 ctxp = pwqe->context2;
20311 sglq = ctxp->ctxbuf->sglq;
20312 if (pwqe->sli4_xritag == NO_XRI) {
20313 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20314 pwqe->sli4_xritag = sglq->sli4_xritag;
20316 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20317 pwqe->sli4_xritag);
20318 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20320 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20322 ret = lpfc_sli4_wq_put(wq, wqe);
20324 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20327 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20328 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20330 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
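/*
 * Illustrative sketch only, not part of the driver: dispatching a
 * prepared NVME command WQE through lpfc_sli4_issue_wqe above. The
 * helper name is hypothetical and the int return mirrors the routine's
 * lpfc_sli4_wq_put-based result.
 */
static int lpfc_example_issue_nvme_wqe(struct lpfc_hba *phba,
				       struct lpfc_iocbq *pwqe, u32 hwqid)
{
	struct lpfc_sli4_hdw_queue *qp = &phba->sli4_hba.hdwq[hwqid];

	pwqe->iocb_flag |= LPFC_IO_NVME;	/* route to the NVME fast path */
	return lpfc_sli4_issue_wqe(phba, qp, pwqe);
}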
20336 #ifdef LPFC_MXP_STAT
20338 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
20339 * @phba: pointer to lpfc hba data structure.
20340 * @hwqid: index of the HWQ this snapshot belongs to.
20342 * The purpose of this routine is to take a snapshot of the pbl, pvt and
20343 * busy counts 15 seconds after a test case starts running.
20345 * The user should call lpfc_debugfs_multixripools_write before running a test
20346 * case to clear stat_snapshot_taken. Then the user starts a test case. While
20347 * a test case is running, stat_snapshot_taken is incremented by 1 each time
20348 * this routine is called from the heartbeat timer. When stat_snapshot_taken
20349 * equals LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
20351 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
20353 struct lpfc_sli4_hdw_queue *qp;
20354 struct lpfc_multixri_pool *multixri_pool;
20355 struct lpfc_pvt_pool *pvt_pool;
20356 struct lpfc_pbl_pool *pbl_pool;
20359 qp = &phba->sli4_hba.hdwq[hwqid];
20360 multixri_pool = qp->p_multixri_pool;
20361 if (!multixri_pool)
20364 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
20365 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20366 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20367 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20369 multixri_pool->stat_pbl_count = pbl_pool->count;
20370 multixri_pool->stat_pvt_count = pvt_pool->count;
20371 multixri_pool->stat_busy_count = txcmplq_cnt;
20374 multixri_pool->stat_snapshot_taken++;
20379 * lpfc_adjust_pvt_pool_count - Adjust private pool count
20380 * @phba: pointer to lpfc hba data structure.
20381 * @hwqid: index of the HWQ this pool belongs to.
20383 * This routine moves some XRIs from the private to the public pool when
20384 * the private pool is not busy.
20386 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
20388 struct lpfc_multixri_pool *multixri_pool;
20390 u32 prev_io_req_count;
20392 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20393 if (!multixri_pool)
20395 io_req_count = multixri_pool->io_req_count;
20396 prev_io_req_count = multixri_pool->prev_io_req_count;
20398 if (prev_io_req_count != io_req_count) {
20399 /* Private pool is busy */
20400 multixri_pool->prev_io_req_count = io_req_count;
20402 /* Private pool is not busy.
20403 * Move XRIs from private to public pool.
20405 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
20410 * lpfc_adjust_high_watermark - Adjust high watermark
20411 * @phba: pointer to lpfc hba data structure.
20412 * @hwqid: index of the HWQ this pool belongs to.
20414 * This routine sets the high watermark to the number of outstanding XRIs,
20415 * but makes sure the new value is between xri_limit/2 and xri_limit.
20417 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
20425 struct lpfc_multixri_pool *multixri_pool;
20426 struct lpfc_sli4_hdw_queue *qp;
20428 qp = &phba->sli4_hba.hdwq[hwqid];
20429 multixri_pool = qp->p_multixri_pool;
20430 if (!multixri_pool)
20432 xri_limit = multixri_pool->xri_limit;
20434 watermark_max = xri_limit;
20435 watermark_min = xri_limit / 2;
20437 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20438 abts_io_bufs = qp->abts_scsi_io_bufs;
20439 abts_io_bufs += qp->abts_nvme_io_bufs;
20441 new_watermark = txcmplq_cnt + abts_io_bufs;
20442 new_watermark = min(watermark_max, new_watermark);
20443 new_watermark = max(watermark_min, new_watermark);
20444 multixri_pool->pvt_pool.high_watermark = new_watermark;
20446 #ifdef LPFC_MXP_STAT
20447 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
20453 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
20454 * @phba: pointer to lpfc hba data structure.
20455 * @hwqid: index of the HWQ this pool belongs to.
20457 * This routine is called from the heartbeat timer when pvt_pool is idle.
20458 * All free XRIs are moved from the private to the public pool on hwqid in
20459 * 2 steps: the first step moves (all - low_watermark) of the XRIs, and
20460 * the second step moves the rest of the XRIs.
20462 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
20464 struct lpfc_pbl_pool *pbl_pool;
20465 struct lpfc_pvt_pool *pvt_pool;
20466 struct lpfc_sli4_hdw_queue *qp;
20467 struct lpfc_io_buf *lpfc_ncmd;
20468 struct lpfc_io_buf *lpfc_ncmd_next;
20469 unsigned long iflag;
20470 struct list_head tmp_list;
20473 qp = &phba->sli4_hba.hdwq[hwqid];
20474 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20475 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20478 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
20479 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
20481 if (pvt_pool->count > pvt_pool->low_watermark) {
20482 /* Step 1: move (all - low_watermark) from pvt_pool
20486 /* Move low watermark of bufs from pvt_pool to tmp_list */
20487 INIT_LIST_HEAD(&tmp_list);
20488 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20489 &pvt_pool->list, list) {
20490 list_move_tail(&lpfc_ncmd->list, &tmp_list);
20492 if (tmp_count >= pvt_pool->low_watermark)
20496 /* Move all bufs from pvt_pool to pbl_pool */
20497 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20499 /* Move all bufs from tmp_list to pvt_pool */
20500 list_splice(&tmp_list, &pvt_pool->list);
20502 pbl_pool->count += (pvt_pool->count - tmp_count);
20503 pvt_pool->count = tmp_count;
20505 /* Step 2: move the rest from pvt_pool to pbl_pool */
20506 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20507 pbl_pool->count += pvt_pool->count;
20508 pvt_pool->count = 0;
20511 spin_unlock(&pvt_pool->lock);
20512 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
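/*
 * Illustrative sketch only, not part of the driver: the "keep the low
 * watermark, move the rest" list split used in step 1 above, shown on
 * plain list_heads. With 24 entries and keep == 8, @dst receives 16
 * entries and @src keeps 8. The helper name is hypothetical.
 */
static void lpfc_example_split_keep(struct list_head *src,
				    struct list_head *dst, u32 keep)
{
	struct list_head *pos, *next;
	LIST_HEAD(kept);
	u32 n = 0;

	/* Peel off the first @keep entries so they survive the splice */
	list_for_each_safe(pos, next, src) {
		list_move_tail(pos, &kept);
		if (++n >= keep)
			break;
	}
	list_splice_init(src, dst);	/* the remainder goes to @dst */
	list_splice(&kept, src);	/* the kept entries go back to @src */
}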
20516 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20517 * @phba: pointer to lpfc hba data structure
20518 * @qp: pointer to HDW queue
20519 * @pbl_pool: specified public free XRI pool
20520 * @pvt_pool: specified private free XRI pool
20521 * @count: number of XRIs to move
20523 * This routine tries to move some free common bufs from the specified pbl_pool
20524 * to the specified pvt_pool. It might move fewer than count XRIs if there are
20525 * not enough in the public pool.
20528 * true - if XRIs are successfully moved from the specified pbl_pool to the
20529 * specified pvt_pool
20530 * false - if the specified pbl_pool is empty or locked by someone else
20533 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20534 struct lpfc_pbl_pool *pbl_pool,
20535 struct lpfc_pvt_pool *pvt_pool, u32 count)
20537 struct lpfc_io_buf *lpfc_ncmd;
20538 struct lpfc_io_buf *lpfc_ncmd_next;
20539 unsigned long iflag;
20542 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
20544 if (pbl_pool->count) {
20545 /* Move a batch of XRIs from public to private pool */
20546 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
20547 list_for_each_entry_safe(lpfc_ncmd,
20551 list_move_tail(&lpfc_ncmd->list,
20560 spin_unlock(&pvt_pool->lock);
20561 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20564 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20571 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20572 * @phba: pointer to lpfc hba data structure.
20573 * @hwqid: index of the HWQ this pool belongs to.
20574 * @count: number of XRIs to move
20576 * This routine tries to find some free common bufs in one of the public
20577 * pools using a round-robin search. The search always starts from the local
20578 * hwqid, then moves to the HWQ found last time (rrb_next_hwqid). Once a
20579 * public pool is found, a batch of free common bufs is moved to the private
20580 * pool on hwqid. It might move fewer than count XRIs if there are not enough.
20582 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
20584 struct lpfc_multixri_pool *multixri_pool;
20585 struct lpfc_multixri_pool *next_multixri_pool;
20586 struct lpfc_pvt_pool *pvt_pool;
20587 struct lpfc_pbl_pool *pbl_pool;
20588 struct lpfc_sli4_hdw_queue *qp;
20593 qp = &phba->sli4_hba.hdwq[hwqid];
20594 multixri_pool = qp->p_multixri_pool;
20595 pvt_pool = &multixri_pool->pvt_pool;
20596 pbl_pool = &multixri_pool->pbl_pool;
20598 /* Check if local pbl_pool is available */
20599 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
20601 #ifdef LPFC_MXP_STAT
20602 multixri_pool->local_pbl_hit_count++;
20607 hwq_count = phba->cfg_hdw_queue;
20609 /* Get the next hwqid which was found last time */
20610 next_hwqid = multixri_pool->rrb_next_hwqid;
20613 /* Go to next hwq */
20614 next_hwqid = (next_hwqid + 1) % hwq_count;
20616 next_multixri_pool =
20617 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
20618 pbl_pool = &next_multixri_pool->pbl_pool;
20620 /* Check if the public free xri pool is available */
20621 ret = _lpfc_move_xri_pbl_to_pvt(
20622 phba, qp, pbl_pool, pvt_pool, count);
20624 /* Exit while-loop if success or all hwqid are checked */
20625 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
20627 /* Starting point for the next time */
20628 multixri_pool->rrb_next_hwqid = next_hwqid;
20631 /* stats: all public pools are empty */
20632 multixri_pool->pbl_empty_count++;
20635 #ifdef LPFC_MXP_STAT
20637 if (next_hwqid == hwqid)
20638 multixri_pool->local_pbl_hit_count++;
20640 multixri_pool->other_pbl_hit_count++;
20646 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
20647 * @phba: pointer to lpfc hba data structure.
20648 * @hwqid: index of the HWQ this pool belongs to.
20650 * This routine gets a batch of XRIs from the pbl_pool if the pvt_pool count
20651 * falls below the low watermark.
20653 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20655 struct lpfc_multixri_pool *multixri_pool;
20656 struct lpfc_pvt_pool *pvt_pool;
20658 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20659 pvt_pool = &multixri_pool->pvt_pool;
20661 if (pvt_pool->count < pvt_pool->low_watermark)
20662 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20666 * lpfc_release_io_buf - Return one IO buf back to free pool
20667 * @phba: pointer to lpfc hba data structure.
20668 * @lpfc_ncmd: IO buf to be returned.
20669 * @qp: pointer to the HDW queue this IO buf belongs to.
20671 * This routine returns one IO buf back to the free pool. If this is an urgent
20672 * IO, the IO buf is returned to the expedite pool. If cfg_xri_rebalancing==1,
20673 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
20674 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
20675 * lpfc_io_buf_list_put.
20677 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
20678 struct lpfc_sli4_hdw_queue *qp)
20680 unsigned long iflag;
20681 struct lpfc_pbl_pool *pbl_pool;
20682 struct lpfc_pvt_pool *pvt_pool;
20683 struct lpfc_epd_pool *epd_pool;
20689 /* MUST zero fields if buffer is reused by another protocol */
20690 lpfc_ncmd->nvmeCmd = NULL;
20691 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
20692 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
20694 if (phba->cfg_xpsgl && !phba->nvmet_support &&
20695 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
20696 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
20698 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
20699 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
20701 if (phba->cfg_xri_rebalancing) {
20702 if (lpfc_ncmd->expedite) {
20703 /* Return to expedite pool */
20704 epd_pool = &phba->epd_pool;
20705 spin_lock_irqsave(&epd_pool->lock, iflag);
20706 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
20708 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20712 /* Avoid invalid access if an IO sneaks in and is being rejected
20713 * just _after_ xri pools are destroyed in lpfc_offline.
20714 * Nothing much can be done at this point.
20716 if (!qp->p_multixri_pool)
20719 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20720 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20722 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20723 abts_io_bufs = qp->abts_scsi_io_bufs;
20724 abts_io_bufs += qp->abts_nvme_io_bufs;
20726 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
20727 xri_limit = qp->p_multixri_pool->xri_limit;
20729 #ifdef LPFC_MXP_STAT
20730 if (xri_owned <= xri_limit)
20731 qp->p_multixri_pool->below_limit_count++;
20733 qp->p_multixri_pool->above_limit_count++;
20736 /* XRI goes to either public or private free xri pool
20737 * based on watermark and xri_limit
20739 if ((pvt_pool->count < pvt_pool->low_watermark) ||
20740 (xri_owned < xri_limit &&
20741 pvt_pool->count < pvt_pool->high_watermark)) {
20742 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
20743 qp, free_pvt_pool);
20744 list_add_tail(&lpfc_ncmd->list,
20747 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20749 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
20750 qp, free_pub_pool);
20751 list_add_tail(&lpfc_ncmd->list,
20754 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20757 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
20759 list_add_tail(&lpfc_ncmd->list,
20760 &qp->lpfc_io_buf_list_put);
20762 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
20768 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
20769 * @phba: pointer to lpfc hba data structure.
20770 * @qp: pointer to HDW queue
20771 * @pvt_pool: pointer to private pool data structure.
20772 * @ndlp: pointer to lpfc nodelist data structure.
20774 * This routine tries to get one free IO buf from the private pool.
20777 * pointer to one free IO buf - if private pool is not empty
20778 * NULL - if private pool is empty
20780 static struct lpfc_io_buf *
20781 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
20782 struct lpfc_sli4_hdw_queue *qp,
20783 struct lpfc_pvt_pool *pvt_pool,
20784 struct lpfc_nodelist *ndlp)
20786 struct lpfc_io_buf *lpfc_ncmd;
20787 struct lpfc_io_buf *lpfc_ncmd_next;
20788 unsigned long iflag;
20790 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
20791 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20792 &pvt_pool->list, list) {
20793 if (lpfc_test_rrq_active(
20794 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
20796 list_del(&lpfc_ncmd->list);
20798 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20801 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20807 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
20808 * @phba: pointer to lpfc hba data structure.
20810 * This routine tries to get one free IO buf from the expedite pool.
20813 * pointer to one free IO buf - if expedite pool is not empty
20814 * NULL - if expedite pool is empty
20816 static struct lpfc_io_buf *
20817 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
20819 struct lpfc_io_buf *lpfc_ncmd;
20820 struct lpfc_io_buf *lpfc_ncmd_next;
20821 unsigned long iflag;
20822 struct lpfc_epd_pool *epd_pool;
20824 epd_pool = &phba->epd_pool;
20827 spin_lock_irqsave(&epd_pool->lock, iflag);
20828 if (epd_pool->count > 0) {
20829 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20830 &epd_pool->list, list) {
20831 list_del(&lpfc_ncmd->list);
20836 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20842 * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs
20843 * @phba: pointer to lpfc hba data structure.
20844 * @ndlp: pointer to lpfc nodelist data structure.
20845 * @hwqid: index of the HWQ this IO belongs to
20846 * @expedite: 1 means this request is urgent.
20848 * This routine will do the following actions and then return a pointer to
20851 * 1. If the private free xri pool is empty, move some XRIs from the public to
20853 * 2. Get one XRI from private free xri pool.
20854 * 3. If we fail to get one from pvt_pool and this is an expedite request,
20855 * get one free xri from expedite pool.
20857 * Note: ndlp is only used on SCSI side for RRQ testing.
20858 * The caller should pass NULL for ndlp on NVME side.
20861 * pointer to one free IO buf - if private pool is not empty
20862 * NULL - if private pool is empty
20864 static struct lpfc_io_buf *
20865 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
20866 struct lpfc_nodelist *ndlp,
20867 int hwqid, int expedite)
20869 struct lpfc_sli4_hdw_queue *qp;
20870 struct lpfc_multixri_pool *multixri_pool;
20871 struct lpfc_pvt_pool *pvt_pool;
20872 struct lpfc_io_buf *lpfc_ncmd;
20874 qp = &phba->sli4_hba.hdwq[hwqid];
20876 multixri_pool = qp->p_multixri_pool;
20877 pvt_pool = &multixri_pool->pvt_pool;
20878 multixri_pool->io_req_count++;
20880 /* If pvt_pool is empty, move some XRIs from public to private pool */
20881 if (pvt_pool->count == 0)
20882 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20884 /* Get one XRI from private free xri pool */
20885 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
20888 lpfc_ncmd->hdwq = qp;
20889 lpfc_ncmd->hdwq_no = hwqid;
20890 } else if (expedite) {
20891 /* If we fail to get one from pvt_pool and this is an expedite
20892 * request, get one free xri from expedite pool.
20894 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
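
/*
 * Illustrative sketch (not part of the driver): whether this multiXRI path
 * is used at all is a per-HBA policy decision. A hypothetical helper making
 * that policy explicit:
 */
#if 0	/* example only; never compiled */
static bool example_uses_multixri_pools(struct lpfc_hba *phba)
{
	/* cfg_xri_rebalancing selects the pooled path in
	 * lpfc_get_io_buf() below; otherwise the per-hdwq
	 * get/put lists are used directly.
	 */
	return phba->cfg_xri_rebalancing != 0;
}
#endif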

/* Pull the first usable IO buf off the hdwq's get list */
static inline struct lpfc_io_buf *
lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;

	qp = &phba->sli4_hba.hdwq[idx];
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &qp->lpfc_io_buf_list_get, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;

		if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
			continue;

		list_del_init(&lpfc_cmd->list);
		qp->get_io_bufs--;
		lpfc_cmd->hdwq = qp;
		lpfc_cmd->hdwq_no = idx;
		return lpfc_cmd;
	}
	return NULL;
}

/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: index of the HWQ this request belongs to
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing
 * is 1, it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing
 * is 0, it removes an IO buffer from the head of the @hwqid io_buf_list and
 * returns it to the caller.
 *
 * Note: ndlp is only used on the SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on the NVME side.
 *
 * Return:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    u32 hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag;
	struct lpfc_io_buf *lpfc_cmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_cmd = NULL;

	if (phba->cfg_xri_rebalancing)
		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
			phba, ndlp, hwqid, expedite);
	else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
					  qp, alloc_xri_get);
		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		if (!lpfc_cmd) {
			/* Refill the get list from the put list, then retry */
			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
					  qp, alloc_xri_put);
			list_splice(&qp->lpfc_io_buf_list_put,
				    &qp->lpfc_io_buf_list_get);
			qp->get_io_bufs += qp->put_io_bufs;
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->put_io_bufs = 0;
			spin_unlock(&qp->io_buf_list_put_lock);
			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
			    expedite)
				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		}
		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
	}

	return lpfc_cmd;
}
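
/*
 * Illustrative sketch (not part of the driver): a minimal allocate/release
 * cycle around lpfc_get_io_buf(). The function below is hypothetical; the
 * WQE build and submission step is elided. lpfc_release_io_buf(), defined
 * earlier in this file, is the matching return path.
 */
#if 0	/* example only; never compiled */
static int example_io_cycle(struct lpfc_hba *phba,
			    struct lpfc_nodelist *ndlp, u32 hwqid)
{
	struct lpfc_io_buf *lpfc_cmd;

	/* SCSI callers pass ndlp for RRQ testing; NVME callers pass NULL */
	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
	if (!lpfc_cmd)
		return -EBUSY;	/* free pools exhausted; retry later */

	/* ... build and post the WQE here ... */

	lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
	return 0;
}
#endif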

/**
 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to append the SGL chunk
 *
 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
 * and will allocate an SGL chunk if the pool is empty.
 *
 * Return:
 *   NULL - Error
 *   Pointer to sli4_hybrid_sgl - Success
 **/
struct sli4_hybrid_sgl *
lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct sli4_hybrid_sgl *allocated_sgl = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the sgl_list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list, list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_sgl_xtra_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8353 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
					      GFP_ATOMIC, &tmp->dma_phys_sgl);
		if (!tmp->dma_sgl) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8354 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
	}

	allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
					struct sli4_hybrid_sgl,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_sgl;
}

/**
 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the SGL chunk
 *
 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
 *
 * Return: 0 on success, -EINVAL if @lpfc_buf has no extra SGL chunks.
 **/
int
lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
	if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_sgl_xtra_list,
					 list_node)
			list_move_tail(&list_entry->list_node, buf_list);
	} else {
		rc = -EINVAL;
	}
	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}
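
/*
 * Illustrative sketch (not part of the driver): a hypothetical IO path that
 * needs more SGEs than the embedded SGL provides grabs one chunk on the way
 * down and returns every attached chunk at completion. dma_phys_sgl is the
 * address a caller would reference from the WQE.
 */
#if 0	/* example only; never compiled */
static int example_sgl_grow_then_release(struct lpfc_hba *phba,
					 struct lpfc_io_buf *lpfc_cmd)
{
	struct sli4_hybrid_sgl *sgl;

	sgl = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
	if (!sgl)
		return -ENOMEM;

	/* ... fill sgl->dma_sgl; program sgl->dma_phys_sgl into the WQE ... */

	/* at completion, all chunks on dma_sgl_xtra_list go back to the pool */
	return lpfc_put_sgl_per_hdwq(phba, lpfc_cmd);
}
#endif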

/**
 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup sgl buff resources on
 *
 * This routine frees all SGL chunks of hdwq SGL chunk pool.
 *
 * Return: None
 **/
void
lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
		       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->sgl_list;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free sgl pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list, list_node) {
		dma_pool_free(phba->lpfc_sg_dma_buf_pool,
			      list_entry->dma_sgl,
			      list_entry->dma_phys_sgl);
		list_del(&list_entry->list_node);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}

/**
 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
 *
 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
 * and will allocate a CMD/RSP buffer if the pool is empty.
 *
 * Return:
 *   NULL - Error
 *   Pointer to fcp_cmd_rsp_buf - Success
 **/
struct fcp_cmd_rsp_buf *
lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_cmd_rsp_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8355 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
					       GFP_ATOMIC,
					       &tmp->fcp_cmd_rsp_dma_handle);

		if (!tmp->fcp_cmnd) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8356 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		/* FCP RSP immediately follows the FCP CMND in one DMA buffer */
		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
				sizeof(struct fcp_cmnd));

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
	}

	allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
					struct fcp_cmd_rsp_buf,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_buf;
}

/**
 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the CMD/RSP buf
 *
 * This routine puts one CMD/RSP buffer into the hdwq's CMD/RSP pool.
 *
 * Return: 0 on success, -EINVAL if @lpfc_buf has no CMD/RSP buffers.
 **/
int
lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_cmd_rsp_list,
					 list_node)
			list_move_tail(&list_entry->list_node, buf_list);
	} else {
		rc = -EINVAL;
	}
	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}
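
/*
 * Illustrative sketch (not part of the driver): a hypothetical FCP setup
 * path that attaches a CMD/RSP buffer to an IO and returns it on completion.
 * fcp_cmnd and fcp_rsp live in one DMA allocation, with fcp_rsp starting
 * right after the command (see the pointer math above).
 */
#if 0	/* example only; never compiled */
static int example_cmd_rsp_cycle(struct lpfc_hba *phba,
				 struct lpfc_io_buf *lpfc_cmd)
{
	struct fcp_cmd_rsp_buf *buf;

	buf = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
	if (!buf)
		return -ENOMEM;

	memset(buf->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	/* ... fill the FCP_CMND IU; its DMA address is
	 * buf->fcp_cmd_rsp_dma_handle ...
	 */

	/* at completion, hand the buffer back to the hdwq pool */
	return lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
}
#endif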

/**
 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup cmd rsp buff resources on
 *
 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
 *
 * Return: None
 **/
void
lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free cmd_rsp buf pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list,
				 list_node) {
		dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
			      list_entry->fcp_cmnd,
			      list_entry->fcp_cmd_rsp_dma_handle);
		list_del(&list_entry->list_node);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
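
/*
 * Illustrative sketch (not part of the driver): a hypothetical teardown
 * that drains both per-hdwq pools for every hardware queue before the
 * queue structures themselves are destroyed.
 */
#if 0	/* example only; never compiled */
static void example_drain_hdwq_pools(struct lpfc_hba *phba)
{
	u32 idx;

	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		struct lpfc_sli4_hdw_queue *hdwq =
			&phba->sli4_hba.hdwq[idx];

		lpfc_free_sgl_per_hdwq(phba, hdwq);
		lpfc_free_cmd_rsp_buf_per_hdwq(phba, hdwq);
	}
}
#endif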