/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq,
				    struct lpfc_cqe *cqe);
union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* IWRITE template */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* ICMND template */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
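
/*
 * Editor's note: an illustrative sketch, not driver code, of how the
 * templates above are typically consumed.  The I/O build path copies a
 * template into the per-I/O WQE and then fills in only the words the
 * comments above mark as variable, for example:
 *
 *	union lpfc_wqe128 *wqe = &pwqeq->wqe;
 *
 *	memcpy(wqe, &lpfc_iread_cmd_template, sizeof(union lpfc_wqe128));
 *	wqe->fcp_iread.total_xfer_len = xfer_len;                 // Word 4
 *	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);  // Word 9
 *
 * (pwqeq and xfer_len are placeholders for the caller's request state.)
 */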
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy. Must be a multiple of sizeof(uint64_t).
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
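
/*
 * Editor's note: a minimal usage illustration of the contract above.
 * Copying one WQE into queue memory is expressed as
 *
 *	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
 *
 * where the count is a byte count that the loop walks in
 * sizeof(uint64_t) strides, hence the multiple-of-8 requirement.
 */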
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;

	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
					q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
					q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
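
/*
 * Editor's illustration of the full-queue test above, assuming
 * entry_count = 8: with host_index = 6 and hba_index = 7, the next
 * slot is idx = (6 + 1) % 8 = 7 == hba_index, so the WQ is treated
 * as full and -EBUSY is returned.  One slot is always sacrificed so
 * a full ring can be distinguished from an empty one.
 */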
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return;

	q->hba_index = index;
}
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;

	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
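
/*
 * Editor's note: an illustrative put/release pairing for the MQ,
 * assuming the usual single outstanding mailbox command:
 *
 *	lpfc_sli4_mq_put(mq, mqe);	// copy MQE and ring MQ doorbell
 *	// ... mailbox completion arrives via the EQ/CQ path ...
 *	lpfc_sli4_mq_release(mq);	// clear phba->mbox, advance hba_index
 */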
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;

	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return eqe;
}
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}
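
/*
 * Editor's illustration of the qe_valid phase scheme above, assuming
 * an EQ with entry_count = 256 on eqav-capable hardware: on the first
 * lap the HBA writes EQEs with valid == 1 and the host consumes while
 * qe_valid == 1.  When host_index wraps from 255 to 0 the toggle flips
 * qe_valid to 0, so leftover first-lap entries (still marked 1) no
 * longer compare equal in lpfc_sli4_eq_get() and are not re-processed.
 */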
static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe = NULL;
	u32 eq_count = 0, cq_count = 0;
	struct lpfc_cqe *cqe = NULL;
	struct lpfc_queue *cq = NULL, *childq = NULL;
	int cqid = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		/* Get the reference to the corresponding CQ */
		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
		cq = NULL;

		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id == cqid) {
				cq = childq;
				break;
			}
		}
		/* If CQ is valid, iterate through it and drop all the CQEs */
		if (cq) {
			cqe = lpfc_sli4_cq_get(cq);
			while (cqe) {
				__lpfc_sli4_consume_cqe(phba, cq, cqe);
				cq_count++;
				cqe = lpfc_sli4_cq_get(cq);
			}
			/* Clear and re-arm the CQ */
			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
			    LPFC_QUEUE_REARM);
			cq_count = 0;
		}
		eq_count++;
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}
static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		     uint8_t rearm)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	xchg(&eq->queue_claimed, 0);

rearm_and_exit:
	/* Always clear the EQ. */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

	return count;
}
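
/*
 * Editor's illustration of the batching above, assuming
 * notify_interval = 16 and max_proc_limit = 64: EQEs are consumed
 * in software only, with a NOARM doorbell releasing each batch of 16
 * back to the HBA (resetting "consumed"), and after 64 entries the
 * loop breaks so one EQ cannot monopolize the CPU; the final doorbell
 * write releases the remainder using the caller's rearm policy.
 */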
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;

	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return cqe;
}
static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}
/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The content of the Header RQE.
 * @drqe: The content of the Data RQE.
 *
 * This routine will copy the contents of @hrqe to the next available entry on
 * @hq, and @drqe to the matching entry on @dq. This function will then ring
 * the Receive Queue Doorbell to signal the HBA to start processing the
 * Receive Queue Entry. This function returns the index that the rqe was
 * copied to if successful. If no entries are available on @hq then this
 * function will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;

	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
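
/*
 * Editor's note: an illustrative sketch of the paired-RQ contract
 * above.  Header and data RQEs must land at the same index, so a
 * typical buffer post (hbuf/dbuf are hypothetical DMA buffers) is:
 *
 *	hrqe.address_lo = putPaddrLow(hbuf->phys);
 *	hrqe.address_hi = putPaddrHigh(hbuf->phys);
 *	drqe.address_lo = putPaddrLow(dbuf->phys);
 *	drqe.address_hi = putPaddrHigh(dbuf->phys);
 *	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *
 * A negative return (-EINVAL on type/index mismatch, -EBUSY when
 * full) means neither queue was modified.
 */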
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}
/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}
/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	/* Lookup did to verify if did is still active on this vport */
	if (rrq->vport)
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function takes the hbalock. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The targets DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did){
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If ndlp is NULL Remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport != vport)
			continue;

		if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
			list_move(&rrq->list, &rrq_list);

	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}
/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
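
/*
 * Editor's illustration of the RRQ window armed above: with
 * fc_ratov = 10 seconds, rrq_stop_time lands RATOV + 1 = 11 seconds
 * in the future, so the XRI stays marked in active_rrqs_xri_bitmap
 * for that long.  lpfc_handle_rrq_active() later moves expired
 * entries off active_rrq_list and either sends the RRQ ELS or simply
 * clears the bitmap bit and frees the rrq.
 */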
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not empty
 * then it is successful, it returns pointer to the newly allocated sglq
 * object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = NULL;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
		pring = phba->sli4_hba.nvmels_wq->pring;
	else
		pring = lpfc_phba_elsring(phba);

	lockdep_assert_held(&pring->ring_lock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}
/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object
 * to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
 * asserted held in the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);

			/* Check if we can get a reference on ndlp */
			if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
				sglq->ndlp = NULL;

			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			pring = lpfc_phba_elsring(phba);
			/* Check if TXQ queue needs to be serviced */
			if (pring && (!list_empty(&pring->txq)))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object to the
 * iocb pool. The iotag in the iocb object does not change for each
 * use of the iocb object. This function clears all other fields of
 * the iocb object when it is freed. The hbalock is asserted held in
 * the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * field.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (piocb->wqe_cmpl) {
			if (piocb->iocb_flag & LPFC_IO_NVME)
				lpfc_nvme_cancel_iocb(phba, piocb,
						      ulpstatus, ulpWord4);
			else
				lpfc_sli_release_iocbq(phba, piocb);
		} else if (piocb->iocb_cmpl) {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		} else {
			lpfc_sli_release_iocbq(phba, piocb);
		}
	}
}
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
	case CMD_SEND_FRAME:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
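
/*
 * Editor's note: an illustrative sketch of how a ring event handler
 * consumes this classification:
 *
 *	type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
 *	switch (type) {
 *	case LPFC_SOL_IOCB:	// match to txcmplq, run completion
 *	case LPFC_UNSOL_IOCB:	// hand to unsolicited event handlers
 *	case LPFC_ABORT_IOCB:	// complete the aborted command
 *	default:		// LPFC_UNKNOWN_IOCB: log and drop
 *		break;
 *	}
 */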
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (pmb == NULL)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}
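
/*
 * Editor's note on the timer math above: the ELS timeout is armed for
 * twice RATOV (fc_ratov << 1).  With fc_ratov = 10 seconds, for
 * example, that is msecs_to_jiffies(20000), and the timer is pushed
 * out again each time another ELS command is added to the txcmplq.
 */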
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}
1772 * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
1773 * @phba: Pointer to HBA context object.
1774 * @cmdiocb: Pointer to driver command iocb object.
1775 * @cmf_cmpl: Pointer to completed WCQE.
1777 * This routine will inform the driver of any BW adjustments we need
1778 * to make. These changes will be picked up during the next CMF
1779 * timer interrupt. In addition, any BW changes will be logged
1780 * with LOG_CGN_MGMT.
1783 lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1784 struct lpfc_wcqe_complete *cmf_cmpl)
1786 union lpfc_wqe128 *wqe;
1787 uint32_t status, info;
1788 uint64_t bw, bwdif, slop;
1789 uint64_t pcent, bwpcent;
1790 int asig, afpin, sigcnt, fpincnt;
1791 int wsigmax, wfpinmax, cg, tdp;
1794 /* First check for error */
1795 status = bf_get(lpfc_wcqe_c_status, cmf_cmpl);
1797 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1798 "6211 CMF_SYNC_WQE Error "
1799 "req_tag x%x status x%x hwstatus x%x "
1800 "tdatap x%x parm x%x\n",
1801 bf_get(lpfc_wcqe_c_request_tag, cmf_cmpl),
1802 bf_get(lpfc_wcqe_c_status, cmf_cmpl),
1803 bf_get(lpfc_wcqe_c_hw_status, cmf_cmpl),
1804 cmf_cmpl->total_data_placed,
1805 cmf_cmpl->parameter);
1809 /* Gather congestion information on a successful cmpl */
1810 info = cmf_cmpl->parameter;
1811 phba->cmf_active_info = info;
1813 /* See if firmware info count is valid or has changed */
1814 if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
1817 phba->cmf_info_per_interval = info;
1819 tdp = bf_get(lpfc_wcqe_c_cmf_bw, cmf_cmpl);
1820 cg = bf_get(lpfc_wcqe_c_cmf_cg, cmf_cmpl);
1822 /* Get BW requirement from firmware */
1823 bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
1825 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1826 "6212 CMF_SYNC_WQE x%x: NULL bw\n",
1827 bf_get(lpfc_wcqe_c_request_tag, cmf_cmpl));
1831 /* Gather information needed for logging if a BW change is required */
1832 wqe = &cmdiocb->wqe;
1833 asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
1834 afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
1835 fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
1836 sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
1837 if (phba->cmf_max_bytes_per_interval != bw ||
1838 (asig || afpin || sigcnt || fpincnt)) {
1839 /* Are we increasing or decreasing BW */
1840 if (phba->cmf_max_bytes_per_interval < bw) {
1841 bwdif = bw - phba->cmf_max_bytes_per_interval;
1842 s = "Increase";
1843 } else {
1844 bwdif = phba->cmf_max_bytes_per_interval - bw;
1845 s = "Decrease";
1846 }
1848 /* What is the change percentage */
1849 slop = div_u64(phba->cmf_link_byte_count, 200); /* For rounding */
1850 pcent = div64_u64(bwdif * 100 + slop,
1851 phba->cmf_link_byte_count);
1852 bwpcent = div64_u64(bw * 100 + slop,
1853 phba->cmf_link_byte_count);
1854 if (asig) {
1855 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1856 "6237 BW Threshold %lld%% (%lld): "
1857 "%lld%% %s: Signal Alarm: cg:%d "
1858 "Info:%u\n",
1859 bwpcent, bw, pcent, s, cg,
1860 phba->cmf_active_info);
1861 } else if (afpin) {
1862 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1863 "6238 BW Threshold %lld%% (%lld): "
1864 "%lld%% %s: FPIN Alarm: cg:%d "
1865 "Info:%u\n",
1866 bwpcent, bw, pcent, s, cg,
1867 phba->cmf_active_info);
1868 } else if (sigcnt) {
1869 wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
1870 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1871 "6239 BW Threshold %lld%% (%lld): "
1872 "%lld%% %s: Signal Warning: "
1873 "Cnt %d Max %d: cg:%d Info:%u\n",
1874 bwpcent, bw, pcent, s, sigcnt,
1875 wsigmax, cg, phba->cmf_active_info);
1876 } else if (fpincnt) {
1877 wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
1878 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1879 "6240 BW Threshold %lld%% (%lld): "
1880 "%lld%% %s: FPIN Warning: "
1881 "Cnt %d Max %d: cg:%d Info:%u\n",
1882 bwpcent, bw, pcent, s, fpincnt,
1883 wfpinmax, cg, phba->cmf_active_info);
1884 } else {
1885 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1886 "6241 BW Threshold %lld%% (%lld): "
1887 "CMF %lld%% %s: cg:%d Info:%u\n",
1888 bwpcent, bw, pcent, s, cg,
1889 phba->cmf_active_info);
1890 }
1891 } else if (info) {
1892 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1893 "6246 Info Threshold %u\n", info);
1894 }
1896 /* Save BW change to be picked up during next timer interrupt */
1897 phba->cmf_last_sync_bw = bw;
1898 out:
1899 lpfc_sli_release_iocbq(phba, cmdiocb);
1900 }
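/*
 * Example (editor's sketch, not in the original source; hypothetical
 * numbers): with cmf_link_byte_count = 1,000,000 bytes per interval,
 * slop = 1,000,000 / 200 = 5,000. For bwdif = 249,960:
 *
 *	pcent = (249960 * 100 + 5000) / 1000000 = 25
 *
 * whereas the plain truncating division 24996000 / 1000000 would report
 * 24. The +slop term (0.5% of the divisor) nudges values that fall just
 * short of a whole percent up to the intended boundary.
 */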
1903 * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
1904 * @phba: Pointer to HBA context object.
1905 * @ms: ms to set in WQE interval, 0 means use init op
1906 * @total: Total rcv bytes for this interval
1908 * This routine is called every CMF timer interrupt. Its purpose is
1909 * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
1910 * that may indicate we have congestion (FPINs or Signals). Upon
1911 * completion, the firmware will indicate any BW restrictions the
1912 * driver may need to apply.
1913 */
1914 static int
1915 lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
1916 {
1917 union lpfc_wqe128 *wqe;
1918 struct lpfc_iocbq *sync_buf;
1919 unsigned long iflags;
1920 u32 ret_val;
1921 u32 atot, wtot, max;
1923 /* First address any alarm / warning activity */
1924 atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
1925 wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
1927 /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
1928 if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
1929 phba->link_state == LPFC_LINK_DOWN)
1930 return 0;
1932 spin_lock_irqsave(&phba->hbalock, iflags);
1933 sync_buf = __lpfc_sli_get_iocbq(phba);
1934 if (!sync_buf) {
1935 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
1936 "6213 No available WQEs for CMF_SYNC_WQE\n");
1937 ret_val = ENOMEM;
1938 goto out_unlock;
1939 }
1941 wqe = &sync_buf->wqe;
1943 /* WQEs are reused. Clear stale data and set key fields to zero */
1944 memset(wqe, 0, sizeof(*wqe));
1946 /* If this is the very first CMF_SYNC_WQE, issue an init operation */
1947 if (!ms) {
1948 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1949 "6441 CMF Init %d - CMF_SYNC_WQE\n",
1950 phba->fc_eventTag);
1951 bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
1952 bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
1953 goto initpath;
1954 }
1956 bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
1957 bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);
1959 /* Check for alarms / warnings */
1960 if (atot) {
1961 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1962 /* We hit a Signal alarm condition */
1963 bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
1964 } else {
1965 /* We hit a FPIN alarm condition */
1966 bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
1967 }
1968 } else if (wtot) {
1969 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
1970 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1971 /* We hit a Signal warning condition */
1972 max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
1973 lpfc_acqe_cgn_frequency;
1974 bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
1975 bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
1976 } else {
1977 /* We hit a FPIN warning condition */
1978 bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
1979 bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
1980 }
1981 }
1983 /* Update total read blocks during previous timer interval */
1984 wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);
1986 initpath:
1987 bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
1988 wqe->cmf_sync.event_tag = phba->fc_eventTag;
1989 bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);
1991 /* Setup reqtag to match the wqe completion. */
1992 bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
1994 bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
1996 bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
1997 bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
1998 bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
2000 sync_buf->vport = phba->pport;
2001 sync_buf->wqe_cmpl = lpfc_cmf_sync_cmpl;
2002 sync_buf->iocb_cmpl = NULL;
2003 sync_buf->context1 = NULL;
2004 sync_buf->context2 = NULL;
2005 sync_buf->context3 = NULL;
2006 sync_buf->sli4_xritag = NO_XRI;
2008 sync_buf->iocb_flag |= LPFC_IO_CMF;
2009 ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
2010 if (ret_val)
2011 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
2012 "6214 Cannot issue CMF_SYNC_WQE: x%x\n",
2013 ret_val);
2014 out_unlock:
2015 spin_unlock_irqrestore(&phba->hbalock, iflags);
2016 return ret_val;
2017 }
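/*
 * Usage sketch (editor's addition, not in the original source): the CMF
 * timer path is expected to drive this routine once per interval, with
 * ms == 0 only for the very first call so the WQE carries the init op:
 *
 *	lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, rcv_bytes);
 *
 * where rcv_bytes stands in for the bytes received during the previous
 * interval (a hypothetical counter name). The firmware's answer arrives
 * in lpfc_cmf_sync_cmpl() above and is parked in cmf_last_sync_bw for
 * the next timer tick.
 */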
2020 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
2021 * @phba: Pointer to HBA context object.
2022 * @pring: Pointer to driver SLI ring object.
2024 * This function is called with the hbalock held and the caller must post the
2025 * iocb without releasing the lock. If the caller releases the lock,
2026 * the iocb slot returned by the function is not guaranteed to be available.
2027 * The function returns a pointer to the next available iocb slot if a
2028 * slot is available in the ring, else it returns NULL.
2029 * If the get index of the ring is ahead of the put index, the function
2030 * will post an error attention event to the worker thread to take the
2031 * HBA to offline state.
2034 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2036 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2037 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
2039 lockdep_assert_held(&phba->hbalock);
2041 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
2042 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
2043 pring->sli.sli3.next_cmdidx = 0;
2045 if (unlikely(pring->sli.sli3.local_getidx ==
2046 pring->sli.sli3.next_cmdidx)) {
2048 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
2050 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
2051 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2052 "0315 Ring %d issue: portCmdGet %d "
2053 "is bigger than cmd ring %d\n",
2054 pring->ringno,
2055 pring->sli.sli3.local_getidx,
2056 max_cmd_idx);
2058 phba->link_state = LPFC_HBA_ERROR;
2059 /*
2060 * All error attention handlers are posted to
2061 * worker thread
2062 */
2063 phba->work_ha |= HA_ERATT;
2064 phba->work_hs = HS_FFER3;
2066 lpfc_worker_wake_up(phba);
2067 return NULL;
2068 }
2071 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
2072 return NULL;
2075 return lpfc_cmd_iocb(phba, pring);
2076 }
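/*
 * Worked example (editor's addition, hypothetical values): with
 * numCiocb = 32 and cmdidx == next_cmdidx == 31, the pre-increment
 * pushes next_cmdidx to 32, which wraps to 0. If the port's cmdGetInx
 * is also 0 the producer would catch the consumer, so NULL is returned;
 * one slot is always sacrificed to tell a full ring from an empty one.
 */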
2079 * lpfc_sli_next_iotag - Get an iotag for the iocb
2080 * @phba: Pointer to HBA context object.
2081 * @iocbq: Pointer to driver iocb object.
2083 * This function gets an iotag for the iocb. If there is no unused iotag and
2084 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
2085 * array and assigns a new iotag.
2086 * The function returns the allocated iotag if successful, else returns zero.
2087 * Zero is not a valid iotag.
2088 * The caller is not required to hold any lock.
2091 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
2092 {
2093 struct lpfc_iocbq **new_arr;
2094 struct lpfc_iocbq **old_arr;
2095 size_t new_len;
2096 struct lpfc_sli *psli = &phba->sli;
2097 uint16_t iotag;
2099 spin_lock_irq(&phba->hbalock);
2100 iotag = psli->last_iotag;
2101 if (++iotag < psli->iocbq_lookup_len) {
2102 psli->last_iotag = iotag;
2103 psli->iocbq_lookup[iotag] = iocbq;
2104 spin_unlock_irq(&phba->hbalock);
2105 iocbq->iotag = iotag;
2106 return iotag;
2107 } else if (psli->iocbq_lookup_len < (0xffff
2108 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
2109 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
2110 spin_unlock_irq(&phba->hbalock);
2111 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
2112 GFP_KERNEL);
2113 if (new_arr) {
2114 spin_lock_irq(&phba->hbalock);
2115 old_arr = psli->iocbq_lookup;
2116 if (new_len <= psli->iocbq_lookup_len) {
2117 /* highly improbable case */
2118 kfree(new_arr);
2119 iotag = psli->last_iotag;
2120 if (++iotag < psli->iocbq_lookup_len) {
2121 psli->last_iotag = iotag;
2122 psli->iocbq_lookup[iotag] = iocbq;
2123 spin_unlock_irq(&phba->hbalock);
2124 iocbq->iotag = iotag;
2125 return iotag;
2126 }
2127 spin_unlock_irq(&phba->hbalock);
2128 return 0;
2129 }
2130 if (psli->iocbq_lookup)
2131 memcpy(new_arr, old_arr,
2132 ((psli->last_iotag + 1) *
2133 sizeof (struct lpfc_iocbq *)));
2134 psli->iocbq_lookup = new_arr;
2135 psli->iocbq_lookup_len = new_len;
2136 psli->last_iotag = iotag;
2137 psli->iocbq_lookup[iotag] = iocbq;
2138 spin_unlock_irq(&phba->hbalock);
2139 iocbq->iotag = iotag;
2140 kfree(old_arr);
2141 return iotag;
2142 }
2144 spin_unlock_irq(&phba->hbalock);
2145 }
2146 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2147 "0318 Failed to allocate IOTAG. last IOTAG is %d\n",
2148 psli->last_iotag);
2150 return 0;
2151 }
2154 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
2155 * @phba: Pointer to HBA context object.
2156 * @pring: Pointer to driver SLI ring object.
2157 * @iocb: Pointer to iocb slot in the ring.
2158 * @nextiocb: Pointer to driver iocb object which needs to be
2159 * posted to firmware.
2161 * This function is called to post a new iocb to the firmware. This
2162 * function copies the new iocb to ring iocb slot and updates the
2163 * ring pointers. It adds the new iocb to txcmplq if there is
2164 * a completion call back for this iocb else the function will free the
2165 * iocb object. The hbalock is asserted held in the code path calling
2166 * this function.
2169 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2170 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
2175 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
2178 if (pring->ringno == LPFC_ELS_RING) {
2179 lpfc_debugfs_slow_ring_trc(phba,
2180 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
2181 *(((uint32_t *) &nextiocb->iocb) + 4),
2182 *(((uint32_t *) &nextiocb->iocb) + 6),
2183 *(((uint32_t *) &nextiocb->iocb) + 7));
2184 }
2187 * Issue iocb command to adapter
2189 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
2191 pring->stats.iocb_cmd++;
2194 * If there is no completion routine to call, we can release the
2195 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
2196 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
2198 if (nextiocb->iocb_cmpl)
2199 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
2200 else
2201 __lpfc_sli_release_iocbq(phba, nextiocb);
2204 * Let the HBA know what IOCB slot will be the next one the
2205 * driver will put a command into.
2207 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
2208 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
2212 * lpfc_sli_update_full_ring - Update the chip attention register
2213 * @phba: Pointer to HBA context object.
2214 * @pring: Pointer to driver SLI ring object.
2216 * The caller is not required to hold any lock for calling this function.
2217 * This function updates the chip attention bits for the ring to inform firmware
2218 * that there is pending work to be done for this ring and requests an
2219 * interrupt when there is space available in the ring. This function is
2220 * called when the driver is unable to post more iocbs to the ring due
2221 * to unavailability of space in the ring.
2224 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2226 int ringno = pring->ringno;
2228 pring->flag |= LPFC_CALL_RING_AVAILABLE;
2233 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
2234 * The HBA will tell us when an IOCB entry is available.
2236 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
2237 readl(phba->CAregaddr); /* flush */
2239 pring->stats.iocb_cmd_full++;
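/*
 * Example (editor's addition, hypothetical ring number): each ring owns
 * a 4-bit nibble of the Chip Attention register, so for ringno = 2 the
 * value (CA_R0ATT | CA_R0CE_REQ) << 8 lands in bits 8-11. The readl()
 * after the writel() is the usual flush of the posted PCI write.
 */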
2243 * lpfc_sli_update_ring - Update chip attention register
2244 * @phba: Pointer to HBA context object.
2245 * @pring: Pointer to driver SLI ring object.
2247 * This function updates the chip attention register bit for the
2248 * given ring to inform HBA that there is more work to be done
2249 * in this ring. The caller is not required to hold any lock.
2252 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2254 int ringno = pring->ringno;
2257 * Tell the HBA that there is work to do in this ring.
2259 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
2261 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
2262 readl(phba->CAregaddr); /* flush */
2267 * lpfc_sli_resume_iocb - Process iocbs in the txq
2268 * @phba: Pointer to HBA context object.
2269 * @pring: Pointer to driver SLI ring object.
2271 * This function is called with hbalock held to post pending iocbs
2272 * in the txq to the firmware. This function is called when driver
2273 * detects space available in the ring.
2276 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2277 {
2278 IOCB_t *iocb;
2279 struct lpfc_iocbq *nextiocb;
2281 lockdep_assert_held(&phba->hbalock);
2285 * (a) there is anything on the txq to send
2286 * (b) link is up
2287 * (c) link attention events can be processed (fcp ring only)
2288 * (d) IOCB processing is not blocked by the outstanding mbox command.
2291 if (lpfc_is_link_up(phba) &&
2292 (!list_empty(&pring->txq)) &&
2293 (pring->ringno != LPFC_FCP_RING ||
2294 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
2296 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2297 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
2298 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2300 if (iocb)
2301 lpfc_sli_update_ring(phba, pring);
2302 else
2303 lpfc_sli_update_full_ring(phba, pring);
2310 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
2311 * @phba: Pointer to HBA context object.
2312 * @hbqno: HBQ number.
2314 * This function is called with hbalock held to get the next
2315 * available slot for the given HBQ. If there is free slot
2316 * available for the HBQ it will return pointer to the next available
2317 * HBQ entry else it will return NULL.
2319 static struct lpfc_hbq_entry *
2320 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
2322 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2324 lockdep_assert_held(&phba->hbalock);
2326 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
2327 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
2328 hbqp->next_hbqPutIdx = 0;
2330 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
2331 uint32_t raw_index = phba->hbq_get[hbqno];
2332 uint32_t getidx = le32_to_cpu(raw_index);
2334 hbqp->local_hbqGetIdx = getidx;
2336 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
2337 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2338 "1802 HBQ %d: local_hbqGetIdx "
2339 "%u is > than hbqp->entry_count %u\n",
2340 hbqno, hbqp->local_hbqGetIdx,
2343 phba->link_state = LPFC_HBA_ERROR;
2347 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
2348 return NULL;
2351 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
2352 hbqp->hbqPutIdx;
2353 }
2356 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
2357 * @phba: Pointer to HBA context object.
2359 * This function is called with no lock held to free all the
2360 * hbq buffers while uninitializing the SLI interface. It also
2361 * frees the HBQ buffers returned by the firmware but not yet
2362 * processed by the upper layers.
2365 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2367 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2368 struct hbq_dmabuf *hbq_buf;
2369 unsigned long flags;
2370 int i, hbq_count;
2372 hbq_count = lpfc_sli_hbq_count();
2373 /* Return all memory used by all HBQs */
2374 spin_lock_irqsave(&phba->hbalock, flags);
2375 for (i = 0; i < hbq_count; ++i) {
2376 list_for_each_entry_safe(dmabuf, next_dmabuf,
2377 &phba->hbqs[i].hbq_buffer_list, list) {
2378 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2379 list_del(&hbq_buf->dbuf.list);
2380 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2382 phba->hbqs[i].buffer_count = 0;
2383 }
2385 /* Mark the HBQs not in use */
2386 phba->hbq_in_use = 0;
2387 spin_unlock_irqrestore(&phba->hbalock, flags);
2391 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2392 * @phba: Pointer to HBA context object.
2393 * @hbqno: HBQ number.
2394 * @hbq_buf: Pointer to HBQ buffer.
2396 * This function is called with the hbalock held to post a
2397 * hbq buffer to the firmware. If the function finds an empty
2398 * slot in the HBQ, it will post the buffer. The function will return a
2399 * pointer to the hbq entry if it successfully posts the buffer;
2400 * otherwise it will return NULL.
2403 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2404 struct hbq_dmabuf *hbq_buf)
2406 lockdep_assert_held(&phba->hbalock);
2407 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2411 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2412 * @phba: Pointer to HBA context object.
2413 * @hbqno: HBQ number.
2414 * @hbq_buf: Pointer to HBQ buffer.
2416 * This function is called with the hbalock held to post a hbq buffer to the
2417 * firmware. If the function finds an empty slot in the HBQ, it will post the
2418 * buffer and place it on the hbq_buffer_list. The function will return zero if
2419 * it successfully posts the buffer; otherwise it returns an error.
2422 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2423 struct hbq_dmabuf *hbq_buf)
2425 struct lpfc_hbq_entry *hbqe;
2426 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2428 lockdep_assert_held(&phba->hbalock);
2429 /* Get next HBQ entry slot to use */
2430 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2431 if (hbqe) {
2432 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2434 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2435 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2436 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2437 hbqe->bde.tus.f.bdeFlags = 0;
2438 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2439 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2441 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2442 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2443 /* flush */
2444 readl(phba->hbq_put + hbqno);
2445 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2446 return 0;
2447 } else
2448 return -ENOMEM;
2449 }
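/*
 * Example (editor's addition, hypothetical address): putPaddrHigh() and
 * putPaddrLow() split the 64-bit DMA address across the two BDE words,
 * so physaddr 0x0000001234567000 becomes addrHigh 0x12 and addrLow
 * 0x34567000. bdeSize and bdeFlags share one word (tus.w), which is why
 * that word is byte-swapped in a single operation.
 */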
2452 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2453 * @phba: Pointer to HBA context object.
2454 * @hbqno: HBQ number.
2455 * @hbq_buf: Pointer to HBQ buffer.
2457 * This function is called with the hbalock held to post an RQE to the SLI4
2458 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2459 * the hbq_buffer_list and return zero, otherwise it will return an error.
2462 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2463 struct hbq_dmabuf *hbq_buf)
2464 {
2465 int rc;
2466 struct lpfc_rqe hrqe;
2467 struct lpfc_rqe drqe;
2468 struct lpfc_queue *hrq;
2469 struct lpfc_queue *drq;
2471 if (hbqno != LPFC_ELS_HBQ)
2472 return 1;
2473 hrq = phba->sli4_hba.hdr_rq;
2474 drq = phba->sli4_hba.dat_rq;
2476 lockdep_assert_held(&phba->hbalock);
2477 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2478 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2479 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2480 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2481 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2482 if (rc < 0)
2483 return rc;
2484 hbq_buf->tag = (rc | (hbqno << 16));
2485 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2486 return 0;
2487 }
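/*
 * Example (editor's addition, hypothetical values): the tag packs the
 * HBQ number into the upper 16 bits and the RQE index returned by
 * lpfc_sli4_rq_put() into the lower 16, so rc = 0x2A on hbqno 0 yields
 * tag = 0x0000002A. lpfc_sli_hbqbuf_find() and lpfc_sli_free_hbq()
 * below recover the queue number with (tag >> 16).
 */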
2489 /* HBQ for ELS and CT traffic. */
2490 static struct lpfc_hbq_init lpfc_els_hbq = {
2491 .rn = 1,
2492 .entry_count = 256,
2493 .mask_count = 0,
2494 .profile = 0,
2495 .ring_mask = (1 << LPFC_ELS_RING),
2496 .buffer_count = 0,
2497 .init_count = 40,
2498 .add_count = 40,
2499 };
2501 /* Array of HBQs */
2502 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2503 &lpfc_els_hbq,
2504 };
2507 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2508 * @phba: Pointer to HBA context object.
2509 * @hbqno: HBQ number.
2510 * @count: Number of HBQ buffers to be posted.
2512 * This function is called with no lock held to post more hbq buffers to the
2513 * given HBQ. The function returns the number of HBQ buffers successfully
2514 * posted.
2517 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2519 uint32_t i, posted = 0;
2520 unsigned long flags;
2521 struct hbq_dmabuf *hbq_buffer;
2522 LIST_HEAD(hbq_buf_list);
2523 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2524 return 0;
2526 if ((phba->hbqs[hbqno].buffer_count + count) >
2527 lpfc_hbq_defs[hbqno]->entry_count)
2528 count = lpfc_hbq_defs[hbqno]->entry_count -
2529 phba->hbqs[hbqno].buffer_count;
2532 /* Allocate HBQ entries */
2533 for (i = 0; i < count; i++) {
2534 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2535 if (!hbq_buffer)
2536 break;
2537 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2538 }
2539 /* Check whether HBQ is still in use */
2540 spin_lock_irqsave(&phba->hbalock, flags);
2541 if (!phba->hbq_in_use)
2542 goto err;
2543 while (!list_empty(&hbq_buf_list)) {
2544 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2545 dbuf.list);
2546 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2547 (hbqno << 16));
2548 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2549 phba->hbqs[hbqno].buffer_count++;
2550 posted++;
2551 } else
2552 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2553 }
2554 spin_unlock_irqrestore(&phba->hbalock, flags);
2555 return posted;
2556 err:
2557 spin_unlock_irqrestore(&phba->hbalock, flags);
2558 while (!list_empty(&hbq_buf_list)) {
2559 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2560 dbuf.list);
2561 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2562 }
2563 return 0;
2564 }
2567 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2568 * @phba: Pointer to HBA context object.
2569 * @qno: HBQ queue number.
2571 * This function posts more buffers to the HBQ. This function
2572 * is called with no lock held. The function returns the number of HBQ entries
2573 * successfully allocated.
2576 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2578 if (phba->sli_rev == LPFC_SLI_REV4)
2579 return 0;
2580 else
2581 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2582 lpfc_hbq_defs[qno]->add_count);
2583 }
2586 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2587 * @phba: Pointer to HBA context object.
2588 * @qno: HBQ queue number.
2590 * This function is called from SLI initialization code path with
2591 * no lock held to post initial HBQ buffers to firmware. The
2592 * function returns the number of HBQ entries successfully allocated.
2595 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2597 if (phba->sli_rev == LPFC_SLI_REV4)
2598 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2599 lpfc_hbq_defs[qno]->entry_count);
2600 else
2601 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2602 lpfc_hbq_defs[qno]->init_count);
2603 }
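/*
 * Editor's note (not in the original source): the division of labor
 * between the two helpers above is that lpfc_sli_hbqbuf_init_hbqs()
 * seeds the queue at initialization (entry_count on SLI4, init_count on
 * SLI3) while lpfc_sli_hbqbuf_add_hbqs() tops it up at runtime with
 * add_count more buffers on SLI3 (it is a no-op on SLI4). Both funnel
 * into lpfc_sli_hbqbuf_fill_hbqs(), which caps the total at the HBQ's
 * entry_count.
 */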
2606 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2608 * This function removes the first hbq buffer on an hbq list and returns a
2609 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2611 static struct hbq_dmabuf *
2612 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2614 struct lpfc_dmabuf *d_buf;
2616 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2617 if (!d_buf)
2618 return NULL;
2619 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2620 }
2623 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2624 * @phba: Pointer to HBA context object.
2627 * This function removes the first RQ buffer on an RQ buffer list and returns a
2628 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2630 static struct rqb_dmabuf *
2631 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2633 struct lpfc_dmabuf *h_buf;
2634 struct lpfc_rqb *rqbp;
2636 rqbp = hrq->rqbp;
2637 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2638 struct lpfc_dmabuf, list);
2639 if (!h_buf)
2640 return NULL;
2641 rqbp->buffer_count--;
2642 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2646 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2647 * @phba: Pointer to HBA context object.
2648 * @tag: Tag of the hbq buffer.
2650 * This function searches for the hbq buffer associated with the given tag in
2651 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2652 * otherwise it returns NULL.
2654 static struct hbq_dmabuf *
2655 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2657 struct lpfc_dmabuf *d_buf;
2658 struct hbq_dmabuf *hbq_buf;
2659 uint32_t hbqno;
2661 hbqno = tag >> 16;
2662 if (hbqno >= LPFC_MAX_HBQS)
2663 return NULL;
2665 spin_lock_irq(&phba->hbalock);
2666 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2667 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2668 if (hbq_buf->tag == tag) {
2669 spin_unlock_irq(&phba->hbalock);
2670 return hbq_buf;
2671 }
2672 }
2673 spin_unlock_irq(&phba->hbalock);
2674 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2675 "1803 Bad hbq tag. Data: x%x x%x\n",
2676 tag, phba->hbqs[tag >> 16].buffer_count);
2677 return NULL;
2678 }
2681 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2682 * @phba: Pointer to HBA context object.
2683 * @hbq_buffer: Pointer to HBQ buffer.
2685 * This function is called with the hbalock held. This function gives back
2686 * the hbq buffer to firmware. If the HBQ does not have space to
2687 * post the buffer, it will free the buffer.
2690 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2691 {
2692 uint32_t hbqno;
2694 if (hbq_buffer) {
2695 hbqno = hbq_buffer->tag >> 16;
2696 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2697 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2702 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2703 * @mbxCommand: mailbox command code.
2705 * This function is called by the mailbox event handler function to verify
2706 * that the completed mailbox command is a legitimate mailbox command. If the
2707 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2708 * and the mailbox event handler will take the HBA offline.
2711 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2712 {
2713 uint8_t ret;
2715 switch (mbxCommand) {
2719 case MBX_WRITE_VPARMS:
2720 case MBX_RUN_BIU_DIAG:
2723 case MBX_CONFIG_LINK:
2724 case MBX_CONFIG_RING:
2725 case MBX_RESET_RING:
2726 case MBX_READ_CONFIG:
2727 case MBX_READ_RCONFIG:
2728 case MBX_READ_SPARM:
2729 case MBX_READ_STATUS:
2733 case MBX_READ_LNK_STAT:
2735 case MBX_UNREG_LOGIN:
2737 case MBX_DUMP_MEMORY:
2738 case MBX_DUMP_CONTEXT:
2741 case MBX_UPDATE_CFG:
2743 case MBX_DEL_LD_ENTRY:
2744 case MBX_RUN_PROGRAM:
2746 case MBX_SET_VARIABLE:
2747 case MBX_UNREG_D_ID:
2748 case MBX_KILL_BOARD:
2749 case MBX_CONFIG_FARP:
2752 case MBX_RUN_BIU_DIAG64:
2753 case MBX_CONFIG_PORT:
2754 case MBX_READ_SPARM64:
2755 case MBX_READ_RPI64:
2756 case MBX_REG_LOGIN64:
2757 case MBX_READ_TOPOLOGY:
2760 case MBX_LOAD_EXP_ROM:
2761 case MBX_ASYNCEVT_ENABLE:
2765 case MBX_PORT_CAPABILITIES:
2766 case MBX_PORT_IOV_CONTROL:
2767 case MBX_SLI4_CONFIG:
2768 case MBX_SLI4_REQ_FTRS:
2770 case MBX_UNREG_FCFI:
2775 case MBX_RESUME_RPI:
2776 case MBX_READ_EVENT_LOG_STATUS:
2777 case MBX_READ_EVENT_LOG:
2778 case MBX_SECURITY_MGMT:
2780 case MBX_ACCESS_VDATA:
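/*
 * Usage sketch (editor's addition, not in the original source): the
 * elided tail of the switch returns the command code itself for every
 * known command and MBX_SHUTDOWN for anything else, so callers treat
 * the return value as a validity probe:
 *
 *	if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN)
 *		lpfc_handle_eratt(phba);
 *
 * which mirrors how lpfc_sli_handle_mb_event() below reacts before
 * taking the HBA offline.
 */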
2791 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2792 * @phba: Pointer to HBA context object.
2793 * @pmboxq: Pointer to mailbox command.
2795 * This is completion handler function for mailbox commands issued from
2796 * lpfc_sli_issue_mbox_wait function. This function is called by the
2797 * mailbox event handler function with no lock held. This function
2798 * will wake up the thread waiting on the wait queue pointed to by
2799 * context3 of the mailbox.
2802 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2804 unsigned long drvr_flag;
2805 struct completion *pmbox_done;
2808 * If pmbox_done is empty, the driver thread gave up waiting and
2809 * continued running.
2811 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2812 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2813 pmbox_done = (struct completion *)pmboxq->context3;
2814 if (pmbox_done)
2815 complete(pmbox_done);
2816 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
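/*
 * Sketch of the waiting side (editor's addition, assuming the usual
 * completion idiom; not in the original source):
 *
 *	DECLARE_COMPLETION_ONSTACK(mbox_done);
 *
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	pmboxq->context3 = &mbox_done;
 *	lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	wait_for_completion_timeout(&mbox_done, timeout);
 *
 * Taking the hbalock around the context3 dereference above keeps the
 * on-stack completion from vanishing if the waiter times out first.
 */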
2821 __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2823 unsigned long iflags;
2825 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2826 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2827 spin_lock_irqsave(&ndlp->lock, iflags);
2828 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2829 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2830 spin_unlock_irqrestore(&ndlp->lock, iflags);
2831 }
2832 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2833 }
2836 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2837 * @phba: Pointer to HBA context object.
2838 * @pmb: Pointer to mailbox object.
2840 * This function is the default mailbox completion handler. It
2841 * frees the memory resources associated with the completed mailbox
2842 * command. If the completed command is a REG_LOGIN mailbox command,
2843 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2846 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2848 struct lpfc_vport *vport = pmb->vport;
2849 struct lpfc_dmabuf *mp;
2850 struct lpfc_nodelist *ndlp;
2851 struct Scsi_Host *shost;
2852 uint16_t rpi, vpi;
2853 int rc;
2855 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2857 if (mp) {
2858 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2859 kfree(mp);
2860 }
2863 * If a REG_LOGIN succeeded after node is destroyed or node
2864 * is in re-discovery, the driver needs to clean up the RPI.
2866 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2867 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2868 !pmb->u.mb.mbxStatus) {
2869 rpi = pmb->u.mb.un.varWords[0];
2870 vpi = pmb->u.mb.un.varRegLogin.vpi;
2871 if (phba->sli_rev == LPFC_SLI_REV4)
2872 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2873 lpfc_unreg_login(phba, vpi, rpi, pmb);
2874 pmb->vport = vport;
2875 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2876 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2877 if (rc != MBX_NOT_FINISHED)
2878 return;
2879 }
2881 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2882 !(phba->pport->load_flag & FC_UNLOADING) &&
2883 !pmb->u.mb.mbxStatus) {
2884 shost = lpfc_shost_from_vport(vport);
2885 spin_lock_irq(shost->host_lock);
2886 vport->vpi_state |= LPFC_VPI_REGISTERED;
2887 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2888 spin_unlock_irq(shost->host_lock);
2891 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2892 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2893 lpfc_nlp_put(ndlp);
2894 pmb->ctx_buf = NULL;
2895 pmb->ctx_ndlp = NULL;
2896 }
2898 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2899 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2901 /* Check to see if there are any deferred events to process */
2902 if (ndlp) {
2903 lpfc_printf_vlog(
2904 vport,
2905 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2906 "1438 UNREG cmpl deferred mbox x%x "
2907 "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
2908 ndlp->nlp_rpi, ndlp->nlp_DID,
2909 ndlp->nlp_flag, ndlp->nlp_defer_did,
2910 ndlp, vport->load_flag, kref_read(&ndlp->kref));
2912 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2913 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2914 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2915 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2916 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2917 } else {
2918 __lpfc_sli_rpi_release(vport, ndlp);
2919 }
2921 /* The unreg_login mailbox is complete and had a
2922 * reference that has to be released. The PLOGI
2923 * paths have lifted that reference.
2924 */
2926 pmb->ctx_ndlp = NULL;
2927 }
2928 }
2930 /* This nlp_put pairs with lpfc_sli4_resume_rpi */
2931 if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
2932 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2933 lpfc_nlp_put(ndlp);
2934 }
2936 /* Check security permission status on INIT_LINK mailbox command */
2937 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2938 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2939 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2940 "2860 SLI authentication is required "
2941 "for INIT_LINK but has not done yet\n");
2943 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2944 lpfc_sli4_mbox_cmd_free(phba, pmb);
2945 else
2946 mempool_free(pmb, phba->mbox_mem_pool);
2947 }
2949 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2950 * @phba: Pointer to HBA context object.
2951 * @pmb: Pointer to mailbox object.
2953 * This function is the unreg rpi mailbox completion handler. It
2954 * frees the memory resources associated with the completed mailbox
2955 * command. An additional reference is put on the ndlp to prevent
2956 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2957 * the unreg mailbox command completes; this routine puts the
2958 * reference back.
2962 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2964 struct lpfc_vport *vport = pmb->vport;
2965 struct lpfc_nodelist *ndlp;
2967 ndlp = pmb->ctx_ndlp;
2968 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2969 if (phba->sli_rev == LPFC_SLI_REV4 &&
2970 (bf_get(lpfc_sli_intf_if_type,
2971 &phba->sli4_hba.sli_intf) >=
2972 LPFC_SLI_INTF_IF_TYPE_2)) {
2973 if (ndlp) {
2974 lpfc_printf_vlog(
2975 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2976 "0010 UNREG_LOGIN vpi:%x "
2977 "rpi:%x DID:%x defer x%x flg x%x "
2978 "x%px\n",
2979 vport->vpi, ndlp->nlp_rpi,
2980 ndlp->nlp_DID, ndlp->nlp_defer_did,
2981 ndlp->nlp_flag,
2982 ndlp);
2983 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2985 /* Check to see if there are any deferred
2986 * events to process
2987 */
2988 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2989 (ndlp->nlp_defer_did !=
2990 NLP_EVT_NOTHING_PENDING)) {
2992 vport, KERN_INFO, LOG_DISCOVERY,
2993 "4111 UNREG cmpl deferred "
2995 "NPort x%x Data: x%x x%px\n",
2996 ndlp->nlp_rpi, ndlp->nlp_DID,
2997 ndlp->nlp_defer_did, ndlp);
2998 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2999 ndlp->nlp_defer_did =
3000 NLP_EVT_NOTHING_PENDING;
3001 lpfc_issue_els_plogi(
3002 vport, ndlp->nlp_DID, 0);
3004 __lpfc_sli_rpi_release(vport, ndlp);
3011 mempool_free(pmb, phba->mbox_mem_pool);
3015 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
3016 * @phba: Pointer to HBA context object.
3018 * This function is called with no lock held. This function processes all
3019 * the completed mailbox commands and gives it to upper layers. The interrupt
3020 * service routine processes mailbox completion interrupt and adds completed
3021 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
3022 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
3023 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
3024 * function returns the mailbox commands to the upper layer by calling the
3025 * completion handler function of each mailbox.
3028 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
3029 {
3030 MAILBOX_t *pmbox;
3031 LPFC_MBOXQ_t *pmb;
3032 int rc;
3033 LIST_HEAD(cmplq);
3035 phba->sli.slistat.mbox_event++;
3037 /* Get all completed mailbox buffers into the cmplq */
3038 spin_lock_irq(&phba->hbalock);
3039 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
3040 spin_unlock_irq(&phba->hbalock);
3042 /* Get a Mailbox buffer to setup mailbox commands for callback */
3043 do {
3044 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
3045 if (pmb == NULL)
3046 break;
3048 pmbox = &pmb->u.mb;
3050 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
3051 if (pmb->vport) {
3052 lpfc_debugfs_disc_trc(pmb->vport,
3053 LPFC_DISC_TRC_MBOX_VPORT,
3054 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
3055 (uint32_t)pmbox->mbxCommand,
3056 pmbox->un.varWords[0],
3057 pmbox->un.varWords[1]);
3058 }
3059 else {
3060 lpfc_debugfs_disc_trc(phba->pport,
3061 LPFC_DISC_TRC_MBOX,
3062 "MBOX cmpl: cmd:x%x mb:x%x x%x",
3063 (uint32_t)pmbox->mbxCommand,
3064 pmbox->un.varWords[0],
3065 pmbox->un.varWords[1]);
3070 * It is a fatal error if an unknown mbox command completes.
3071 */
3072 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
3073 MBX_SHUTDOWN) {
3074 /* Unknown mailbox command compl */
3075 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3076 "(%d):0323 Unknown Mailbox command "
3077 "x%x (x%x/x%x) Cmpl\n",
3078 pmb->vport ? pmb->vport->vpi :
3079 0,
3080 pmbox->mbxCommand,
3081 lpfc_sli_config_mbox_subsys_get(phba,
3082 pmb),
3083 lpfc_sli_config_mbox_opcode_get(phba,
3084 pmb));
3085 phba->link_state = LPFC_HBA_ERROR;
3086 phba->work_hs = HS_FFER3;
3087 lpfc_handle_eratt(phba);
3088 continue;
3089 }
3091 if (pmbox->mbxStatus) {
3092 phba->sli.slistat.mbox_stat_err++;
3093 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
3094 /* Mbox cmd cmpl error - RETRYing */
3095 lpfc_printf_log(phba, KERN_INFO,
3097 "(%d):0305 Mbox cmd cmpl "
3098 "error - RETRYing Data: x%x "
3099 "(x%x/x%x) x%x x%x x%x\n",
3100 pmb->vport ? pmb->vport->vpi :
3101 0,
3102 pmbox->mbxCommand,
3103 lpfc_sli_config_mbox_subsys_get(phba,
3104 pmb),
3105 lpfc_sli_config_mbox_opcode_get(phba,
3106 pmb),
3107 pmbox->mbxStatus,
3108 pmbox->un.varWords[0],
3109 pmb->vport ? pmb->vport->port_state :
3110 LPFC_VPORT_UNKNOWN);
3111 pmbox->mbxStatus = 0;
3112 pmbox->mbxOwner = OWN_HOST;
3113 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3114 if (rc != MBX_NOT_FINISHED)
3115 continue;
3116 }
3117 }
3119 /* Mailbox cmd <cmd> Cmpl <cmpl> */
3120 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
3121 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
3122 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
3124 pmb->vport ? pmb->vport->vpi : 0,
3126 lpfc_sli_config_mbox_subsys_get(phba, pmb),
3127 lpfc_sli_config_mbox_opcode_get(phba, pmb),
3129 *((uint32_t *) pmbox),
3130 pmbox->un.varWords[0],
3131 pmbox->un.varWords[1],
3132 pmbox->un.varWords[2],
3133 pmbox->un.varWords[3],
3134 pmbox->un.varWords[4],
3135 pmbox->un.varWords[5],
3136 pmbox->un.varWords[6],
3137 pmbox->un.varWords[7],
3138 pmbox->un.varWords[8],
3139 pmbox->un.varWords[9],
3140 pmbox->un.varWords[10]);
3142 if (pmb->mbox_cmpl)
3143 pmb->mbox_cmpl(phba, pmb);
3144 } while (1);
3146 return 0;
3147 }
3149 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
3150 * @phba: Pointer to HBA context object.
3151 * @pring: Pointer to driver SLI ring object.
3152 * @tag: buffer tag.
3154 * This function is called with no lock held. When QUE_BUFTAG_BIT bit
3155 * is set in the tag the buffer is posted for a particular exchange,
3156 * the function will return the buffer without replacing the buffer.
3157 * If the buffer is for unsolicited ELS or CT traffic, this function
3158 * returns the buffer and also posts another buffer to the firmware.
3160 static struct lpfc_dmabuf *
3161 lpfc_sli_get_buff(struct lpfc_hba *phba,
3162 struct lpfc_sli_ring *pring,
3163 uint32_t tag)
3164 {
3165 struct hbq_dmabuf *hbq_entry;
3167 if (tag & QUE_BUFTAG_BIT)
3168 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
3169 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
3170 if (!hbq_entry)
3171 return NULL;
3172 return &hbq_entry->dbuf;
3173 }
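/*
 * Editor's example (hypothetical tag values): two tag formats meet in
 * this helper. A tag with QUE_BUFTAG_BIT set names a buffer posted for
 * a specific exchange and is fetched from the ring's posted-buffer
 * queue; otherwise the tag is an HBQ tag, e.g. 0x0001002A for HBQ 1,
 * buffer index 0x2A, and is resolved via lpfc_sli_hbqbuf_find().
 */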
3176 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
3177 * containing a NVME LS request.
3178 * @phba: pointer to lpfc hba data structure.
3179 * @piocb: pointer to the iocbq struct representing the sequence starting
3182 * This routine initially validates the NVME LS, validates there is a login
3183 * with the port that sent the LS, and then calls the appropriate nvme host
3184 * or target LS request handler.
3187 lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
3189 struct lpfc_nodelist *ndlp;
3190 struct lpfc_dmabuf *d_buf;
3191 struct hbq_dmabuf *nvmebuf;
3192 struct fc_frame_header *fc_hdr;
3193 struct lpfc_async_xchg_ctx *axchg = NULL;
3194 char *failwhy = NULL;
3195 uint32_t oxid, sid, did, fctl, size;
3196 int ret = 1;
3198 d_buf = piocb->context2;
3200 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
3201 fc_hdr = nvmebuf->hbuf.virt;
3202 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
3203 sid = sli4_sid_from_fc_hdr(fc_hdr);
3204 did = sli4_did_from_fc_hdr(fc_hdr);
3205 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
3206 fc_hdr->fh_f_ctl[1] << 8 |
3207 fc_hdr->fh_f_ctl[2]);
3208 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
3210 lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
3211 oxid, size, sid);
3213 if (phba->pport->load_flag & FC_UNLOADING) {
3214 failwhy = "Driver Unloading";
3215 } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
3216 failwhy = "NVME FC4 Disabled";
3217 } else if (!phba->nvmet_support && !phba->pport->localport) {
3218 failwhy = "No Localport";
3219 } else if (phba->nvmet_support && !phba->targetport) {
3220 failwhy = "No Targetport";
3221 } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
3222 failwhy = "Bad NVME LS R_CTL";
3223 } else if (unlikely((fctl & 0x00FF0000) !=
3224 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
3225 failwhy = "Bad NVME LS F_CTL";
3227 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
3229 failwhy = "No CTX memory";
3232 if (unlikely(failwhy)) {
3233 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3234 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
3235 sid, oxid, failwhy);
3236 goto out_fail;
3237 }
3239 /* validate the source of the LS is logged in */
3240 ndlp = lpfc_findnode_did(phba->pport, sid);
3241 if (!ndlp ||
3242 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3243 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3244 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
3245 "6216 NVME Unsol rcv: No ndlp: "
3246 "NPort_ID x%x oxid x%x\n",
3257 axchg->state = LPFC_NVME_STE_LS_RCV;
3258 axchg->entry_cnt = 1;
3259 axchg->rqb_buffer = (void *)nvmebuf;
3260 axchg->hdwq = &phba->sli4_hba.hdwq[0];
3261 axchg->payload = nvmebuf->dbuf.virt;
3262 INIT_LIST_HEAD(&axchg->list);
3264 if (phba->nvmet_support) {
3265 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3266 spin_lock_irq(&ndlp->lock);
3267 if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3268 ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3269 spin_unlock_irq(&ndlp->lock);
3271 /* This reference is a single occurrence to hold the
3272 * node valid until the nvmet transport calls
3273 * host_release.
3274 */
3275 if (!lpfc_nlp_get(ndlp))
3276 goto out_fail;
3278 lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3279 "6206 NVMET unsol ls_req ndlp x%px "
3280 "DID x%x xflags x%x refcnt %d\n",
3281 ndlp, ndlp->nlp_DID,
3282 ndlp->fc4_xpt_flags,
3283 kref_read(&ndlp->kref));
3285 spin_unlock_irq(&ndlp->lock);
3286 }
3287 } else {
3288 ret = lpfc_nvme_handle_lsreq(phba, axchg);
3289 }
3291 /* if zero, LS was successfully handled. If non-zero, LS not handled */
3292 if (!ret)
3293 return;
3295 out_fail:
3296 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3297 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3298 "NVMe%s handler failed %d\n",
3300 (phba->nvmet_support) ? "T" : "I", ret);
3302 /* recycle receive buffer */
3303 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3305 /* If start of new exchange, abort it */
3306 if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3307 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
3309 if (ret)
3310 kfree(axchg);
3311 }
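/*
 * Editor's example (hypothetical byte values): fh_f_ctl is a 3-byte
 * big-endian field, so the shift-or above rebuilds the 24-bit F_CTL
 * word; bytes {0x29, 0x00, 0x00} give fctl = 0x290000. The 0x00FF0000
 * mask in the validation chain therefore inspects only the first F_CTL
 * byte, where the FIRST_SEQ, END_SEQ and SEQ_INIT bits live.
 */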
3314 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3315 * @phba: Pointer to HBA context object.
3316 * @pring: Pointer to driver SLI ring object.
3317 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3318 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3319 * @fch_type: the type for the first frame of the sequence.
3321 * This function is called with no lock held. This function uses the r_ctl and
3322 * type of the received sequence to find the correct callback function to call
3323 * to process the sequence.
3326 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3327 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3328 uint32_t fch_type)
3329 {
3330 int i;
3332 switch (fch_type) {
3333 case FC_TYPE_NVME:
3334 lpfc_nvme_unsol_ls_handler(phba, saveq);
3335 return 1;
3336 default:
3337 break;
3338 }
3340 /* unSolicited Responses */
3341 if (pring->prt[0].profile) {
3342 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3343 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3344 saveq);
3345 return 1;
3346 }
3347 /* We must search, based on rctl / type
3348 for the right routine */
3349 for (i = 0; i < pring->num_mask; i++) {
3350 if ((pring->prt[i].rctl == fch_r_ctl) &&
3351 (pring->prt[i].type == fch_type)) {
3352 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3353 (pring->prt[i].lpfc_sli_rcv_unsol_event)
3354 (phba, pring, saveq);
3355 return 1;
3356 }
3357 }
3359 return 0;
3360 }
3362 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3363 * @phba: Pointer to HBA context object.
3364 * @pring: Pointer to driver SLI ring object.
3365 * @saveq: Pointer to the unsolicited iocb.
3367 * This function is called with no lock held by the ring event handler
3368 * when there is an unsolicited iocb posted to the response ring by the
3369 * firmware. This function gets the buffer associated with the iocbs
3370 * and calls the event handler for the ring. This function handles both
3371 * qring buffers and hbq buffers.
3372 * When the function returns 1 the caller can free the iocb object otherwise
3373 * upper layer functions will free the iocb objects.
3376 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3377 struct lpfc_iocbq *saveq)
3378 {
3379 IOCB_t *irsp;
3380 WORD5 *w5p;
3381 uint32_t Rctl, Type;
3382 struct lpfc_iocbq *iocbq;
3383 struct lpfc_dmabuf *dmzbuf;
3385 irsp = &(saveq->iocb);
3387 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3388 if (pring->lpfc_sli_rcv_async_status)
3389 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3390 else
3391 lpfc_printf_log(phba,
3394 "0316 Ring %d handler: unexpected "
3395 "ASYNC_STATUS iocb received evt_code "
3398 irsp->un.asyncstat.evt_code);
3402 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3403 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3404 if (irsp->ulpBdeCount > 0) {
3405 dmzbuf = lpfc_sli_get_buff(phba, pring,
3406 irsp->un.ulpWord[3]);
3407 lpfc_in_buf_free(phba, dmzbuf);
3408 }
3410 if (irsp->ulpBdeCount > 1) {
3411 dmzbuf = lpfc_sli_get_buff(phba, pring,
3412 irsp->unsli3.sli3Words[3]);
3413 lpfc_in_buf_free(phba, dmzbuf);
3414 }
3416 if (irsp->ulpBdeCount > 2) {
3417 dmzbuf = lpfc_sli_get_buff(phba, pring,
3418 irsp->unsli3.sli3Words[7]);
3419 lpfc_in_buf_free(phba, dmzbuf);
3420 }
3422 return 1;
3423 }
3425 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3426 if (irsp->ulpBdeCount != 0) {
3427 saveq->context2 = lpfc_sli_get_buff(phba, pring,
3428 irsp->un.ulpWord[3]);
3429 if (!saveq->context2)
3430 lpfc_printf_log(phba,
3433 "0341 Ring %d Cannot find buffer for "
3434 "an unsolicited iocb. tag 0x%x\n",
3436 irsp->un.ulpWord[3]);
3438 if (irsp->ulpBdeCount == 2) {
3439 saveq->context3 = lpfc_sli_get_buff(phba, pring,
3440 irsp->unsli3.sli3Words[7]);
3441 if (!saveq->context3)
3442 lpfc_printf_log(phba,
3445 "0342 Ring %d Cannot find buffer for an"
3446 " unsolicited iocb. tag 0x%x\n",
3448 irsp->unsli3.sli3Words[7]);
3450 list_for_each_entry(iocbq, &saveq->list, list) {
3451 irsp = &(iocbq->iocb);
3452 if (irsp->ulpBdeCount != 0) {
3453 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
3454 irsp->un.ulpWord[3]);
3455 if (!iocbq->context2)
3456 lpfc_printf_log(phba,
3459 "0343 Ring %d Cannot find "
3460 "buffer for an unsolicited iocb"
3461 ". tag 0x%x\n", pring->ringno,
3462 irsp->un.ulpWord[3]);
3464 if (irsp->ulpBdeCount == 2) {
3465 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
3466 irsp->unsli3.sli3Words[7]);
3467 if (!iocbq->context3)
3468 lpfc_printf_log(phba,
3471 "0344 Ring %d Cannot find "
3472 "buffer for an unsolicited "
3475 irsp->unsli3.sli3Words[7]);
3479 if (irsp->ulpBdeCount != 0 &&
3480 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3481 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3482 int found = 0;
3484 /* search continue save q for same XRI */
3485 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3486 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3487 saveq->iocb.unsli3.rcvsli3.ox_id) {
3488 list_add_tail(&saveq->list, &iocbq->list);
3489 found = 1;
3490 break;
3491 }
3492 }
3493 if (!found)
3494 list_add_tail(&saveq->clist,
3495 &pring->iocb_continue_saveq);
3496 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3497 list_del_init(&iocbq->clist);
3498 saveq = iocbq;
3499 irsp = &(saveq->iocb);
3500 } else
3501 return 0;
3502 }
3503 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3504 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3505 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3506 Rctl = FC_RCTL_ELS_REQ;
3507 Type = FC_TYPE_ELS;
3508 } else {
3509 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3510 Rctl = w5p->hcsw.Rctl;
3511 Type = w5p->hcsw.Type;
3513 /* Firmware Workaround */
3514 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3515 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3516 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3517 Rctl = FC_RCTL_ELS_REQ;
3518 Type = FC_TYPE_ELS;
3519 w5p->hcsw.Rctl = Rctl;
3520 w5p->hcsw.Type = Type;
3521 }
3522 }
3524 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3525 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3526 "0313 Ring %d handler: unexpected Rctl x%x "
3527 "Type x%x received\n",
3528 pring->ringno, Rctl, Type);
3530 return 1;
3531 }
3534 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3535 * @phba: Pointer to HBA context object.
3536 * @pring: Pointer to driver SLI ring object.
3537 * @prspiocb: Pointer to response iocb object.
3539 * This function looks up the iocb_lookup table to get the command iocb
3540 * corresponding to the given response iocb using the iotag of the
3541 * response iocb. The driver calls this function with the hbalock held
3542 * for SLI3 ports or the ring lock held for SLI4 ports.
3543 * This function returns the command iocb object if it finds the command
3544 * iocb else returns NULL.
3546 static struct lpfc_iocbq *
3547 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3548 struct lpfc_sli_ring *pring,
3549 struct lpfc_iocbq *prspiocb)
3551 struct lpfc_iocbq *cmd_iocb = NULL;
3552 uint16_t iotag;
3553 spinlock_t *temp_lock = NULL;
3554 unsigned long iflag = 0;
3556 if (phba->sli_rev == LPFC_SLI_REV4)
3557 temp_lock = &pring->ring_lock;
3559 temp_lock = &phba->hbalock;
3561 spin_lock_irqsave(temp_lock, iflag);
3562 iotag = prspiocb->iocb.ulpIoTag;
3564 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3565 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3566 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3567 /* remove from txcmpl queue list */
3568 list_del_init(&cmd_iocb->list);
3569 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3570 pring->txcmplq_cnt--;
3571 spin_unlock_irqrestore(temp_lock, iflag);
3572 return cmd_iocb;
3573 }
3574 }
3576 spin_unlock_irqrestore(temp_lock, iflag);
3577 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3578 "0317 iotag x%x is out of "
3579 "range: max iotag x%x wd0 x%x\n",
3580 iotag, phba->sli.last_iotag,
3581 *(((uint32_t *) &prspiocb->iocb) + 7));
3582 return NULL;
3583 }
3586 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3587 * @phba: Pointer to HBA context object.
3588 * @pring: Pointer to driver SLI ring object.
3591 * This function looks up the iocb_lookup table to get the command iocb
3592 * corresponding to the given iotag. The driver calls this function with
3593 * the ring lock held because this function is an SLI4 port only helper.
3594 * This function returns the command iocb object if it finds the command
3595 * iocb else returns NULL.
3597 static struct lpfc_iocbq *
3598 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3599 struct lpfc_sli_ring *pring, uint16_t iotag)
3601 struct lpfc_iocbq *cmd_iocb = NULL;
3602 spinlock_t *temp_lock = NULL;
3603 unsigned long iflag = 0;
3605 if (phba->sli_rev == LPFC_SLI_REV4)
3606 temp_lock = &pring->ring_lock;
3608 temp_lock = &phba->hbalock;
3610 spin_lock_irqsave(temp_lock, iflag);
3611 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3612 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3613 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3614 /* remove from txcmpl queue list */
3615 list_del_init(&cmd_iocb->list);
3616 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3617 pring->txcmplq_cnt--;
3618 spin_unlock_irqrestore(temp_lock, iflag);
3619 return cmd_iocb;
3620 }
3621 }
3623 spin_unlock_irqrestore(temp_lock, iflag);
3624 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3625 "0372 iotag x%x lookup error: max iotag (x%x) "
3627 iotag, phba->sli.last_iotag,
3628 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3629 return NULL;
3630 }
3633 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3634 * @phba: Pointer to HBA context object.
3635 * @pring: Pointer to driver SLI ring object.
3636 * @saveq: Pointer to the response iocb to be processed.
3638 * This function is called by the ring event handler for non-fcp
3639 * rings when there is a new response iocb in the response ring.
3640 * The caller is not required to hold any locks. This function
3641 * gets the command iocb associated with the response iocb and
3642 * calls the completion handler for the command iocb. If there
3643 * is no completion handler, the function will free the resources
3644 * associated with command iocb. If the response iocb is for
3645 * an already aborted command iocb, the status of the completion
3646 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3647 * This function always returns 1.
3650 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3651 struct lpfc_iocbq *saveq)
3653 struct lpfc_iocbq *cmdiocbp;
3654 int rc = 1;
3655 unsigned long iflag;
3657 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3658 if (cmdiocbp) {
3659 if (cmdiocbp->iocb_cmpl) {
3661 * If an ELS command failed send an event to mgmt
3664 if (saveq->iocb.ulpStatus &&
3665 (pring->ringno == LPFC_ELS_RING) &&
3666 (cmdiocbp->iocb.ulpCommand ==
3667 CMD_ELS_REQUEST64_CR))
3668 lpfc_send_els_failure_event(phba,
3669 cmdiocbp, saveq);
3672 * Post all ELS completions to the worker thread.
3673 * All other are passed to the completion callback.
3675 if (pring->ringno == LPFC_ELS_RING) {
3676 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3677 (cmdiocbp->iocb_flag &
3678 LPFC_DRIVER_ABORTED)) {
3679 spin_lock_irqsave(&phba->hbalock,
3680 iflag);
3681 cmdiocbp->iocb_flag &=
3682 ~LPFC_DRIVER_ABORTED;
3683 spin_unlock_irqrestore(&phba->hbalock,
3685 saveq->iocb.ulpStatus =
3686 IOSTAT_LOCAL_REJECT;
3687 saveq->iocb.un.ulpWord[4] =
3688 IOERR_SLI_ABORTED;
3690 /* Firmware could still be in progress
3691 * of DMAing payload, so don't free data
3692 * buffer till after a hbeat.
3694 spin_lock_irqsave(&phba->hbalock,
3695 iflag);
3696 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3697 spin_unlock_irqrestore(&phba->hbalock,
3698 iflag);
3699 }
3700 if (phba->sli_rev == LPFC_SLI_REV4) {
3701 if (saveq->iocb_flag &
3702 LPFC_EXCHANGE_BUSY) {
3703 /* Set cmdiocb flag for the
3704 * exchange busy so sgl (xri)
3705 * will not be released until
3706 * the abort xri is received
3710 &phba->hbalock, iflag);
3711 cmdiocbp->iocb_flag |=
3713 spin_unlock_irqrestore(
3714 &phba->hbalock, iflag);
3716 if (cmdiocbp->iocb_flag &
3717 LPFC_DRIVER_ABORTED) {
3719 * Clear LPFC_DRIVER_ABORTED
3720 * bit in case it was driver
3721 * initiated abort.
3722 */
3723 spin_lock_irqsave(
3724 &phba->hbalock, iflag);
3725 cmdiocbp->iocb_flag &=
3726 ~LPFC_DRIVER_ABORTED;
3727 spin_unlock_irqrestore(
3728 &phba->hbalock, iflag);
3729 cmdiocbp->iocb.ulpStatus =
3730 IOSTAT_LOCAL_REJECT;
3731 cmdiocbp->iocb.un.ulpWord[4] =
3732 IOERR_ABORT_REQUESTED;
3733 } else {
3734 * For SLI4, irsiocb contains
3735 * NO_XRI in sli_xritag, it
3736 * shall not affect releasing
3737 * sgl (xri) process.
3739 saveq->iocb.ulpStatus =
3740 IOSTAT_LOCAL_REJECT;
3741 saveq->iocb.un.ulpWord[4] =
3742 IOERR_SLI_ABORTED;
3743 spin_lock_irqsave(
3744 &phba->hbalock, iflag);
3745 saveq->iocb_flag |=
3746 LPFC_DELAY_MEM_FREE;
3747 spin_unlock_irqrestore(
3748 &phba->hbalock, iflag);
3752 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3754 lpfc_sli_release_iocbq(phba, cmdiocbp);
3757 * Unknown initiating command based on the response iotag.
3758 * This could be the case on the ELS ring because of
3761 if (pring->ringno != LPFC_ELS_RING) {
3763 * Ring <ringno> handler: unexpected completion IoTag
3766 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3767 "0322 Ring %d handler: "
3768 "unexpected completion IoTag x%x "
3769 "Data: x%x x%x x%x x%x\n",
3771 saveq->iocb.ulpIoTag,
3772 saveq->iocb.ulpStatus,
3773 saveq->iocb.un.ulpWord[4],
3774 saveq->iocb.ulpCommand,
3775 saveq->iocb.ulpContext);
3776 }
3777 }
3779 return rc;
3780 }
3783 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3784 * @phba: Pointer to HBA context object.
3785 * @pring: Pointer to driver SLI ring object.
3787 * This function is called from the iocb ring event handlers when
3788 * put pointer is ahead of the get pointer for a ring. This function signals
3789 * an error attention condition to the worker thread and the worker
3790 * thread will transition the HBA to offline state.
3793 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3795 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3797 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3798 * rsp ring <portRspMax>
3800 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3801 "0312 Ring %d handler: portRspPut %d "
3802 "is bigger than rsp ring %d\n",
3803 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3804 pring->sli.sli3.numRiocb);
3806 phba->link_state = LPFC_HBA_ERROR;
3808 /*
3809 * All error attention handlers are posted to
3810 * worker thread
3811 */
3812 phba->work_ha |= HA_ERATT;
3813 phba->work_hs = HS_FFER3;
3815 lpfc_worker_wake_up(phba);
3821 * lpfc_poll_eratt - Error attention polling timer timeout handler
3822 * @t: Context to fetch pointer to address of HBA context object from.
3824 * This function is invoked by the Error Attention polling timer when the
3825 * timer times out. It will check the SLI Error Attention register for
3826 * possible attention events. If so, it will post an Error Attention event
3827 * and wake up worker thread to process it. Otherwise, it will set up the
3828 * Error Attention polling timer for the next poll.
3830 void lpfc_poll_eratt(struct timer_list *t)
3832 struct lpfc_hba *phba;
3833 uint32_t eratt = 0;
3834 uint64_t sli_intr, cnt;
3836 phba = from_timer(phba, t, eratt_poll);
3838 /* Here we will also keep track of interrupts per sec of the hba */
3839 sli_intr = phba->sli.slistat.sli_intr;
3841 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3842 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3843 sli_intr);
3844 else
3845 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3847 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3848 do_div(cnt, phba->eratt_poll_interval);
3849 phba->sli.slistat.sli_ips = cnt;
3851 phba->sli.slistat.sli_prev_intr = sli_intr;
3853 /* Check chip HA register for error event */
3854 eratt = lpfc_sli_check_eratt(phba);
3856 if (eratt)
3857 /* Tell the worker thread there is work to do */
3858 lpfc_worker_wake_up(phba);
3859 else
3860 /* Restart the timer for next eratt poll */
3861 mod_timer(&phba->eratt_poll,
3862 jiffies +
3863 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3864 return;
3865 }
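/*
 * Editor's example (hypothetical counts): if 10,000 interrupts arrived
 * since the last poll and eratt_poll_interval is 5 seconds,
 * do_div(cnt, 5) divides cnt in place, leaving sli_ips = 2,000
 * interrupts per second. The (uint64_t)(-1) branch above keeps the
 * delta correct if the 64-bit counter ever wraps between polls.
 */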
3869 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3870 * @phba: Pointer to HBA context object.
3871 * @pring: Pointer to driver SLI ring object.
3872 * @mask: Host attention register mask for this ring.
3874 * This function is called from the interrupt context when there is a ring
3875 * event for the fcp ring. The caller does not hold any lock.
3876 * The function processes each response iocb in the response ring until it
3877 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3878 * LE bit set. The function will call the completion handler of the command iocb
3879 * if the response iocb indicates a completion for a command iocb or it is
3880 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3881 * function if this is an unsolicited iocb.
3882 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3883 * to check it explicitly.
3886 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3887 struct lpfc_sli_ring *pring, uint32_t mask)
3889 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3890 IOCB_t *irsp = NULL;
3891 IOCB_t *entry = NULL;
3892 struct lpfc_iocbq *cmdiocbq = NULL;
3893 struct lpfc_iocbq rspiocbq;
3894 uint32_t status;
3895 uint32_t portRspPut, portRspMax;
3897 lpfc_iocb_type type;
3898 unsigned long iflag;
3899 uint32_t rsp_cmpl = 0;
3901 spin_lock_irqsave(&phba->hbalock, iflag);
3902 pring->stats.iocb_event++;
3905 * The next available response entry should never exceed the maximum
3906 * entries. If it does, treat it as an adapter hardware error.
3908 portRspMax = pring->sli.sli3.numRiocb;
3909 portRspPut = le32_to_cpu(pgp->rspPutInx);
3910 if (unlikely(portRspPut >= portRspMax)) {
3911 lpfc_sli_rsp_pointers_error(phba, pring);
3912 spin_unlock_irqrestore(&phba->hbalock, iflag);
3915 if (phba->fcp_ring_in_use) {
3916 spin_unlock_irqrestore(&phba->hbalock, iflag);
3917 return;
3918 } else
3919 phba->fcp_ring_in_use = 1;
3922 while (pring->sli.sli3.rspidx != portRspPut) {
3924 * Fetch an entry off the ring and copy it into a local data
* structure. The copy involves a byte swap because the
* network byte order and the PCI byte order differ.
3928 entry = lpfc_resp_iocb(phba, pring);
3929 phba->last_completion_time = jiffies;
3931 if (++pring->sli.sli3.rspidx >= portRspMax)
3932 pring->sli.sli3.rspidx = 0;
3934 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3935 (uint32_t *) &rspiocbq.iocb,
3936 phba->iocb_rsp_size);
3937 INIT_LIST_HEAD(&(rspiocbq.list));
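/* rspiocbq lives on the stack, so its embedded list head must be
 * initialized before it is handed to the handlers below.
 */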
3938 irsp = &rspiocbq.iocb;
3940 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3941 pring->stats.iocb_rsp++;
3944 if (unlikely(irsp->ulpStatus)) {
* If resource errors are reported from the HBA, reduce
* the queue depths of the SCSI devices.
3949 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3950 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3951 IOERR_NO_RESOURCES)) {
3952 spin_unlock_irqrestore(&phba->hbalock, iflag);
3953 phba->lpfc_rampdown_queue_depth(phba);
3954 spin_lock_irqsave(&phba->hbalock, iflag);
3957 /* Rsp ring <ringno> error: IOCB */
3958 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3959 "0336 Rsp Ring %d error: IOCB Data: "
3960 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3962 irsp->un.ulpWord[0],
3963 irsp->un.ulpWord[1],
3964 irsp->un.ulpWord[2],
3965 irsp->un.ulpWord[3],
3966 irsp->un.ulpWord[4],
3967 irsp->un.ulpWord[5],
3968 *(uint32_t *)&irsp->un1,
3969 *((uint32_t *)&irsp->un1 + 1));
3973 case LPFC_ABORT_IOCB:
3976 * Idle exchange closed via ABTS from port. No iocb
3977 * resources need to be recovered.
3979 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3980 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3981 "0333 IOCB cmd 0x%x"
3982 " processed. Skipping"
3988 spin_unlock_irqrestore(&phba->hbalock, iflag);
3989 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3991 spin_lock_irqsave(&phba->hbalock, iflag);
3992 if (unlikely(!cmdiocbq))
3994 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3995 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3996 if (cmdiocbq->iocb_cmpl) {
3997 spin_unlock_irqrestore(&phba->hbalock, iflag);
3998 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
4000 spin_lock_irqsave(&phba->hbalock, iflag);
4003 case LPFC_UNSOL_IOCB:
4004 spin_unlock_irqrestore(&phba->hbalock, iflag);
4005 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
4006 spin_lock_irqsave(&phba->hbalock, iflag);
4009 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
4010 char adaptermsg[LPFC_MAX_ADPTMSG];
4011 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4012 memcpy(&adaptermsg[0], (uint8_t *) irsp,
4014 dev_warn(&((phba->pcidev)->dev),
4016 phba->brd_no, adaptermsg);
4018 /* Unknown IOCB command */
4019 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4020 "0334 Unknown IOCB command "
4021 "Data: x%x, x%x x%x x%x x%x\n",
4022 type, irsp->ulpCommand,
4031 * The response IOCB has been processed. Update the ring
4032 * pointer in SLIM. If the port response put pointer has not
4033 * been updated, sync the pgp->rspPutInx and fetch the new port
4034 * response put pointer.
4036 writel(pring->sli.sli3.rspidx,
4037 &phba->host_gp[pring->ringno].rspGetInx);
4039 if (pring->sli.sli3.rspidx == portRspPut)
4040 portRspPut = le32_to_cpu(pgp->rspPutInx);
4043 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
4044 pring->stats.iocb_rsp_full++;
4045 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4046 writel(status, phba->CAregaddr);
readl(phba->CAregaddr); /* flush */
4049 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4050 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4051 pring->stats.iocb_cmd_empty++;
4053 /* Force update of the local copy of cmdGetInx */
4054 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4055 lpfc_sli_resume_iocb(phba, pring);
4057 if ((pring->lpfc_sli_cmd_available))
4058 (pring->lpfc_sli_cmd_available) (phba, pring);
4062 phba->fcp_ring_in_use = 0;
4063 spin_unlock_irqrestore(&phba->hbalock, iflag);
4068 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
4069 * @phba: Pointer to HBA context object.
4070 * @pring: Pointer to driver SLI ring object.
4071 * @rspiocbp: Pointer to driver response IOCB object.
4073 * This function is called from the worker thread when there is a slow-path
4074 * response IOCB to process. This function chains all the response iocbs until
4075 * seeing the iocb with the LE bit set. The function will call
4076 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
4077 * completion of a command iocb. The function will call the
4078 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
4079 * The function frees the resources or calls the completion handler if this
4080 * iocb is an abort completion. The function returns NULL when the response
* iocb has the LE bit set and all the chained iocbs have been processed;
* otherwise it chains the iocb onto the iocb_continueq and returns the
* response iocb passed in.
4085 static struct lpfc_iocbq *
4086 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4087 struct lpfc_iocbq *rspiocbp)
4089 struct lpfc_iocbq *saveq;
4090 struct lpfc_iocbq *cmdiocbp;
4091 struct lpfc_iocbq *next_iocb;
4092 IOCB_t *irsp = NULL;
4093 uint32_t free_saveq;
4094 uint8_t iocb_cmd_type;
4095 lpfc_iocb_type type;
4096 unsigned long iflag;
4099 spin_lock_irqsave(&phba->hbalock, iflag);
/* First add the response iocb to the continueq list */
4101 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
4102 pring->iocb_continueq_cnt++;
/* Now, determine whether the list is complete and ready for processing */
4105 irsp = &rspiocbp->iocb;
4108 * By default, the driver expects to free all resources
4109 * associated with this iocb completion.
4112 saveq = list_get_first(&pring->iocb_continueq,
4113 struct lpfc_iocbq, list);
4114 irsp = &(saveq->iocb);
4115 list_del_init(&pring->iocb_continueq);
4116 pring->iocb_continueq_cnt = 0;
4118 pring->stats.iocb_rsp++;
* If resource errors are reported from the HBA, reduce
* the queue depths of the SCSI devices.
4124 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
4125 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
4126 IOERR_NO_RESOURCES)) {
4127 spin_unlock_irqrestore(&phba->hbalock, iflag);
4128 phba->lpfc_rampdown_queue_depth(phba);
4129 spin_lock_irqsave(&phba->hbalock, iflag);
4132 if (irsp->ulpStatus) {
4133 /* Rsp ring <ringno> error: IOCB */
4134 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4135 "0328 Rsp Ring %d error: "
4140 "x%x x%x x%x x%x\n",
4142 irsp->un.ulpWord[0],
4143 irsp->un.ulpWord[1],
4144 irsp->un.ulpWord[2],
4145 irsp->un.ulpWord[3],
4146 irsp->un.ulpWord[4],
4147 irsp->un.ulpWord[5],
4148 *(((uint32_t *) irsp) + 6),
4149 *(((uint32_t *) irsp) + 7),
4150 *(((uint32_t *) irsp) + 8),
4151 *(((uint32_t *) irsp) + 9),
4152 *(((uint32_t *) irsp) + 10),
4153 *(((uint32_t *) irsp) + 11),
4154 *(((uint32_t *) irsp) + 12),
4155 *(((uint32_t *) irsp) + 13),
4156 *(((uint32_t *) irsp) + 14),
4157 *(((uint32_t *) irsp) + 15));
4161 * Fetch the IOCB command type and call the correct completion
4162 * routine. Solicited and Unsolicited IOCBs on the ELS ring
* get freed back to the lpfc_iocb_list by the discovery kernel thread.
4166 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
4167 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
4170 spin_unlock_irqrestore(&phba->hbalock, iflag);
4171 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
4172 spin_lock_irqsave(&phba->hbalock, iflag);
4175 case LPFC_UNSOL_IOCB:
4176 spin_unlock_irqrestore(&phba->hbalock, iflag);
4177 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
4178 spin_lock_irqsave(&phba->hbalock, iflag);
4183 case LPFC_ABORT_IOCB:
4185 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
4186 spin_unlock_irqrestore(&phba->hbalock, iflag);
4187 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
4189 spin_lock_irqsave(&phba->hbalock, iflag);
4192 /* Call the specified completion routine */
4193 if (cmdiocbp->iocb_cmpl) {
4194 spin_unlock_irqrestore(&phba->hbalock,
4196 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
4198 spin_lock_irqsave(&phba->hbalock,
4201 __lpfc_sli_release_iocbq(phba,
4206 case LPFC_UNKNOWN_IOCB:
4207 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
4208 char adaptermsg[LPFC_MAX_ADPTMSG];
4209 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4210 memcpy(&adaptermsg[0], (uint8_t *)irsp,
4212 dev_warn(&((phba->pcidev)->dev),
4214 phba->brd_no, adaptermsg);
4216 /* Unknown IOCB command */
4217 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4218 "0335 Unknown IOCB "
4219 "command Data: x%x "
4230 list_for_each_entry_safe(rspiocbp, next_iocb,
4231 &saveq->list, list) {
4232 list_del_init(&rspiocbp->list);
4233 __lpfc_sli_release_iocbq(phba, rspiocbp);
4235 __lpfc_sli_release_iocbq(phba, saveq);
4239 spin_unlock_irqrestore(&phba->hbalock, iflag);
4244 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
4245 * @phba: Pointer to HBA context object.
4246 * @pring: Pointer to driver SLI ring object.
4247 * @mask: Host attention register mask for this ring.
* This routine wraps the actual slow-path ring event handler, invoking it
* through the API jump table function pointer in the lpfc_hba struct.
4253 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4254 struct lpfc_sli_ring *pring, uint32_t mask)
4256 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
4260 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
4261 * @phba: Pointer to HBA context object.
4262 * @pring: Pointer to driver SLI ring object.
4263 * @mask: Host attention register mask for this ring.
4265 * This function is called from the worker thread when there is a ring event
* for non-FCP rings. The caller does not hold any lock. The function
* removes each response iocb from the response ring and calls the handle
* response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4271 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4272 struct lpfc_sli_ring *pring, uint32_t mask)
4274 struct lpfc_pgp *pgp;
4276 IOCB_t *irsp = NULL;
4277 struct lpfc_iocbq *rspiocbp = NULL;
4278 uint32_t portRspPut, portRspMax;
4279 unsigned long iflag;
4282 pgp = &phba->port_gp[pring->ringno];
4283 spin_lock_irqsave(&phba->hbalock, iflag);
4284 pring->stats.iocb_event++;
4287 * The next available response entry should never exceed the maximum
4288 * entries. If it does, treat it as an adapter hardware error.
4290 portRspMax = pring->sli.sli3.numRiocb;
4291 portRspPut = le32_to_cpu(pgp->rspPutInx);
4292 if (portRspPut >= portRspMax) {
4294 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4295 * rsp ring <portRspMax>
4297 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4298 "0303 Ring %d handler: portRspPut %d "
4299 "is bigger than rsp ring %d\n",
4300 pring->ringno, portRspPut, portRspMax);
4302 phba->link_state = LPFC_HBA_ERROR;
4303 spin_unlock_irqrestore(&phba->hbalock, iflag);
4305 phba->work_hs = HS_FFER3;
4306 lpfc_handle_eratt(phba);
4312 while (pring->sli.sli3.rspidx != portRspPut) {
4314 * Build a completion list and call the appropriate handler.
* The process is to get the next available response iocb, get
* a free iocb from the list, copy the response data into the
* free iocb, insert it into the continuation list, and update
* the next response index to SLIM. This process makes response
* iocbs in the ring available to DMA as fast as possible but
* pays a penalty for the copy operation. Since the iocb is
* only 32 bytes, this penalty is considered small relative to
* the PCI reads for register values and a SLIM write. When
* the ulpLe field is set, the entire Command has been received.
4326 entry = lpfc_resp_iocb(phba, pring);
4328 phba->last_completion_time = jiffies;
4329 rspiocbp = __lpfc_sli_get_iocbq(phba);
4330 if (rspiocbp == NULL) {
4331 printk(KERN_ERR "%s: out of buffers! Failing "
4332 "completion.\n", __func__);
4336 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4337 phba->iocb_rsp_size);
4338 irsp = &rspiocbp->iocb;
4340 if (++pring->sli.sli3.rspidx >= portRspMax)
4341 pring->sli.sli3.rspidx = 0;
4343 if (pring->ringno == LPFC_ELS_RING) {
4344 lpfc_debugfs_slow_ring_trc(phba,
4345 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
4346 *(((uint32_t *) irsp) + 4),
4347 *(((uint32_t *) irsp) + 6),
4348 *(((uint32_t *) irsp) + 7));
4351 writel(pring->sli.sli3.rspidx,
4352 &phba->host_gp[pring->ringno].rspGetInx);
4354 spin_unlock_irqrestore(&phba->hbalock, iflag);
4355 /* Handle the response IOCB */
4356 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4357 spin_lock_irqsave(&phba->hbalock, iflag);
4360 * If the port response put pointer has not been updated, sync
* the pgp->rspPutInx in the MAILBOX_t and fetch the new port
4362 * response put pointer.
4364 if (pring->sli.sli3.rspidx == portRspPut) {
4365 portRspPut = le32_to_cpu(pgp->rspPutInx);
4367 } /* while (pring->sli.sli3.rspidx != portRspPut) */
4369 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4370 /* At least one response entry has been freed */
4371 pring->stats.iocb_rsp_full++;
4372 /* SET RxRE_RSP in Chip Att register */
4373 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4374 writel(status, phba->CAregaddr);
4375 readl(phba->CAregaddr); /* flush */
4377 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4378 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4379 pring->stats.iocb_cmd_empty++;
4381 /* Force update of the local copy of cmdGetInx */
4382 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4383 lpfc_sli_resume_iocb(phba, pring);
4385 if ((pring->lpfc_sli_cmd_available))
4386 (pring->lpfc_sli_cmd_available) (phba, pring);
4390 spin_unlock_irqrestore(&phba->hbalock, iflag);
4395 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
4396 * @phba: Pointer to HBA context object.
4397 * @pring: Pointer to driver SLI ring object.
4398 * @mask: Host attention register mask for this ring.
4400 * This function is called from the worker thread when there is a pending
4401 * ELS response iocb on the driver internal slow-path response iocb worker
* queue. The caller does not hold any lock. The function removes each
* response iocb from the response worker queue and calls the handle
4404 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4407 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4408 struct lpfc_sli_ring *pring, uint32_t mask)
4410 struct lpfc_iocbq *irspiocbq;
4411 struct hbq_dmabuf *dmabuf;
4412 struct lpfc_cq_event *cq_event;
4413 unsigned long iflag;
4416 spin_lock_irqsave(&phba->hbalock, iflag);
4417 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4418 spin_unlock_irqrestore(&phba->hbalock, iflag);
4419 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4420 /* Get the response iocb from the head of work queue */
4421 spin_lock_irqsave(&phba->hbalock, iflag);
4422 list_remove_head(&phba->sli4_hba.sp_queue_event,
4423 cq_event, struct lpfc_cq_event, list);
4424 spin_unlock_irqrestore(&phba->hbalock, iflag);
4426 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4427 case CQE_CODE_COMPL_WQE:
4428 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4430 /* Translate ELS WCQE to response IOCBQ */
4431 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
4434 lpfc_sli_sp_handle_rspiocb(phba, pring,
4438 case CQE_CODE_RECEIVE:
4439 case CQE_CODE_RECEIVE_V1:
4440 dmabuf = container_of(cq_event, struct hbq_dmabuf,
4442 lpfc_sli4_handle_received_buffer(phba, dmabuf);
4449 /* Limit the number of events to 64 to avoid soft lockups */
4456 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4457 * @phba: Pointer to HBA context object.
4458 * @pring: Pointer to driver SLI ring object.
4460 * This function aborts all iocbs in the given ring and frees all the iocb
* objects in txq. It issues an abort iocb for all the iocb commands
* in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4463 * the return of this function. The caller is not required to hold any locks.
4466 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4468 LIST_HEAD(completions);
4469 struct lpfc_iocbq *iocb, *next_iocb;
4471 if (pring->ringno == LPFC_ELS_RING) {
4472 lpfc_fabric_abort_hba(phba);
/* Error everything on txq and txcmplq. First do the txq. */
4478 if (phba->sli_rev >= LPFC_SLI_REV4) {
4479 spin_lock_irq(&pring->ring_lock);
4480 list_splice_init(&pring->txq, &completions);
4482 spin_unlock_irq(&pring->ring_lock);
4484 spin_lock_irq(&phba->hbalock);
4485 /* Next issue ABTS for everything on the txcmplq */
4486 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4487 lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4488 spin_unlock_irq(&phba->hbalock);
4490 spin_lock_irq(&phba->hbalock);
4491 list_splice_init(&pring->txq, &completions);
4494 /* Next issue ABTS for everything on the txcmplq */
4495 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4496 lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4497 spin_unlock_irq(&phba->hbalock);
4499 /* Make sure HBA is alive */
4500 lpfc_issue_hb_tmo(phba);
4502 /* Cancel all the IOCBs from the completions list */
4503 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4508 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4509 * @phba: Pointer to HBA context object.
4511 * This function aborts all iocbs in FCP rings and frees all the iocb
* objects in txq. It issues an abort iocb for all the iocb commands
* in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4514 * the return of this function. The caller is not required to hold any locks.
4517 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4519 struct lpfc_sli *psli = &phba->sli;
4520 struct lpfc_sli_ring *pring;
4523 /* Look on all the FCP Rings for the iotag */
4524 if (phba->sli_rev >= LPFC_SLI_REV4) {
4525 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4526 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4527 lpfc_sli_abort_iocb_ring(phba, pring);
4530 pring = &psli->sli3_ring[LPFC_FCP_RING];
4531 lpfc_sli_abort_iocb_ring(phba, pring);
4536 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4537 * @phba: Pointer to HBA context object.
4539 * This function flushes all iocbs in the IO ring and frees all the iocb
4540 * objects in txq and txcmplq. This function will not issue abort iocbs
* for all the iocb commands in txcmplq; they will just be returned with
* IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
* slot has been permanently disabled.
4546 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4550 struct lpfc_sli *psli = &phba->sli;
4551 struct lpfc_sli_ring *pring;
4553 struct lpfc_iocbq *piocb, *next_iocb;
4555 spin_lock_irq(&phba->hbalock);
4556 if (phba->hba_flag & HBA_IOQ_FLUSH ||
4557 !phba->sli4_hba.hdwq) {
4558 spin_unlock_irq(&phba->hbalock);
4561 /* Indicate the I/O queues are flushed */
4562 phba->hba_flag |= HBA_IOQ_FLUSH;
4563 spin_unlock_irq(&phba->hbalock);
4565 /* Look on all the FCP Rings for the iotag */
4566 if (phba->sli_rev >= LPFC_SLI_REV4) {
4567 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4568 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4570 spin_lock_irq(&pring->ring_lock);
4571 /* Retrieve everything on txq */
4572 list_splice_init(&pring->txq, &txq);
4573 list_for_each_entry_safe(piocb, next_iocb,
4574 &pring->txcmplq, list)
4575 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
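/* The whole txcmplq is spliced off below, so drop the on-txcmplq
 * marker on each iocb first, keeping per-iocb state consistent
 * with the list it is actually on.
 */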
4576 /* Retrieve everything on the txcmplq */
4577 list_splice_init(&pring->txcmplq, &txcmplq);
4579 pring->txcmplq_cnt = 0;
4580 spin_unlock_irq(&pring->ring_lock);
4583 lpfc_sli_cancel_iocbs(phba, &txq,
4584 IOSTAT_LOCAL_REJECT,
/* Flush the txcmplq */
4587 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4588 IOSTAT_LOCAL_REJECT,
4592 pring = &psli->sli3_ring[LPFC_FCP_RING];
4594 spin_lock_irq(&phba->hbalock);
4595 /* Retrieve everything on txq */
4596 list_splice_init(&pring->txq, &txq);
4597 list_for_each_entry_safe(piocb, next_iocb,
4598 &pring->txcmplq, list)
4599 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4600 /* Retrieve everything on the txcmplq */
4601 list_splice_init(&pring->txcmplq, &txcmplq);
4603 pring->txcmplq_cnt = 0;
4604 spin_unlock_irq(&phba->hbalock);
4607 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
/* Flush the txcmplq */
4610 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4616 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4617 * @phba: Pointer to HBA context object.
4618 * @mask: Bit mask to be checked.
4620 * This function reads the host status register and compares
4621 * with the provided bit mask to check if HBA completed
4622 * the restart. This function will wait in a loop for the
4623 * HBA to complete restart. If the HBA does not restart within
4624 * 15 iterations, the function will reset the HBA again. The
* function returns 1 when the HBA fails to restart; otherwise it returns zero.
4629 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4635 /* Read the HBA Host Status Register */
4636 if (lpfc_readl(phba->HSregaddr, &status))
4639 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4642 * Check status register every 100ms for 5 retries, then every
* 500ms for 5, then every 2.5 sec for 5, then reset the board and
* check every 2.5 sec for 4 more.
* Break out of the loop if errors occurred during init.
4647 while (((status & mask) != mask) &&
4648 !(status & HS_FFERM) &&
4660 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4661 lpfc_sli_brdrestart(phba);
4663 /* Read the HBA Host Status Register */
4664 if (lpfc_readl(phba->HSregaddr, &status)) {
4670 /* Check to see if any errors occurred during init */
4671 if ((status & HS_FFERM) || (i >= 20)) {
4672 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4673 "2751 Adapter failed to restart, "
4674 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4676 readl(phba->MBslimaddr + 0xa8),
4677 readl(phba->MBslimaddr + 0xac));
4678 phba->link_state = LPFC_HBA_ERROR;
4686 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4687 * @phba: Pointer to HBA context object.
4688 * @mask: Bit mask to be checked.
* This function checks the host status register to see if the HBA is
* ready. It will wait in a loop for the HBA to become ready.
* If the HBA is not ready, the function will reset the HBA PCI
* function again. The function returns 1 when the HBA fails to become
* ready; otherwise it returns zero.
4697 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4702 /* Read the HBA Host Status Register */
4703 status = lpfc_sli4_post_status_check(phba);
4706 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4707 lpfc_sli_brdrestart(phba);
4708 status = lpfc_sli4_post_status_check(phba);
4711 /* Check to see if any errors occurred during init */
4713 phba->link_state = LPFC_HBA_ERROR;
4716 phba->sli4_hba.intr_enable = 0;
4718 phba->hba_flag &= ~HBA_SETUP;
* lpfc_sli_brdready - Wrapper func for checking the hba readiness
4724 * @phba: Pointer to HBA context object.
4725 * @mask: Bit mask to be checked.
* This routine wraps the actual SLI3 or SLI4 hba readiness check routine,
* invoking it through the API jump table function pointer in the lpfc_hba struct.
4731 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4733 return phba->lpfc_sli_brdready(phba, mask);
4736 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4739 * lpfc_reset_barrier - Make HBA ready for HBA reset
4740 * @phba: Pointer to HBA context object.
* This function is called, with hbalock held, before resetting an HBA.
* It requests the HBA to quiesce DMAs before the reset.
4745 void lpfc_reset_barrier(struct lpfc_hba *phba)
4747 uint32_t __iomem *resp_buf;
4748 uint32_t __iomem *mbox_buf;
4749 volatile uint32_t mbox;
4750 uint32_t hc_copy, ha_copy, resp_data;
4754 lockdep_assert_held(&phba->hbalock);
4756 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4757 if (hdrtype != 0x80 ||
4758 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4759 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
* Tell the other part of the chip to temporarily suspend all its DMA work.
4766 resp_buf = phba->MBslimaddr;
4768 /* Disable the error attention */
4769 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4771 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4772 readl(phba->HCregaddr); /* flush */
4773 phba->link_flag |= LS_IGNORE_ERATT;
4775 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4777 if (ha_copy & HA_ERATT) {
4778 /* Clear Chip error bit */
4779 writel(HA_ERATT, phba->HAregaddr);
4780 phba->pport->stopped = 1;
4784 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4785 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4787 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
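/* Seed the second SLIM word with the test pattern; the port signals
 * that the barrier mailbox was seen by writing back its complement,
 * which the polling loop below waits for.
 */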
4788 mbox_buf = phba->MBslimaddr;
4789 writel(mbox, mbox_buf);
4791 for (i = 0; i < 50; i++) {
4792 if (lpfc_readl((resp_buf + 1), &resp_data))
4794 if (resp_data != ~(BARRIER_TEST_PATTERN))
4800 if (lpfc_readl((resp_buf + 1), &resp_data))
4802 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4803 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4804 phba->pport->stopped)
4810 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
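/* Wait for the port to return mailbox ownership: poll the first SLIM
 * word until it matches the mailbox word with OWN_HOST set.
 */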
4812 for (i = 0; i < 500; i++) {
4813 if (lpfc_readl(resp_buf, &resp_data))
4815 if (resp_data != mbox)
4824 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4826 if (!(ha_copy & HA_ERATT))
4832 if (readl(phba->HAregaddr) & HA_ERATT) {
4833 writel(HA_ERATT, phba->HAregaddr);
4834 phba->pport->stopped = 1;
4838 phba->link_flag &= ~LS_IGNORE_ERATT;
4839 writel(hc_copy, phba->HCregaddr);
4840 readl(phba->HCregaddr); /* flush */
4844 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4845 * @phba: Pointer to HBA context object.
4847 * This function issues a kill_board mailbox command and waits for
4848 * the error attention interrupt. This function is called for stopping
4849 * the firmware processing. The caller is not required to hold any
4850 * locks. This function calls lpfc_hba_down_post function to free
* any pending commands after the kill. The function returns 1 when it
* fails to kill the board; otherwise it returns 0.
4855 lpfc_sli_brdkill(struct lpfc_hba *phba)
4857 struct lpfc_sli *psli;
4867 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4868 "0329 Kill HBA Data: x%x x%x\n",
4869 phba->pport->port_state, psli->sli_flag);
4871 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4875 /* Disable the error attention */
4876 spin_lock_irq(&phba->hbalock);
4877 if (lpfc_readl(phba->HCregaddr, &status)) {
4878 spin_unlock_irq(&phba->hbalock);
4879 mempool_free(pmb, phba->mbox_mem_pool);
4882 status &= ~HC_ERINT_ENA;
4883 writel(status, phba->HCregaddr);
4884 readl(phba->HCregaddr); /* flush */
4885 phba->link_flag |= LS_IGNORE_ERATT;
4886 spin_unlock_irq(&phba->hbalock);
4888 lpfc_kill_board(phba, pmb);
4889 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4890 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4892 if (retval != MBX_SUCCESS) {
4893 if (retval != MBX_BUSY)
4894 mempool_free(pmb, phba->mbox_mem_pool);
4895 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4896 "2752 KILL_BOARD command failed retval %d\n",
4898 spin_lock_irq(&phba->hbalock);
4899 phba->link_flag &= ~LS_IGNORE_ERATT;
4900 spin_unlock_irq(&phba->hbalock);
4904 spin_lock_irq(&phba->hbalock);
4905 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4906 spin_unlock_irq(&phba->hbalock);
4908 mempool_free(pmb, phba->mbox_mem_pool);
4910 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4911 * attention every 100ms for 3 seconds. If we don't get ERATT after
4912 * 3 seconds we still set HBA_ERROR state because the status of the
4913 * board is now undefined.
4915 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4917 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4919 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4923 del_timer_sync(&psli->mbox_tmo);
4924 if (ha_copy & HA_ERATT) {
4925 writel(HA_ERATT, phba->HAregaddr);
4926 phba->pport->stopped = 1;
4928 spin_lock_irq(&phba->hbalock);
4929 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4930 psli->mbox_active = NULL;
4931 phba->link_flag &= ~LS_IGNORE_ERATT;
4932 spin_unlock_irq(&phba->hbalock);
4934 lpfc_hba_down_post(phba);
4935 phba->link_state = LPFC_HBA_ERROR;
4937 return ha_copy & HA_ERATT ? 0 : 1;
4941 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4942 * @phba: Pointer to HBA context object.
4944 * This function resets the HBA by writing HC_INITFF to the control
4945 * register. After the HBA resets, this function resets all the iocb ring
* indices. This function disables PCI-layer parity checking during
* the reset.
* This function always returns 0.
4949 * The caller is not required to hold any locks.
4952 lpfc_sli_brdreset(struct lpfc_hba *phba)
4954 struct lpfc_sli *psli;
4955 struct lpfc_sli_ring *pring;
4962 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4963 "0325 Reset HBA Data: x%x x%x\n",
4964 (phba->pport) ? phba->pport->port_state : 0,
4967 /* perform board reset */
4968 phba->fc_eventTag = 0;
4969 phba->link_events = 0;
4970 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4972 phba->pport->fc_myDID = 0;
4973 phba->pport->fc_prevDID = 0;
4976 /* Turn off parity checking and serr during the physical reset */
4977 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4980 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4982 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4984 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4986 /* Now toggle INITFF bit in the Host Control Register */
4987 writel(HC_INITFF, phba->HCregaddr);
4989 readl(phba->HCregaddr); /* flush */
4990 writel(0, phba->HCregaddr);
4991 readl(phba->HCregaddr); /* flush */
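/* The INITFF bit is pulsed: set it, flush with a read so the posted
 * write reaches the chip, then clear it and flush again to complete
 * the reset strobe.
 */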
4993 /* Restore PCI cmd register */
4994 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4996 /* Initialize relevant SLI info */
4997 for (i = 0; i < psli->num_rings; i++) {
4998 pring = &psli->sli3_ring[i];
5000 pring->sli.sli3.rspidx = 0;
5001 pring->sli.sli3.next_cmdidx = 0;
5002 pring->sli.sli3.local_getidx = 0;
5003 pring->sli.sli3.cmdidx = 0;
5004 pring->missbufcnt = 0;
5007 phba->link_state = LPFC_WARM_START;
5012 * lpfc_sli4_brdreset - Reset a sli-4 HBA
5013 * @phba: Pointer to HBA context object.
* This function resets a SLI4 HBA. It disables PCI-layer parity
* checking while it resets the device. The caller is not required to hold
* any locks.
* This function returns 0 on success, else it returns a negative error code.
5022 lpfc_sli4_brdreset(struct lpfc_hba *phba)
5024 struct lpfc_sli *psli = &phba->sli;
5029 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5030 "0295 Reset HBA Data: x%x x%x x%x\n",
5031 phba->pport->port_state, psli->sli_flag,
5034 /* perform board reset */
5035 phba->fc_eventTag = 0;
5036 phba->link_events = 0;
5037 phba->pport->fc_myDID = 0;
5038 phba->pport->fc_prevDID = 0;
5039 phba->hba_flag &= ~HBA_SETUP;
5041 spin_lock_irq(&phba->hbalock);
5042 psli->sli_flag &= ~(LPFC_PROCESS_LA);
5043 phba->fcf.fcf_flag = 0;
5044 spin_unlock_irq(&phba->hbalock);
5046 /* Now physically reset the device */
5047 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5048 "0389 Performing PCI function reset!\n");
5050 /* Turn off parity checking and serr during the physical reset */
5051 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
5052 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5053 "3205 PCI read Config failed\n");
5057 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
5058 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5060 /* Perform FCoE PCI function reset before freeing queue memory */
5061 rc = lpfc_pci_function_reset(phba);
5063 /* Restore PCI cmd register */
5064 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5070 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
5071 * @phba: Pointer to HBA context object.
5073 * This function is called in the SLI initialization code path to
5074 * restart the HBA. The caller is not required to hold any lock.
5075 * This function writes MBX_RESTART mailbox command to the SLIM and
5076 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
5077 * function to free any pending commands. The function enables
5078 * POST only during the first initialization. The function returns zero.
5079 * The function does not guarantee completion of MBX_RESTART mailbox
5080 * command before the return of this function.
5083 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
5086 struct lpfc_sli *psli;
5087 volatile uint32_t word0;
5088 void __iomem *to_slim;
5089 uint32_t hba_aer_enabled;
5091 spin_lock_irq(&phba->hbalock);
5093 /* Take PCIe device Advanced Error Reporting (AER) state */
5094 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
5099 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5100 "0337 Restart HBA Data: x%x x%x\n",
5101 (phba->pport) ? phba->pport->port_state : 0,
5105 mb = (MAILBOX_t *) &word0;
5106 mb->mbxCommand = MBX_RESTART;
5109 lpfc_reset_barrier(phba);
5111 to_slim = phba->MBslimaddr;
5112 writel(*(uint32_t *) mb, to_slim);
5113 readl(to_slim); /* flush */
5115 /* Only skip post after fc_ffinit is completed */
5116 if (phba->pport && phba->pport->port_state)
5117 word0 = 1; /* This is really setting up word1 */
5119 word0 = 0; /* This is really setting up word1 */
5120 to_slim = phba->MBslimaddr + sizeof (uint32_t);
5121 writel(*(uint32_t *) mb, to_slim);
5122 readl(to_slim); /* flush */
5124 lpfc_sli_brdreset(phba);
5126 phba->pport->stopped = 0;
5127 phba->link_state = LPFC_INIT_START;
5129 spin_unlock_irq(&phba->hbalock);
5131 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5132 psli->stats_start = ktime_get_seconds();
5134 /* Give the INITFF and Post time to settle. */
5137 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
5138 if (hba_aer_enabled)
5139 pci_disable_pcie_error_reporting(phba->pcidev);
5141 lpfc_hba_down_post(phba);
5147 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
5148 * @phba: Pointer to HBA context object.
5150 * This function is called in the SLI initialization code path to restart
5151 * a SLI4 HBA. The caller is not required to hold any lock.
5152 * At the end of the function, it calls lpfc_hba_down_post function to
5153 * free any pending commands.
5156 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
5158 struct lpfc_sli *psli = &phba->sli;
5159 uint32_t hba_aer_enabled;
5163 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5164 "0296 Restart HBA Data: x%x x%x\n",
5165 phba->pport->port_state, psli->sli_flag);
5167 /* Take PCIe device Advanced Error Reporting (AER) state */
5168 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
5170 rc = lpfc_sli4_brdreset(phba);
5172 phba->link_state = LPFC_HBA_ERROR;
5173 goto hba_down_queue;
5176 spin_lock_irq(&phba->hbalock);
5177 phba->pport->stopped = 0;
5178 phba->link_state = LPFC_INIT_START;
5180 spin_unlock_irq(&phba->hbalock);
5182 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5183 psli->stats_start = ktime_get_seconds();
5185 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
5186 if (hba_aer_enabled)
5187 pci_disable_pcie_error_reporting(phba->pcidev);
5190 lpfc_hba_down_post(phba);
5191 lpfc_sli4_queue_destroy(phba);
5197 * lpfc_sli_brdrestart - Wrapper func for restarting hba
5198 * @phba: Pointer to HBA context object.
* This routine wraps the actual SLI3 or SLI4 hba restart routine, invoking
* it through the API jump table function pointer in the lpfc_hba struct.
5204 lpfc_sli_brdrestart(struct lpfc_hba *phba)
5206 return phba->lpfc_sli_brdrestart(phba);
5210 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
5211 * @phba: Pointer to HBA context object.
* This function is called after an HBA restart to wait for successful
5214 * restart of the HBA. Successful restart of the HBA is indicated by
5215 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
* iterations, the function will restart the HBA again. The function returns
* zero if the HBA successfully restarted, else it returns a negative error code.
5220 lpfc_sli_chipset_init(struct lpfc_hba *phba)
5222 uint32_t status, i = 0;
5224 /* Read the HBA Host Status Register */
5225 if (lpfc_readl(phba->HSregaddr, &status))
5228 /* Check status register to see what current state is */
5230 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
/* Check every 10ms for 10 retries, then every 100ms for 90
 * retries, then every 1 sec for 50 retries, for a total of
 * ~60 seconds before resetting the board again and checking
 * every 1 sec for 50 more retries. Up to 60 seconds before
 * board ready is required for the Falcon FIPS zeroization to
 * complete; any board reset in between restarts the
 * zeroization and further delays board readiness.
 */
/* Adapter failed to init, timeout, status reg <status> */
5243 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5244 "0436 Adapter failed to init, "
5245 "timeout, status reg x%x, "
5246 "FW Data: A8 x%x AC x%x\n", status,
5247 readl(phba->MBslimaddr + 0xa8),
5248 readl(phba->MBslimaddr + 0xac));
5249 phba->link_state = LPFC_HBA_ERROR;
5253 /* Check to see if any errors occurred during init */
5254 if (status & HS_FFERM) {
5255 /* ERROR: During chipset initialization */
/* Adapter failed to init, chipset, status reg <status> */
5258 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5259 "0437 Adapter failed to init, "
5260 "chipset, status reg x%x, "
5261 "FW Data: A8 x%x AC x%x\n", status,
5262 readl(phba->MBslimaddr + 0xa8),
5263 readl(phba->MBslimaddr + 0xac));
5264 phba->link_state = LPFC_HBA_ERROR;
5277 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5278 lpfc_sli_brdrestart(phba);
5280 /* Read the HBA Host Status Register */
5281 if (lpfc_readl(phba->HSregaddr, &status))
5285 /* Check to see if any errors occurred during init */
5286 if (status & HS_FFERM) {
5287 /* ERROR: During chipset initialization */
5288 /* Adapter failed to init, chipset, status reg <status> */
5289 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5290 "0438 Adapter failed to init, chipset, "
5292 "FW Data: A8 x%x AC x%x\n", status,
5293 readl(phba->MBslimaddr + 0xa8),
5294 readl(phba->MBslimaddr + 0xac));
5295 phba->link_state = LPFC_HBA_ERROR;
5299 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5301 /* Clear all interrupt enable conditions */
5302 writel(0, phba->HCregaddr);
5303 readl(phba->HCregaddr); /* flush */
5305 /* setup host attn register */
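/* HA is write-one-to-clear; writing all ones discards any attention
 * bits latched while the chip was initializing.
 */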
5306 writel(0xffffffff, phba->HAregaddr);
5307 readl(phba->HAregaddr); /* flush */
5312 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
* This function calculates and returns the number of HBQs required to be configured.
5318 lpfc_sli_hbq_count(void)
5320 return ARRAY_SIZE(lpfc_hbq_defs);
5324 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
5326 * This function adds the number of hbq entries in every HBQ to get
* the total number of hbq entries required for the HBA, and returns the result.
5331 lpfc_sli_hbq_entry_count(void)
5333 int hbq_count = lpfc_sli_hbq_count();
5337 for (i = 0; i < hbq_count; ++i)
5338 count += lpfc_hbq_defs[i]->entry_count;
5343 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
* This function calculates the amount of memory required for all hbq entries
5346 * to be configured and returns the total memory required.
5349 lpfc_sli_hbq_size(void)
5351 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
5355 * lpfc_sli_hbq_setup - configure and initialize HBQs
5356 * @phba: Pointer to HBA context object.
5358 * This function is called during the SLI initialization to configure
5359 * all the HBQs and post buffers to the HBQ. The caller is not
5360 * required to hold any locks. This function will return zero if successful
5361 * else it will return negative error code.
5364 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
5366 int hbq_count = lpfc_sli_hbq_count();
5370 uint32_t hbq_entry_index;
/* Get a mailbox buffer to set up the mailbox
 * commands for HBA initialization.
 */
5375 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5382 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
5383 phba->link_state = LPFC_INIT_MBX_CMDS;
5384 phba->hbq_in_use = 1;
5386 hbq_entry_index = 0;
5387 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
5388 phba->hbqs[hbqno].next_hbqPutIdx = 0;
5389 phba->hbqs[hbqno].hbqPutIdx = 0;
5390 phba->hbqs[hbqno].local_hbqGetIdx = 0;
5391 phba->hbqs[hbqno].entry_count =
5392 lpfc_hbq_defs[hbqno]->entry_count;
5393 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
5394 hbq_entry_index, pmb);
5395 hbq_entry_index += phba->hbqs[hbqno].entry_count;
5397 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
5398 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
5399 mbxStatus <status>, ring <num> */
5401 lpfc_printf_log(phba, KERN_ERR,
5402 LOG_SLI | LOG_VPORT,
5403 "1805 Adapter failed to init. "
5404 "Data: x%x x%x x%x\n",
5406 pmbox->mbxStatus, hbqno);
5408 phba->link_state = LPFC_HBA_ERROR;
5409 mempool_free(pmb, phba->mbox_mem_pool);
5413 phba->hbq_count = hbq_count;
5415 mempool_free(pmb, phba->mbox_mem_pool);
5417 /* Initially populate or replenish the HBQs */
5418 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5419 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5424 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5425 * @phba: Pointer to HBA context object.
5427 * This function is called during the SLI initialization to configure
5428 * all the HBQs and post buffers to the HBQ. The caller is not
5429 * required to hold any locks. This function will return zero if successful
5430 * else it will return negative error code.
5433 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5435 phba->hbq_in_use = 1;
* Specific case when MDS diagnostics is enabled and supported.
* The receive buffer count is truncated to manage the incoming raw stream.
5441 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5442 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5443 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5445 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5446 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5447 phba->hbq_count = 1;
5448 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5449 /* Initially populate or replenish the HBQs */
5454 * lpfc_sli_config_port - Issue config port mailbox command
5455 * @phba: Pointer to HBA context object.
5456 * @sli_mode: sli mode - 2/3
5458 * This function is called by the sli initialization code path
5459 * to issue config_port mailbox command. This function restarts the
5460 * HBA firmware and issues a config_port mailbox command to configure
5461 * the SLI interface in the sli mode specified by sli_mode
5462 * variable. The caller is not required to hold any locks.
* The function returns 0 if successful, else it returns a negative error code.
5467 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5470 uint32_t resetcount = 0, rc = 0, done = 0;
5472 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5474 phba->link_state = LPFC_HBA_ERROR;
5478 phba->sli_rev = sli_mode;
5479 while (resetcount < 2 && !done) {
5480 spin_lock_irq(&phba->hbalock);
5481 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5482 spin_unlock_irq(&phba->hbalock);
5483 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5484 lpfc_sli_brdrestart(phba);
5485 rc = lpfc_sli_chipset_init(phba);
5489 spin_lock_irq(&phba->hbalock);
5490 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5491 spin_unlock_irq(&phba->hbalock);
5494 /* Call pre CONFIG_PORT mailbox command initialization. A
5495 * value of 0 means the call was successful. Any other
5496 * nonzero value is a failure, but if ERESTART is returned,
5497 * the driver may reset the HBA and try again.
5499 rc = lpfc_config_port_prep(phba);
5500 if (rc == -ERESTART) {
5501 phba->link_state = LPFC_LINK_UNKNOWN;
5506 phba->link_state = LPFC_INIT_MBX_CMDS;
5507 lpfc_config_port(phba, pmb);
5508 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5509 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5510 LPFC_SLI3_HBQ_ENABLED |
5511 LPFC_SLI3_CRP_ENABLED |
5512 LPFC_SLI3_DSS_ENABLED);
5513 if (rc != MBX_SUCCESS) {
5514 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5515 "0442 Adapter failed to init, mbxCmd x%x "
5516 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5517 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5518 spin_lock_irq(&phba->hbalock);
5519 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5520 spin_unlock_irq(&phba->hbalock);
5523 /* Allow asynchronous mailbox command to go through */
5524 spin_lock_irq(&phba->hbalock);
5525 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5526 spin_unlock_irq(&phba->hbalock);
5529 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5530 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5531 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5532 "3110 Port did not grant ASABT\n");
5537 goto do_prep_failed;
5539 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5540 if (!pmb->u.mb.un.varCfgPort.cMA) {
5542 goto do_prep_failed;
5544 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5545 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5546 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5547 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5548 phba->max_vpi : phba->max_vports;
5552 if (pmb->u.mb.un.varCfgPort.gerbm)
5553 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5554 if (pmb->u.mb.un.varCfgPort.gcrp)
5555 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5557 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5558 phba->port_gp = phba->mbox->us.s3_pgp.port;
5560 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5561 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5562 phba->cfg_enable_bg = 0;
5563 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5564 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5565 "0443 Adapter did not grant "
5570 phba->hbq_get = NULL;
5571 phba->port_gp = phba->mbox->us.s2.port;
5575 mempool_free(pmb, phba->mbox_mem_pool);
5581 * lpfc_sli_hba_setup - SLI initialization function
5582 * @phba: Pointer to HBA context object.
5584 * This function is the main SLI initialization function. This function
5585 * is called by the HBA initialization code, HBA reset code and HBA
5586 * error attention handler code. Caller is not required to hold any
5587 * locks. This function issues config_port mailbox command to configure
* the SLI, set up the iocb rings and HBQ rings. In the end the function
5589 * calls the config_port_post function to issue init_link mailbox
5590 * command and to start the discovery. The function will return zero
5591 * if successful, else it will return negative error code.
5594 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5600 /* Enable ISR already does config_port because of config_msi mbx */
5601 if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
5602 rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
5605 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
5607 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5609 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5610 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5611 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5613 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5614 "2709 This device supports "
5615 "Advanced Error Reporting (AER)\n");
5616 spin_lock_irq(&phba->hbalock);
5617 phba->hba_flag |= HBA_AER_ENABLED;
5618 spin_unlock_irq(&phba->hbalock);
5620 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5621 "2708 This device does not support "
5622 "Advanced Error Reporting (AER): %d\n",
5624 phba->cfg_aer_support = 0;
5628 if (phba->sli_rev == 3) {
5629 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5630 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5632 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5633 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5634 phba->sli3_options = 0;
5637 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5638 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5639 phba->sli_rev, phba->max_vpi);
5640 rc = lpfc_sli_ring_map(phba);
5643 goto lpfc_sli_hba_setup_error;
5645 /* Initialize VPIs. */
5646 if (phba->sli_rev == LPFC_SLI_REV3) {
5648 * The VPI bitmask and physical ID array are allocated
5649 * and initialized once only - at driver load. A port
5650 * reset doesn't need to reinitialize this memory.
5652 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5653 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5654 phba->vpi_bmask = kcalloc(longs,
5655 sizeof(unsigned long),
5657 if (!phba->vpi_bmask) {
5659 goto lpfc_sli_hba_setup_error;
5662 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5665 if (!phba->vpi_ids) {
5666 kfree(phba->vpi_bmask);
5668 goto lpfc_sli_hba_setup_error;
5670 for (i = 0; i < phba->max_vpi; i++)
5671 phba->vpi_ids[i] = i;
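/* VPIs are identity-mapped here: the bitmask tracks allocation and
 * vpi_ids records each VPI as its own physical ID.
 */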
5676 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5677 rc = lpfc_sli_hbq_setup(phba);
5679 goto lpfc_sli_hba_setup_error;
5681 spin_lock_irq(&phba->hbalock);
5682 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5683 spin_unlock_irq(&phba->hbalock);
5685 rc = lpfc_config_port_post(phba);
5687 goto lpfc_sli_hba_setup_error;
5691 lpfc_sli_hba_setup_error:
5692 phba->link_state = LPFC_HBA_ERROR;
5693 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5694 "0445 Firmware initialization failed\n");
5699 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5700 * @phba: Pointer to HBA context object.
* This function issues a dump mailbox command to read config region
* 23, parses the records in the region, and populates driver-specific
* config parameters.
5707 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5709 LPFC_MBOXQ_t *mboxq;
5710 struct lpfc_dmabuf *mp;
5711 struct lpfc_mqe *mqe;
5712 uint32_t data_length;
5715 /* Program the default value of vlan_id and fc_map */
5716 phba->valid_vlan = 0;
5717 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5718 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5719 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5721 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5725 mqe = &mboxq->u.mqe;
5726 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5728 goto out_free_mboxq;
5731 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5732 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5734 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5735 "(%d):2571 Mailbox cmd x%x Status x%x "
5736 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5737 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5738 "CQ: x%x x%x x%x x%x\n",
5739 mboxq->vport ? mboxq->vport->vpi : 0,
5740 bf_get(lpfc_mqe_command, mqe),
5741 bf_get(lpfc_mqe_status, mqe),
5742 mqe->un.mb_words[0], mqe->un.mb_words[1],
5743 mqe->un.mb_words[2], mqe->un.mb_words[3],
5744 mqe->un.mb_words[4], mqe->un.mb_words[5],
5745 mqe->un.mb_words[6], mqe->un.mb_words[7],
5746 mqe->un.mb_words[8], mqe->un.mb_words[9],
5747 mqe->un.mb_words[10], mqe->un.mb_words[11],
5748 mqe->un.mb_words[12], mqe->un.mb_words[13],
5749 mqe->un.mb_words[14], mqe->un.mb_words[15],
5750 mqe->un.mb_words[16], mqe->un.mb_words[50],
5752 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5753 mboxq->mcqe.trailer);
5756 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5759 goto out_free_mboxq;
5761 data_length = mqe->un.mb_words[5];
5762 if (data_length > DMP_RGN23_SIZE) {
5763 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5766 goto out_free_mboxq;
5769 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5770 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5775 mempool_free(mboxq, phba->mbox_mem_pool);
5780 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5781 * @phba: pointer to lpfc hba data structure.
5782 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5783 * @vpd: pointer to the memory to hold resulting port vpd data.
5784 * @vpd_size: On input, the number of bytes allocated to @vpd.
5785 * On output, the number of data bytes in @vpd.
5787 * This routine executes a READ_REV SLI4 mailbox command. In
5788 * addition, this routine gets the port vpd data.
* -ENOMEM - could not allocate memory.
5795 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5796 uint8_t *vpd, uint32_t *vpd_size)
5800 struct lpfc_dmabuf *dmabuf;
5801 struct lpfc_mqe *mqe;
5803 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
* Get a DMA buffer for the vpd data resulting from the READ_REV mailbox command.
5811 dma_size = *vpd_size;
5812 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5813 &dmabuf->phys, GFP_KERNEL);
5814 if (!dmabuf->virt) {
5820 * The SLI4 implementation of READ_REV conflicts at word1,
5821 * bits 31:16 and SLI4 adds vpd functionality not present
5822 * in SLI3. This code corrects the conflicts.
5824 lpfc_read_rev(phba, mboxq);
5825 mqe = &mboxq->u.mqe;
5826 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5827 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5828 mqe->un.read_rev.word1 &= 0x0000FFFF;
5829 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5830 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5832 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5834 dma_free_coherent(&phba->pcidev->dev, dma_size,
5835 dmabuf->virt, dmabuf->phys);
5841 * The available vpd length cannot be bigger than the
5842 * DMA buffer passed to the port. Catch the less than
5843 * case and update the caller's size.
5845 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5846 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5848 memcpy(vpd, dmabuf->virt, *vpd_size);
5850 dma_free_coherent(&phba->pcidev->dev, dma_size,
5851 dmabuf->virt, dmabuf->phys);
5857 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5858 * @phba: pointer to lpfc hba data structure.
* This routine retrieves the SLI4 device controller attributes (link type,
* link number, BIOS version, flash ID, and ASIC revision) for this PCI function.
5865 * otherwise - failed to retrieve controller attributes
5868 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5870 LPFC_MBOXQ_t *mboxq;
5871 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5872 struct lpfc_controller_attribute *cntl_attr;
5873 void *virtaddr = NULL;
5874 uint32_t alloclen, reqlen;
5875 uint32_t shdr_status, shdr_add_status;
5876 union lpfc_sli4_cfg_shdr *shdr;
5879 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5883 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5884 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5885 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5886 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5887 LPFC_SLI4_MBX_NEMBED);
5889 if (alloclen < reqlen) {
5890 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5891 "3084 Allocated DMA memory size (%d) is "
5892 "less than the requested DMA memory size "
5893 "(%d)\n", alloclen, reqlen);
5895 goto out_free_mboxq;
5897 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5898 virtaddr = mboxq->sge_array->addr[0];
5899 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5900 shdr = &mbx_cntl_attr->cfg_shdr;
5901 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5902 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5903 if (shdr_status || shdr_add_status || rc) {
5904 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5905 "3085 Mailbox x%x (x%x/x%x) failed, "
5906 "rc:x%x, status:x%x, add_status:x%x\n",
5907 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5908 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5909 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5910 rc, shdr_status, shdr_add_status);
5912 goto out_free_mboxq;
5915 cntl_attr = &mbx_cntl_attr->cntl_attr;
5916 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5917 phba->sli4_hba.lnk_info.lnk_tp =
5918 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5919 phba->sli4_hba.lnk_info.lnk_no =
5920 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5921 phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
5922 phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
5924 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5925 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5926 sizeof(phba->BIOSVersion));
5928 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5929 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
5930 "flash_id: x%02x, asic_rev: x%02x\n",
5931 phba->sli4_hba.lnk_info.lnk_tp,
5932 phba->sli4_hba.lnk_info.lnk_no,
5933 phba->BIOSVersion, phba->sli4_hba.flash_id,
5934 phba->sli4_hba.asic_rev);
5936 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5937 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5939 mempool_free(mboxq, phba->mbox_mem_pool);
5944 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5945 * @phba: pointer to lpfc hba data structure.
* This routine retrieves the SLI4 device physical port name this PCI function is hosted on.
5952 * otherwise - failed to retrieve physical port name
5955 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5957 LPFC_MBOXQ_t *mboxq;
5958 struct lpfc_mbx_get_port_name *get_port_name;
5959 uint32_t shdr_status, shdr_add_status;
5960 union lpfc_sli4_cfg_shdr *shdr;
5961 char cport_name = 0;
5964 /* We assume nothing at this point */
5965 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5966 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5968 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5971 /* obtain link type and link number via READ_CONFIG */
5972 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5973 lpfc_sli4_read_config(phba);
5974 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5975 goto retrieve_ppname;
5977 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5978 rc = lpfc_sli4_get_ctl_attr(phba);
	if (rc)
		goto out_free_mboxq;
5983 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5984 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5985 sizeof(struct lpfc_mbx_get_port_name) -
5986 sizeof(struct lpfc_sli4_cfg_mhdr),
5987 LPFC_SLI4_MBX_EMBED);
5988 get_port_name = &mboxq->u.mqe.un.get_port_name;
5989 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5990 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5991 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5992 phba->sli4_hba.lnk_info.lnk_tp);
5993 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5994 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5995 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5996 if (shdr_status || shdr_add_status || rc) {
5997 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5998 "3087 Mailbox x%x (x%x/x%x) failed: "
5999 "rc:x%x, status:x%x, add_status:x%x\n",
6000 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6001 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6002 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6003 rc, shdr_status, shdr_add_status);
6005 goto out_free_mboxq;
6007 switch (phba->sli4_hba.lnk_info.lnk_no) {
6008 case LPFC_LINK_NUMBER_0:
6009 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
6010 &get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
6013 case LPFC_LINK_NUMBER_1:
6014 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
6015 &get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
6018 case LPFC_LINK_NUMBER_2:
6019 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
6020 &get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
6023 case LPFC_LINK_NUMBER_3:
6024 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
6025 &get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	default:
		break;
	}
6032 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
6033 phba->Port[0] = cport_name;
6034 phba->Port[1] = '\0';
6035 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6036 "3091 SLI get port name: %s\n", phba->Port);
6040 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6041 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6043 mempool_free(mboxq, phba->mbox_mem_pool);
6048 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
6049 * @phba: pointer to lpfc hba data structure.
 * This routine is called to explicitly arm the SLI4 device's completion and
 * event queues.
 */
6055 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
6058 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
6059 struct lpfc_sli4_hdw_queue *qp;
6060 struct lpfc_queue *eq;
6062 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
6063 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
	if (sli4_hba->nvmels_cq)
		sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
					   LPFC_QUEUE_REARM);
6068 if (sli4_hba->hdwq) {
6069 /* Loop thru all Hardware Queues */
6070 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
6071 qp = &sli4_hba->hdwq[qidx];
6072 /* ARM the corresponding CQ */
6073 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
6077 /* Loop thru all IRQ vectors */
6078 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
6079 eq = sli4_hba->hba_eq_hdl[qidx].eq;
6080 /* ARM the corresponding EQ */
6081 sli4_hba->sli4_write_eq_db(phba, eq,
6082 0, LPFC_QUEUE_REARM);
6086 if (phba->nvmet_support) {
6087 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
			sli4_hba->sli4_write_cq_db(phba,
						   sli4_hba->nvmet_cqset[qidx],
						   0, LPFC_QUEUE_REARM);
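/*
 * Editor's note: writing a CQ/EQ doorbell with LPFC_QUEUE_REARM re-arms the
 * queue's interrupt, so the port raises a fresh interrupt the next time work
 * arrives on that queue; a plain doorbell write (no rearm flag) only
 * acknowledges already-consumed entries.
 */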
6096 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
6097 * @phba: Pointer to HBA context object.
6098 * @type: The resource extent type.
6099 * @extnt_count: buffer to hold port available extent count.
6100 * @extnt_size: buffer to hold element count per extent.
 * This function calls the port and retrieves the number of available
6103 * extents and their size for a particular extent type.
6105 * Returns: 0 if successful. Nonzero otherwise.
6108 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
6109 uint16_t *extnt_count, uint16_t *extnt_size)
6114 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
6117 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6121 /* Find out how many extents are available for this resource type */
6122 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
6123 sizeof(struct lpfc_sli4_cfg_mhdr));
6124 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6125 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
6126 length, LPFC_SLI4_MBX_EMBED);
6128 /* Send an extents count of 0 - the GET doesn't use it. */
6129 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6130 LPFC_SLI4_MBX_EMBED);
6136 if (!phba->sli4_hba.intr_enable)
6137 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
6147 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
6148 if (bf_get(lpfc_mbox_hdr_status,
6149 &rsrc_info->header.cfg_shdr.response)) {
6150 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6151 "2930 Failed to get resource extents "
6152 "Status 0x%x Add'l Status 0x%x\n",
6153 bf_get(lpfc_mbox_hdr_status,
6154 &rsrc_info->header.cfg_shdr.response),
6155 bf_get(lpfc_mbox_hdr_add_status,
6156 &rsrc_info->header.cfg_shdr.response));
6161 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
6163 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
6166 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6167 "3162 Retrieved extents type-%d from port: count:%d, "
6168 "size:%d\n", type, *extnt_count, *extnt_size);
6171 mempool_free(mbox, phba->mbox_mem_pool);
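/*
 * Illustrative use (editor's sketch, not driver code): a caller sizing a
 * provisioning pass consumes the two out-parameters like this:
 *
 *	uint16_t cnt, size;
 *	uint32_t total_ids;
 *
 *	if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &cnt, &size))
 *		total_ids = cnt * size;	 -- cnt extents of size ids each
 *
 * lpfc_sli4_alloc_extent() below follows exactly this pattern.
 */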
6176 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
6177 * @phba: Pointer to HBA context object.
6178 * @type: The extent type to check.
6180 * This function reads the current available extents from the port and checks
6181 * if the extent count or extent size has changed since the last access.
 * Callers use this routine post port reset to understand if there is an
 * extent reprovisioning requirement.
 *
 * Returns:
 *   -Error: error indicates problem.
 *   1: Extent count or size has changed.
 *   0: No changes.
 */
6191 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
6193 uint16_t curr_ext_cnt, rsrc_ext_cnt;
6194 uint16_t size_diff, rsrc_ext_size;
6196 struct lpfc_rsrc_blks *rsrc_entry;
6197 struct list_head *rsrc_blk_list = NULL;
6201 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6208 case LPFC_RSC_TYPE_FCOE_RPI:
6209 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6211 case LPFC_RSC_TYPE_FCOE_VPI:
6212 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
6214 case LPFC_RSC_TYPE_FCOE_XRI:
6215 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6217 case LPFC_RSC_TYPE_FCOE_VFI:
6218 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
	list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
		curr_ext_cnt++;
		if (rsrc_entry->rsrc_size != rsrc_ext_size)
			size_diff++;
	}

	if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
		rc = 1;

	return rc;
}
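/*
 * Editor's note: lpfc_sli4_alloc_resource_identifiers() treats a return of 1
 * from this check as "extent provisioning changed" and responds by
 * deallocating and reallocating every FCoE resource type; see the post-reset
 * path later in this file.
 */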
 * lpfc_sli4_cfg_post_extnts - Post the resource extent allocation request
6238 * @phba: Pointer to HBA context object.
6239 * @extnt_cnt: number of available extents.
6240 * @type: the extent type (rpi, xri, vfi, vpi).
6241 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
6242 * @mbox: pointer to the caller's allocated mailbox structure.
6244 * This function executes the extents allocation request. It also
6245 * takes care of the amount of memory needed to allocate or get the
 * allocated extents. It is the caller's responsibility to evaluate the
 * response.
 *
 * Returns:
 *   -Error: Error value describes the condition found.
 *   0: if successful
 */
6254 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6255 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6260 uint32_t alloc_len, mbox_tmo;
6262 /* Calculate the total requested length of the dma memory */
6263 req_len = extnt_cnt * sizeof(uint16_t);
	/*
	 * Calculate the size of an embedded mailbox. The uint32_t
	 * accounts for the extents-specific word.
	 */
6269 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6273 * Presume the allocation and response will fit into an embedded
6274 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6276 *emb = LPFC_SLI4_MBX_EMBED;
6277 if (req_len > emb_len) {
6278 req_len = extnt_cnt * sizeof(uint16_t) +
6279 sizeof(union lpfc_sli4_cfg_shdr) +
6281 *emb = LPFC_SLI4_MBX_NEMBED;
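	/*
	 * Editor's note: the sizing above selects between the two SLI4
	 * mailbox layouts. An embedded request must fit in the mailbox
	 * itself, so the usable payload is sizeof(MAILBOX_t) minus the
	 * config header and the extent-specific word. As an illustration
	 * (counts assumed, not from the source): requesting 64 extents needs
	 * 64 * sizeof(uint16_t) = 128 bytes of id space; if that exceeds
	 * emb_len, the command is rebuilt as a non-embedded (SGE-based)
	 * request whose length also covers the cfg_shdr.
	 */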
6284 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6285 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6287 if (alloc_len < req_len) {
6288 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6289 "2982 Allocated DMA memory size (x%x) is "
6290 "less than the requested DMA memory "
6291 "size (x%x)\n", alloc_len, req_len);
6294 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6298 if (!phba->sli4_hba.intr_enable)
6299 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
6311 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
6312 * @phba: Pointer to HBA context object.
6313 * @type: The resource extent type to allocate.
 * This function allocates the number of elements for the specified
 * resource type.
 */
6319 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6322 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6323 uint16_t rsrc_id, rsrc_start, j, k;
6326 unsigned long longs;
6327 unsigned long *bmask;
6328 struct lpfc_rsrc_blks *rsrc_blks;
6331 struct lpfc_id_range *id_array = NULL;
6332 void *virtaddr = NULL;
6333 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6334 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6335 struct list_head *ext_blk_list;
6337 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6343 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
6344 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6345 "3009 No available Resource Extents "
6346 "for resource type 0x%x: Count: 0x%x, "
6347 "Size 0x%x\n", type, rsrc_cnt,
6352 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6353 "2903 Post resource extents type-0x%x: "
6354 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6356 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6360 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6367 * Figure out where the response is located. Then get local pointers
 * to the response data. The port does not guarantee to honor the full
 * extent count requested, so update the local variable with the count
 * actually allocated by the port.
 */
6372 if (emb == LPFC_SLI4_MBX_EMBED) {
6373 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6374 id_array = &rsrc_ext->u.rsp.id[0];
6375 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6377 virtaddr = mbox->sge_array->addr[0];
6378 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6379 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6380 id_array = &n_rsrc->id;
6383 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6384 rsrc_id_cnt = rsrc_cnt * rsrc_size;
6387 * Based on the resource size and count, correct the base and max
6390 length = sizeof(struct lpfc_rsrc_blks);
6392 case LPFC_RSC_TYPE_FCOE_RPI:
6393 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6394 sizeof(unsigned long),
6396 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6400 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6403 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6404 kfree(phba->sli4_hba.rpi_bmask);
6410 * The next_rpi was initialized with the maximum available
6411 * count but the port may allocate a smaller number. Catch
6412 * that case and update the next_rpi.
6414 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6416 /* Initialize local ptrs for common extent processing later. */
6417 bmask = phba->sli4_hba.rpi_bmask;
6418 ids = phba->sli4_hba.rpi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
6421 case LPFC_RSC_TYPE_FCOE_VPI:
6422 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6424 if (unlikely(!phba->vpi_bmask)) {
6428 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6430 if (unlikely(!phba->vpi_ids)) {
6431 kfree(phba->vpi_bmask);
6436 /* Initialize local ptrs for common extent processing later. */
6437 bmask = phba->vpi_bmask;
6438 ids = phba->vpi_ids;
		ext_blk_list = &phba->lpfc_vpi_blk_list;
		break;
6441 case LPFC_RSC_TYPE_FCOE_XRI:
6442 phba->sli4_hba.xri_bmask = kcalloc(longs,
6443 sizeof(unsigned long),
6445 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6449 phba->sli4_hba.max_cfg_param.xri_used = 0;
6450 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6453 if (unlikely(!phba->sli4_hba.xri_ids)) {
6454 kfree(phba->sli4_hba.xri_bmask);
6459 /* Initialize local ptrs for common extent processing later. */
6460 bmask = phba->sli4_hba.xri_bmask;
6461 ids = phba->sli4_hba.xri_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
6464 case LPFC_RSC_TYPE_FCOE_VFI:
6465 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6466 sizeof(unsigned long),
6468 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6472 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6475 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6476 kfree(phba->sli4_hba.vfi_bmask);
6481 /* Initialize local ptrs for common extent processing later. */
6482 bmask = phba->sli4_hba.vfi_bmask;
6483 ids = phba->sli4_hba.vfi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		/* Unsupported resource type. Fail the call. */
6491 ext_blk_list = NULL;
6496 * Complete initializing the extent configuration with the
6497 * allocated ids assigned to this function. The bitmask serves
6498 * as an index into the array and manages the available ids. The
6499 * array just stores the ids communicated to the port via the wqes.
6501 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6503 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6506 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6509 rsrc_blks = kzalloc(length, GFP_KERNEL);
6510 if (unlikely(!rsrc_blks)) {
6516 rsrc_blks->rsrc_start = rsrc_id;
6517 rsrc_blks->rsrc_size = rsrc_size;
6518 list_add_tail(&rsrc_blks->list, ext_blk_list);
6519 rsrc_start = rsrc_id;
6520 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6521 phba->sli4_hba.io_xri_start = rsrc_start +
6522 lpfc_sli4_get_iocb_cnt(phba);
6525 while (rsrc_id < (rsrc_start + rsrc_size)) {
				/* Entire word processed. Get next word. */
6535 lpfc_sli4_mbox_cmd_free(phba, mbox);
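/*
 * Editor's sketch (illustrative, not driver code): once the bitmask and id
 * array are provisioned above, handing out one resource id typically reduces
 * to the classic bitmap-allocator idiom:
 *
 *	idx = find_first_zero_bit(bmask, max_ids);
 *	if (idx < max_ids) {
 *		set_bit(idx, bmask);
 *		rsrc_id = ids[idx];
 *	}
 *
 * i.e. the bitmask tracks availability while ids[] holds the values the port
 * actually assigned.
 */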
6542 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6543 * @phba: Pointer to HBA context object.
6544 * @type: the extent's type.
6546 * This function deallocates all extents of a particular resource type.
6547 * SLI4 does not allow for deallocating a particular extent range. It
6548 * is the caller's responsibility to release all kernel memory resources.
6551 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6554 uint32_t length, mbox_tmo = 0;
6556 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6557 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6559 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6564 * This function sends an embedded mailbox because it only sends the
 * resource type. All extents of this type are released by the
6568 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6569 sizeof(struct lpfc_sli4_cfg_mhdr));
6570 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6571 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6572 length, LPFC_SLI4_MBX_EMBED);
6574 /* Send an extents count of 0 - the dealloc doesn't use it. */
6575 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6576 LPFC_SLI4_MBX_EMBED);
6581 if (!phba->sli4_hba.intr_enable)
6582 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
6592 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6593 if (bf_get(lpfc_mbox_hdr_status,
6594 &dealloc_rsrc->header.cfg_shdr.response)) {
6595 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6596 "2919 Failed to release resource extents "
6597 "for type %d - Status 0x%x Add'l Status 0x%x. "
6598 "Resource memory not released.\n",
6600 bf_get(lpfc_mbox_hdr_status,
6601 &dealloc_rsrc->header.cfg_shdr.response),
6602 bf_get(lpfc_mbox_hdr_add_status,
6603 &dealloc_rsrc->header.cfg_shdr.response));
6608 /* Release kernel memory resources for the specific type. */
6610 case LPFC_RSC_TYPE_FCOE_VPI:
6611 kfree(phba->vpi_bmask);
6612 kfree(phba->vpi_ids);
6613 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6614 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6615 &phba->lpfc_vpi_blk_list, list) {
6616 list_del_init(&rsrc_blk->list);
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		break;
6621 case LPFC_RSC_TYPE_FCOE_XRI:
6622 kfree(phba->sli4_hba.xri_bmask);
6623 kfree(phba->sli4_hba.xri_ids);
6624 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6625 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6626 list_del_init(&rsrc_blk->list);
6630 case LPFC_RSC_TYPE_FCOE_VFI:
6631 kfree(phba->sli4_hba.vfi_bmask);
6632 kfree(phba->sli4_hba.vfi_ids);
6633 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6634 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6635 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6636 list_del_init(&rsrc_blk->list);
6640 case LPFC_RSC_TYPE_FCOE_RPI:
6641 /* RPI bitmask and physical id array are cleaned up earlier. */
6642 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6643 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6644 list_del_init(&rsrc_blk->list);
6652 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6655 mempool_free(mbox, phba->mbox_mem_pool);
6660 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6666 len = sizeof(struct lpfc_mbx_set_feature) -
6667 sizeof(struct lpfc_sli4_cfg_mhdr);
6668 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6669 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6670 LPFC_SLI4_MBX_EMBED);
6673 case LPFC_SET_UE_RECOVERY:
6674 bf_set(lpfc_mbx_set_feature_UER,
6675 &mbox->u.mqe.un.set_feature, 1);
6676 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
		mbox->u.mqe.un.set_feature.param_len = 8;
		break;
6679 case LPFC_SET_MDS_DIAGS:
6680 bf_set(lpfc_mbx_set_feature_mds,
6681 &mbox->u.mqe.un.set_feature, 1);
6682 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6683 &mbox->u.mqe.un.set_feature, 1);
6684 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
		mbox->u.mqe.un.set_feature.param_len = 8;
		break;
6687 case LPFC_SET_CGN_SIGNAL:
6688 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6691 sig_freq = phba->cgn_sig_freq;
6693 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6694 bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
6695 &mbox->u.mqe.un.set_feature, sig_freq);
6696 bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6697 &mbox->u.mqe.un.set_feature, sig_freq);
6700 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
6701 bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6702 &mbox->u.mqe.un.set_feature, sig_freq);
6704 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6705 phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
6708 sig_freq = lpfc_acqe_cgn_frequency;
6710 bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
6711 &mbox->u.mqe.un.set_feature, sig_freq);
6713 mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
		mbox->u.mqe.un.set_feature.param_len = 12;
		break;
6716 case LPFC_SET_DUAL_DUMP:
6717 bf_set(lpfc_mbx_set_feature_dd,
6718 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6719 bf_set(lpfc_mbx_set_feature_ddquery,
6720 &mbox->u.mqe.un.set_feature, 0);
6721 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
		mbox->u.mqe.un.set_feature.param_len = 4;
		break;
6724 case LPFC_SET_ENABLE_MI:
6725 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
6726 mbox->u.mqe.un.set_feature.param_len = 4;
6727 bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
6728 phba->pport->cfg_lun_queue_depth);
6729 bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
		       phba->sli4_hba.pc_sli4_params.mi_ver);
		break;
6732 case LPFC_SET_ENABLE_CMF:
6733 bf_set(lpfc_mbx_set_feature_dd, &mbox->u.mqe.un.set_feature, 1);
6734 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
6735 mbox->u.mqe.un.set_feature.param_len = 4;
6736 bf_set(lpfc_mbx_set_feature_cmf,
		       &mbox->u.mqe.un.set_feature, 1);
		break;
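/*
 * Illustrative call pattern (editor's sketch): lpfc_set_features() only
 * populates the mailbox; the caller still owns allocation and issuance, e.g.
 *
 *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (mbox) {
 *		lpfc_set_features(phba, mbox, LPFC_SET_MDS_DIAGS);
 *		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	}
 *
 * lpfc_config_cgn_signal() further down follows this shape with
 * LPFC_SET_CGN_SIGNAL and an asynchronous MBX_NOWAIT issue.
 */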
6744 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6745 * @phba: Pointer to HBA context object.
6747 * Disable FW logging into host memory on the adapter. To
6748 * be done before reading logs from the host memory.
6751 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6753 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6755 spin_lock_irq(&phba->hbalock);
6756 ras_fwlog->state = INACTIVE;
6757 spin_unlock_irq(&phba->hbalock);
6759 /* Disable FW logging to host memory */
6760 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6761 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
	/* Wait 10-20 ms for the firmware to stop using the DMA buffers */
6764 usleep_range(10 * 1000, 20 * 1000);
6768 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6769 * @phba: Pointer to HBA context object.
6771 * This function is called to free memory allocated for RAS FW logging
6772 * support in the driver.
6775 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6777 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6778 struct lpfc_dmabuf *dmabuf, *next;
6780 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6781 list_for_each_entry_safe(dmabuf, next,
6782 &ras_fwlog->fwlog_buff_list,
6784 list_del(&dmabuf->list);
6785 dma_free_coherent(&phba->pcidev->dev,
6786 LPFC_RAS_MAX_ENTRY_SIZE,
6787 dmabuf->virt, dmabuf->phys);
6792 if (ras_fwlog->lwpd.virt) {
6793 dma_free_coherent(&phba->pcidev->dev,
6794 sizeof(uint32_t) * 2,
6795 ras_fwlog->lwpd.virt,
6796 ras_fwlog->lwpd.phys);
6797 ras_fwlog->lwpd.virt = NULL;
6800 spin_lock_irq(&phba->hbalock);
6801 ras_fwlog->state = INACTIVE;
6802 spin_unlock_irq(&phba->hbalock);
6806 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support
6807 * @phba: Pointer to HBA context object.
6808 * @fwlog_buff_count: Count of buffers to be created.
 * This routine allocates DMA memory for the Log Write Position Data [LWPD]
 * and for the buffers posted to the adapter to receive FW log updates.
 * The buffer count is calculated from the module parameter
 * ras_fwlog_buffsize; each buffer posted to the FW is 64K.
 */
6817 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6818 uint32_t fwlog_buff_count)
6820 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6821 struct lpfc_dmabuf *dmabuf;
6824 /* Initialize List */
6825 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6827 /* Allocate memory for the LWPD */
6828 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6829 sizeof(uint32_t) * 2,
6830 &ras_fwlog->lwpd.phys,
6832 if (!ras_fwlog->lwpd.virt) {
6833 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6834 "6185 LWPD Memory Alloc Failed\n");
6839 ras_fwlog->fw_buffcount = fwlog_buff_count;
6840 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6841 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6845 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6846 "6186 Memory Alloc failed FW logging");
6850 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6851 LPFC_RAS_MAX_ENTRY_SIZE,
6852 &dmabuf->phys, GFP_KERNEL);
6853 if (!dmabuf->virt) {
6856 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6857 "6187 DMA Alloc Failed FW logging");
6860 dmabuf->buffer_tag = i;
6861 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6866 lpfc_sli4_ras_dma_free(phba);
6872 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6873 * @phba: pointer to lpfc hba data structure.
6874 * @pmb: pointer to the driver internal queue element for mailbox command.
6876 * Completion handler for driver's RAS MBX command to the device.
6879 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6882 union lpfc_sli4_cfg_shdr *shdr;
6883 uint32_t shdr_status, shdr_add_status;
6884 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6888 shdr = (union lpfc_sli4_cfg_shdr *)
6889 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6890 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6891 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6893 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6894 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6895 "6188 FW LOG mailbox "
6896 "completed with status x%x add_status x%x,"
6897 " mbx status x%x\n",
6898 shdr_status, shdr_add_status, mb->mbxStatus);
6900 ras_fwlog->ras_hwsupport = false;
6904 spin_lock_irq(&phba->hbalock);
6905 ras_fwlog->state = ACTIVE;
6906 spin_unlock_irq(&phba->hbalock);
6907 mempool_free(pmb, phba->mbox_mem_pool);
6912 /* Free RAS DMA memory */
6913 lpfc_sli4_ras_dma_free(phba);
6914 mempool_free(pmb, phba->mbox_mem_pool);
6918 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
6919 * @phba: pointer to lpfc hba data structure.
6920 * @fwlog_level: Logging verbosity level.
6921 * @fwlog_enable: Enable/Disable logging.
 * Initialize memory and post mailbox command to enable FW logging in host
 * memory.
 */
6927 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6928 uint32_t fwlog_level,
6929 uint32_t fwlog_enable)
6931 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6932 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6933 struct lpfc_dmabuf *dmabuf;
6935 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6938 spin_lock_irq(&phba->hbalock);
6939 ras_fwlog->state = INACTIVE;
6940 spin_unlock_irq(&phba->hbalock);
6942 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6943 phba->cfg_ras_fwlog_buffsize);
6944 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
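	/*
	 * Worked example (editor's note): each entry is 64K per the comment
	 * above. Assuming LPFC_RAS_MIN_BUFF_POST_SIZE is 256K (an assumption,
	 * not taken from this listing), cfg_ras_fwlog_buffsize = 2 gives a
	 * 512K log, i.e. fwlog_entry_count = 8 DMA buffers.
	 */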
6947 * If re-enabling FW logging support use earlier allocated
6948 * DMA buffers while posting MBX command.
6950 if (!ras_fwlog->lwpd.virt) {
6951 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6953 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6954 "6189 FW Log Memory Allocation Failed");
6959 /* Setup Mailbox command */
6960 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6962 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6963 "6190 RAS MBX Alloc Failed");
6968 ras_fwlog->fw_loglevel = fwlog_level;
6969 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6970 sizeof(struct lpfc_sli4_cfg_mhdr));
6972 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6973 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6974 len, LPFC_SLI4_MBX_EMBED);
6976 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6977 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6979 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6980 ras_fwlog->fw_loglevel);
6981 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6982 ras_fwlog->fw_buffcount);
6983 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6984 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6986 /* Update DMA buffer address */
6987 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6988 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6990 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6991 putPaddrLow(dmabuf->phys);
6993 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6994 putPaddrHigh(dmabuf->phys);
	/* Update LWPD address */
6998 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6999 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
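	/*
	 * Editor's note: the LWPD is the two-word DMA area allocated in
	 * lpfc_sli4_ras_dma_alloc(); the firmware presumably updates it with
	 * its current write position so the host can tell how much of the
	 * circular log is valid.
	 */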
7001 spin_lock_irq(&phba->hbalock);
7002 ras_fwlog->state = REG_INPROGRESS;
7003 spin_unlock_irq(&phba->hbalock);
7004 mbox->vport = phba->pport;
7005 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
7007 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7009 if (rc == MBX_NOT_FINISHED) {
7010 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7011 "6191 FW-Log Mailbox failed. "
7012 "status %d mbxStatus : x%x", rc,
7013 bf_get(lpfc_mqe_status, &mbox->u.mqe));
7014 mempool_free(mbox, phba->mbox_mem_pool);
7021 lpfc_sli4_ras_dma_free(phba);
7027 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
7028 * @phba: Pointer to HBA context object.
7030 * Check if RAS is supported on the adapter and initialize it.
7033 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
	/* Check whether RAS FW logging needs to be enabled */
7036 if (lpfc_check_fwlog_support(phba))
7039 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
7040 LPFC_RAS_ENABLE_LOGGING);
7044 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
7045 * @phba: Pointer to HBA context object.
7047 * This function allocates all SLI4 resource identifiers.
7050 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
7052 int i, rc, error = 0;
7053 uint16_t count, base;
7054 unsigned long longs;
7056 if (!phba->sli4_hba.rpi_hdrs_in_use)
7057 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
7058 if (phba->sli4_hba.extents_in_use) {
7060 * The port supports resource extents. The XRI, VPI, VFI, RPI
7061 * resource extent count must be read and allocated before
7062 * provisioning the resource id arrays.
7064 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7065 LPFC_IDX_RSRC_RDY) {
7067 * Extent-based resources are set - the driver could
7068 * be in a port reset. Figure out if any corrective
7069 * actions need to be taken.
7071 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7072 LPFC_RSC_TYPE_FCOE_VFI);
7075 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7076 LPFC_RSC_TYPE_FCOE_VPI);
7079 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7080 LPFC_RSC_TYPE_FCOE_XRI);
7083 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7084 LPFC_RSC_TYPE_FCOE_RPI);
7089 * It's possible that the number of resources
7090 * provided to this port instance changed between
7091 * resets. Detect this condition and reallocate
7092 * resources. Otherwise, there is no action.
7095 lpfc_printf_log(phba, KERN_INFO,
7096 LOG_MBOX | LOG_INIT,
7097 "2931 Detected extent resource "
7098 "change. Reallocating all "
7100 rc = lpfc_sli4_dealloc_extent(phba,
7101 LPFC_RSC_TYPE_FCOE_VFI);
7102 rc = lpfc_sli4_dealloc_extent(phba,
7103 LPFC_RSC_TYPE_FCOE_VPI);
7104 rc = lpfc_sli4_dealloc_extent(phba,
7105 LPFC_RSC_TYPE_FCOE_XRI);
7106 rc = lpfc_sli4_dealloc_extent(phba,
7107 LPFC_RSC_TYPE_FCOE_RPI);
7112 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7116 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7120 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7124 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7127 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7132 * The port does not support resource extents. The XRI, VPI,
7133 * VFI, RPI resource ids were determined from READ_CONFIG.
7134 * Just allocate the bitmasks and provision the resource id
7135 * arrays. If a port reset is active, the resources don't
7136 * need any action - just exit.
7138 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7139 LPFC_IDX_RSRC_RDY) {
7140 lpfc_sli4_dealloc_resource_identifiers(phba);
7141 lpfc_sli4_remove_rpis(phba);
7144 count = phba->sli4_hba.max_cfg_param.max_rpi;
7146 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7147 "3279 Invalid provisioning of "
7152 base = phba->sli4_hba.max_cfg_param.rpi_base;
7153 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7154 phba->sli4_hba.rpi_bmask = kcalloc(longs,
7155 sizeof(unsigned long),
7157 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
7161 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
7163 if (unlikely(!phba->sli4_hba.rpi_ids)) {
7165 goto free_rpi_bmask;
7168 for (i = 0; i < count; i++)
7169 phba->sli4_hba.rpi_ids[i] = base + i;
7172 count = phba->sli4_hba.max_cfg_param.max_vpi;
7174 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7175 "3280 Invalid provisioning of "
7180 base = phba->sli4_hba.max_cfg_param.vpi_base;
7181 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7182 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
7184 if (unlikely(!phba->vpi_bmask)) {
7188 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
7190 if (unlikely(!phba->vpi_ids)) {
7192 goto free_vpi_bmask;
7195 for (i = 0; i < count; i++)
7196 phba->vpi_ids[i] = base + i;
7199 count = phba->sli4_hba.max_cfg_param.max_xri;
7201 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7202 "3281 Invalid provisioning of "
7207 base = phba->sli4_hba.max_cfg_param.xri_base;
7208 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7209 phba->sli4_hba.xri_bmask = kcalloc(longs,
7210 sizeof(unsigned long),
7212 if (unlikely(!phba->sli4_hba.xri_bmask)) {
7216 phba->sli4_hba.max_cfg_param.xri_used = 0;
7217 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
7219 if (unlikely(!phba->sli4_hba.xri_ids)) {
7221 goto free_xri_bmask;
7224 for (i = 0; i < count; i++)
7225 phba->sli4_hba.xri_ids[i] = base + i;
7228 count = phba->sli4_hba.max_cfg_param.max_vfi;
7230 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7231 "3282 Invalid provisioning of "
7236 base = phba->sli4_hba.max_cfg_param.vfi_base;
7237 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7238 phba->sli4_hba.vfi_bmask = kcalloc(longs,
7239 sizeof(unsigned long),
7241 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
7245 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
7247 if (unlikely(!phba->sli4_hba.vfi_ids)) {
7249 goto free_vfi_bmask;
7252 for (i = 0; i < count; i++)
7253 phba->sli4_hba.vfi_ids[i] = base + i;
	 * Mark all resources ready. An HBA reset doesn't need
	 * to repeat this initialization.
	 */
7257 * to reset the initialization.
7259 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7265 kfree(phba->sli4_hba.vfi_bmask);
7266 phba->sli4_hba.vfi_bmask = NULL;
7268 kfree(phba->sli4_hba.xri_ids);
7269 phba->sli4_hba.xri_ids = NULL;
7271 kfree(phba->sli4_hba.xri_bmask);
7272 phba->sli4_hba.xri_bmask = NULL;
7274 kfree(phba->vpi_ids);
7275 phba->vpi_ids = NULL;
7277 kfree(phba->vpi_bmask);
7278 phba->vpi_bmask = NULL;
7280 kfree(phba->sli4_hba.rpi_ids);
7281 phba->sli4_hba.rpi_ids = NULL;
7283 kfree(phba->sli4_hba.rpi_bmask);
7284 phba->sli4_hba.rpi_bmask = NULL;
7290 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
7291 * @phba: Pointer to HBA context object.
 * This function deallocates all previously allocated SLI4 resource
 * identifiers for this port.
 */
7297 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
7299 if (phba->sli4_hba.extents_in_use) {
7300 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7301 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7302 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7303 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7305 kfree(phba->vpi_bmask);
7306 phba->sli4_hba.max_cfg_param.vpi_used = 0;
7307 kfree(phba->vpi_ids);
7308 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7309 kfree(phba->sli4_hba.xri_bmask);
7310 kfree(phba->sli4_hba.xri_ids);
7311 kfree(phba->sli4_hba.vfi_bmask);
7312 kfree(phba->sli4_hba.vfi_ids);
7313 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7314 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7321 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
7322 * @phba: Pointer to HBA context object.
7323 * @type: The resource extent type.
7324 * @extnt_cnt: buffer to hold port extent count response
7325 * @extnt_size: buffer to hold port extent size response.
7327 * This function calls the port to read the host allocated extents
7328 * for a particular type.
7331 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
7332 uint16_t *extnt_cnt, uint16_t *extnt_size)
7336 uint16_t curr_blks = 0;
7337 uint32_t req_len, emb_len;
7338 uint32_t alloc_len, mbox_tmo;
7339 struct list_head *blk_list_head;
7340 struct lpfc_rsrc_blks *rsrc_blk;
7342 void *virtaddr = NULL;
7343 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
7344 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
7345 union lpfc_sli4_cfg_shdr *shdr;
7348 case LPFC_RSC_TYPE_FCOE_VPI:
7349 blk_list_head = &phba->lpfc_vpi_blk_list;
7351 case LPFC_RSC_TYPE_FCOE_XRI:
7352 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
7354 case LPFC_RSC_TYPE_FCOE_VFI:
7355 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
7357 case LPFC_RSC_TYPE_FCOE_RPI:
7358 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
	/* Count the number of extents currently allocated for this type. */
7365 list_for_each_entry(rsrc_blk, blk_list_head, list) {
7366 if (curr_blks == 0) {
7368 * The GET_ALLOCATED mailbox does not return the size,
7369 * just the count. The size should be just the size
7370 * stored in the current allocated block and all sizes
 * for an extent type are the same, so set the return
 * value now.
 */
7374 *extnt_size = rsrc_blk->rsrc_size;
	/*
	 * Calculate the size of an embedded mailbox. The uint32_t
	 * accounts for the extents-specific word.
	 */
7383 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
7387 * Presume the allocation and response will fit into an embedded
7388 * mailbox. If not true, reconfigure to a non-embedded mailbox.
7390 emb = LPFC_SLI4_MBX_EMBED;
7392 if (req_len > emb_len) {
7393 req_len = curr_blks * sizeof(uint16_t) +
7394 sizeof(union lpfc_sli4_cfg_shdr) +
7396 emb = LPFC_SLI4_MBX_NEMBED;
7399 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7402 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
7404 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7405 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
7407 if (alloc_len < req_len) {
7408 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7409 "2983 Allocated DMA memory size (x%x) is "
7410 "less than the requested DMA memory "
7411 "size (x%x)\n", alloc_len, req_len);
7415 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
7421 if (!phba->sli4_hba.intr_enable)
7422 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
7434 * Figure out where the response is located. Then get local pointers
 * to the response data. The port does not guarantee to honor the full
 * extent count requested, so update the local variable with the count
 * actually allocated by the port.
 */
7439 if (emb == LPFC_SLI4_MBX_EMBED) {
7440 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7441 shdr = &rsrc_ext->header.cfg_shdr;
7442 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7444 virtaddr = mbox->sge_array->addr[0];
7445 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7446 shdr = &n_rsrc->cfg_shdr;
7447 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7450 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7451 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7452 "2984 Failed to read allocated resources "
7453 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7455 bf_get(lpfc_mbox_hdr_status, &shdr->response),
7456 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7461 lpfc_sli4_mbox_cmd_free(phba, mbox);
 * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
7467 * @phba: pointer to lpfc hba data structure.
7468 * @sgl_list: linked link of sgl buffers to post
7469 * @cnt: number of linked list buffers
 * This routine walks the list of buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. It attempts to construct blocks
 * of buffer sgls that contain contiguous xris and uses the non-embedded
 * SGL block post mailbox commands to post them to the port. For a single
 * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL post
 * mailbox command for posting.
7479 * Returns: 0 = success, non-zero failure.
7482 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7483 struct list_head *sgl_list, int cnt)
7485 struct lpfc_sglq *sglq_entry = NULL;
7486 struct lpfc_sglq *sglq_entry_next = NULL;
7487 struct lpfc_sglq *sglq_entry_first = NULL;
7488 int status, total_cnt;
7489 int post_cnt = 0, num_posted = 0, block_cnt = 0;
7490 int last_xritag = NO_XRI;
7491 LIST_HEAD(prep_sgl_list);
7492 LIST_HEAD(blck_sgl_list);
7493 LIST_HEAD(allc_sgl_list);
7494 LIST_HEAD(post_sgl_list);
7495 LIST_HEAD(free_sgl_list);
7497 spin_lock_irq(&phba->hbalock);
7498 spin_lock(&phba->sli4_hba.sgl_list_lock);
7499 list_splice_init(sgl_list, &allc_sgl_list);
7500 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7501 spin_unlock_irq(&phba->hbalock);
7504 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7505 &allc_sgl_list, list) {
7506 list_del_init(&sglq_entry->list);
7508 if ((last_xritag != NO_XRI) &&
7509 (sglq_entry->sli4_xritag != last_xritag + 1)) {
7510 /* a hole in xri block, form a sgl posting block */
7511 list_splice_init(&prep_sgl_list, &blck_sgl_list);
7512 post_cnt = block_cnt - 1;
7513 /* prepare list for next posting block */
7514 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7517 /* prepare list for next posting block */
7518 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7519 /* enough sgls for non-embed sgl mbox command */
7520 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7521 list_splice_init(&prep_sgl_list,
7523 post_cnt = block_cnt;
7529 /* keep track of last sgl's xritag */
7530 last_xritag = sglq_entry->sli4_xritag;
7532 /* end of repost sgl list condition for buffers */
7533 if (num_posted == total_cnt) {
7534 if (post_cnt == 0) {
7535 list_splice_init(&prep_sgl_list,
7537 post_cnt = block_cnt;
7538 } else if (block_cnt == 1) {
7539 status = lpfc_sli4_post_sgl(phba,
7540 sglq_entry->phys, 0,
7541 sglq_entry->sli4_xritag);
7543 /* successful, put sgl to posted list */
7544 list_add_tail(&sglq_entry->list,
7547 /* Failure, put sgl to free list */
7548 lpfc_printf_log(phba, KERN_WARNING,
7550 "3159 Failed to post "
7551 "sgl, xritag:x%x\n",
7552 sglq_entry->sli4_xritag);
7553 list_add_tail(&sglq_entry->list,
7560 /* continue until a nembed page worth of sgls */
7564 /* post the buffer list sgls as a block */
7565 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7569 /* success, put sgl list to posted sgl list */
7570 list_splice_init(&blck_sgl_list, &post_sgl_list);
7572 /* Failure, put sgl list to free sgl list */
7573 sglq_entry_first = list_first_entry(&blck_sgl_list,
7576 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7577 "3160 Failed to post sgl-list, "
7579 sglq_entry_first->sli4_xritag,
7580 (sglq_entry_first->sli4_xritag +
7582 list_splice_init(&blck_sgl_list, &free_sgl_list);
7583 total_cnt -= post_cnt;
		/* don't reset xritag due to hole in xri block */
7588 last_xritag = NO_XRI;
7590 /* reset sgl post count for next round of posting */
7594 /* free the sgls failed to post */
7595 lpfc_free_sgl_list(phba, &free_sgl_list);
7597 /* push sgls posted to the available list */
7598 if (!list_empty(&post_sgl_list)) {
7599 spin_lock_irq(&phba->hbalock);
7600 spin_lock(&phba->sli4_hba.sgl_list_lock);
7601 list_splice_init(&post_sgl_list, sgl_list);
7602 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7603 spin_unlock_irq(&phba->hbalock);
7605 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7606 "3161 Failure to post sgl to port.\n");
	/* return the number of XRIs actually posted */
	return total_cnt;
}
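/*
 * Worked example (editor's note, values illustrative): given sgls with
 * xritags 100, 101, 102, 103 and 105, tags 100-103 are contiguous and go out
 * in one non-embedded SGL block post; the hole at 104 forces 105 to start a
 * new block, and a lone trailing sgl like that one is posted singly via the
 * embedded lpfc_sli4_post_sgl() path.
 */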
7615 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7616 * @phba: pointer to lpfc hba data structure.
 * This routine walks the list of nvme buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
7620 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7621 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7622 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7624 * Returns: 0 = success, non-zero failure.
7627 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7629 LIST_HEAD(post_nblist);
7630 int num_posted, rc = 0;
	/* gather all NVME buffers that need reposting onto a local list */
7633 lpfc_io_buf_flush(phba, &post_nblist);
7635 /* post the list of nvme buffer sgls to port if available */
7636 if (!list_empty(&post_nblist)) {
7637 num_posted = lpfc_sli4_post_io_sgl_list(
7638 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7639 /* failed to post any nvme buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}
7647 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7651 len = sizeof(struct lpfc_mbx_set_host_data) -
7652 sizeof(struct lpfc_sli4_cfg_mhdr);
7653 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7654 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7655 LPFC_SLI4_MBX_EMBED);
7657 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7658 mbox->u.mqe.un.set_host_data.param_len =
7659 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7660 snprintf(mbox->u.mqe.un.set_host_data.un.data,
7661 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7662 "Linux %s v"LPFC_DRIVER_VERSION,
7663 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
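/*
 * Editor's note: the string handed to the port therefore looks like
 * "Linux FC v<LPFC_DRIVER_VERSION>" (or "Linux FCoE v..." when running in
 * FCoE mode), truncated to LPFC_HOST_OS_DRIVER_VERSION_SIZE by snprintf().
 */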
7667 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7668 struct lpfc_queue *drq, int count, int idx)
7671 struct lpfc_rqe hrqe;
7672 struct lpfc_rqe drqe;
7673 struct lpfc_rqb *rqbp;
7674 unsigned long flags;
7675 struct rqb_dmabuf *rqb_buffer;
7676 LIST_HEAD(rqb_buf_list);
7679 for (i = 0; i < count; i++) {
7680 spin_lock_irqsave(&phba->hbalock, flags);
7681 /* IF RQ is already full, don't bother */
7682 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7683 spin_unlock_irqrestore(&phba->hbalock, flags);
7686 spin_unlock_irqrestore(&phba->hbalock, flags);
7688 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7691 rqb_buffer->hrq = hrq;
7692 rqb_buffer->drq = drq;
7693 rqb_buffer->idx = idx;
7694 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7697 spin_lock_irqsave(&phba->hbalock, flags);
7698 while (!list_empty(&rqb_buf_list)) {
7699 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7702 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7703 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7704 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7705 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7706 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7708 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7709 "6421 Cannot post to HRQ %d: %x %x %x "
7717 rqbp->rqb_free_buffer(phba, rqb_buffer);
7719 list_add_tail(&rqb_buffer->hbuf.list,
7720 &rqbp->rqb_buffer_list);
7721 rqbp->buffer_count++;
7724 spin_unlock_irqrestore(&phba->hbalock, flags);
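/*
 * Editor's note: each receive buffer above is posted as a pair of RQEs: the
 * header queue entry (hrqe) points at rqb_buffer->hbuf and the data queue
 * entry (drqe) at rqb_buffer->dbuf, so lpfc_sli4_rq_put() must accept both
 * for the buffer to count toward rqbp->buffer_count.
 */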
7729 lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7731 struct lpfc_vport *vport = pmb->vport;
7732 union lpfc_sli4_cfg_shdr *shdr;
7733 u32 shdr_status, shdr_add_status;
	/* Two outcomes. (1) Set features was successful and EDC negotiation
	 * is done. (2) The mailbox failed, so fall back to FPIN support only.
	 */
7739 shdr = (union lpfc_sli4_cfg_shdr *)
7740 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7741 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7742 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7743 if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7744 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
7745 "2516 CGN SET_FEATURE mbox failed with "
7746 "status x%x add_status x%x, mbx status x%x "
7747 "Reset Congestion to FPINs only\n",
7748 shdr_status, shdr_add_status,
7749 pmb->u.mb.mbxStatus);
7750 /* If there is a mbox error, move on to RDF */
7751 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7752 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7756 /* Zero out Congestion Signal ACQE counter */
7757 phba->cgn_acqe_cnt = 0;
7758 atomic64_set(&phba->cgn_acqe_stat.warn, 0);
7759 atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
7761 acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
7762 &pmb->u.mqe.un.set_feature);
7763 sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
7764 &pmb->u.mqe.un.set_feature);
7765 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7766 "4620 SET_FEATURES Success: Freq: %ds %dms "
7767 " Reg: x%x x%x\n", acqe, sig,
7768 phba->cgn_reg_signal, phba->cgn_reg_fpin);
7770 mempool_free(pmb, phba->mbox_mem_pool);
7772 /* Register for FPIN events from the fabric now that the
7773 * EDC common_set_features has completed.
7775 lpfc_issue_els_rdf(vport, 0);
7779 lpfc_config_cgn_signal(struct lpfc_hba *phba)
7781 LPFC_MBOXQ_t *mboxq;
7784 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7788 lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
7789 mboxq->vport = phba->pport;
7790 mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;
7792 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7793 "4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
7795 phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
7796 phba->cgn_reg_signal, phba->cgn_reg_fpin);
7798 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7799 if (rc == MBX_NOT_FINISHED)
7804 mempool_free(mboxq, phba->mbox_mem_pool);
7806 /* If there is a mbox error, move on to RDF */
7807 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7808 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7809 lpfc_issue_els_rdf(phba->pport, 0);
7814 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7815 * @phba: pointer to lpfc hba data structure.
7817 * This routine initializes the per-cq idle_stat to dynamically dictate
7818 * polling decisions.
7823 static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7826 struct lpfc_sli4_hdw_queue *hdwq;
7827 struct lpfc_queue *cq;
7828 struct lpfc_idle_stat *idle_stat;
7831 for_each_present_cpu(i) {
7832 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7835 /* Skip if we've already handled this cq's primary CPU */
7839 idle_stat = &phba->sli4_hba.idle_stat[i];
7841 idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7842 idle_stat->prev_wall = wall;
7844 if (phba->nvmet_support ||
7845 phba->cmf_active_mode != LPFC_CFG_OFF)
7846 cq->poll_mode = LPFC_QUEUE_WORK;
7848 cq->poll_mode = LPFC_IRQ_POLL;
7851 if (!phba->nvmet_support)
7852 schedule_delayed_work(&phba->idle_stat_delay_work,
7853 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7856 static void lpfc_sli4_dip(struct lpfc_hba *phba)
7860 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7861 if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
7862 if_type == LPFC_SLI_INTF_IF_TYPE_6) {
7863 struct lpfc_register reg_data;
7865 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7869 if (bf_get(lpfc_sliport_status_dip, ®_data))
7870 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7871 "2904 Firmware Dump Image Present"
 * lpfc_cmf_setup - Initialize CMF and MI support
7878 * @phba: Pointer to HBA context object.
7880 * This is called from HBA setup during driver load or when the HBA
 * comes online. This does all the initialization to support CMF and MI.
 */
7884 lpfc_cmf_setup(struct lpfc_hba *phba)
7886 LPFC_MBOXQ_t *mboxq;
7887 struct lpfc_mqe *mqe;
7888 struct lpfc_dmabuf *mp;
7889 struct lpfc_pc_sli4_params *sli4_params;
7890 struct lpfc_sli4_parameters *mbx_sli4_parameters;
7892 int rc, cmf, mi_ver;
7894 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7897 mqe = &mboxq->u.mqe;
7899 /* Read the port's SLI4 Config Parameters */
7900 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
7901 sizeof(struct lpfc_sli4_cfg_mhdr));
7902 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7903 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
7904 length, LPFC_SLI4_MBX_EMBED);
7906 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7908 mempool_free(mboxq, phba->mbox_mem_pool);
7912 /* Gather info on CMF and MI support */
7913 sli4_params = &phba->sli4_hba.pc_sli4_params;
7914 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
7915 sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters);
7916 sli4_params->cmf = bf_get(cfg_cmf, mbx_sli4_parameters);
7918 /* Are we forcing MI off via module parameter? */
7919 if (!phba->cfg_enable_mi)
7920 sli4_params->mi_ver = 0;
7922 /* Always try to enable MI feature if we can */
7923 if (sli4_params->mi_ver) {
7924 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
7925 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7926 mi_ver = bf_get(lpfc_mbx_set_feature_mi,
7927 &mboxq->u.mqe.un.set_feature);
7929 if (rc == MBX_SUCCESS) {
7931 lpfc_printf_log(phba,
7932 KERN_WARNING, LOG_CGN_MGMT,
7933 "6215 MI is enabled\n");
7934 sli4_params->mi_ver = mi_ver;
7936 lpfc_printf_log(phba,
7937 KERN_WARNING, LOG_CGN_MGMT,
7938 "6338 MI is disabled\n");
7939 sli4_params->mi_ver = 0;
7942 /* mi_ver is already set from GET_SLI4_PARAMETERS */
7943 lpfc_printf_log(phba, KERN_INFO,
7944 LOG_CGN_MGMT | LOG_INIT,
7945 "6245 Enable MI Mailbox x%x (x%x/x%x) "
7946 "failed, rc:x%x mi:x%x\n",
7947 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7948 lpfc_sli_config_mbox_subsys_get
7950 lpfc_sli_config_mbox_opcode_get
7952 rc, sli4_params->mi_ver);
7955 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
7956 "6217 MI is disabled\n");
7959 /* Ensure FDMI is enabled for MI if enable_mi is set */
7960 if (sli4_params->mi_ver)
7961 phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
7963 /* Always try to enable CMF feature if we can */
7964 if (sli4_params->cmf) {
7965 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
7966 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7967 cmf = bf_get(lpfc_mbx_set_feature_cmf,
7968 &mboxq->u.mqe.un.set_feature);
7969 if (rc == MBX_SUCCESS && cmf) {
7970 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
7971 "6218 CMF is enabled: mode %d\n",
7972 phba->cmf_active_mode);
7974 lpfc_printf_log(phba, KERN_WARNING,
7975 LOG_CGN_MGMT | LOG_INIT,
7976 "6219 Enable CMF Mailbox x%x (x%x/x%x) "
7977 "failed, rc:x%x dd:x%x\n",
7978 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7979 lpfc_sli_config_mbox_subsys_get
7981 lpfc_sli_config_mbox_opcode_get
7984 sli4_params->cmf = 0;
7985 phba->cmf_active_mode = LPFC_CFG_OFF;
7989 /* Allocate Congestion Information Buffer */
7991 mp = kmalloc(sizeof(*mp), GFP_KERNEL);
		if (mp)
			mp->virt = dma_alloc_coherent(&phba->pcidev->dev,
						      sizeof(struct lpfc_cgn_info),
						      &mp->phys, GFP_KERNEL);
7997 if (!mp || !mp->virt) {
7998 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7999 "2640 Failed to alloc memory "
8000 "for Congestion Info\n");
8002 sli4_params->cmf = 0;
8003 phba->cmf_active_mode = LPFC_CFG_OFF;
8008 /* initialize congestion buffer info */
8009 lpfc_init_congestion_buf(phba);
8010 lpfc_init_congestion_stat(phba);
8013 rc = lpfc_sli4_cgn_params_read(phba);
8015 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8016 "6242 Error reading Cgn Params (%d)\n",
8018 /* Ensure CGN Mode is off */
8019 sli4_params->cmf = 0;
8021 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8022 "6243 CGN Event empty object.\n");
8023 /* Ensure CGN Mode is off */
8024 sli4_params->cmf = 0;
8028 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8029 "6220 CMF is disabled\n");
	/* Only register the congestion buffer with firmware if BOTH
	 * CMF and MI are enabled.
	 */
8035 if (sli4_params->cmf && sli4_params->mi_ver) {
8036 rc = lpfc_reg_congestion_buf(phba);
8038 dma_free_coherent(&phba->pcidev->dev,
8039 sizeof(struct lpfc_cgn_info),
8040 phba->cgn_i->virt, phba->cgn_i->phys);
8043 /* Ensure CGN Mode is off */
8044 phba->cmf_active_mode = LPFC_CFG_OFF;
8048 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8049 "6470 Setup MI version %d CMF %d mode %d\n",
8050 sli4_params->mi_ver, sli4_params->cmf,
8051 phba->cmf_active_mode);
8053 mempool_free(mboxq, phba->mbox_mem_pool);
8055 /* Initialize atomic counters */
8056 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
8057 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
8058 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
8059 atomic_set(&phba->cgn_sync_warn_cnt, 0);
8060 atomic_set(&phba->cgn_driver_evt_cnt, 0);
8061 atomic_set(&phba->cgn_latency_evt_cnt, 0);
8062 atomic64_set(&phba->cgn_latency_evt, 0);
8064 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
8066 /* Allocate RX Monitor Buffer */
8067 if (!phba->rxtable) {
8068 phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY,
8069 sizeof(struct rxtable_entry),
8071 if (!phba->rxtable) {
8072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8073 "2644 Failed to alloc memory "
8074 "for RX Monitor Buffer\n");
8078 atomic_set(&phba->rxtable_idx_head, 0);
8079 atomic_set(&phba->rxtable_idx_tail, 0);
8084 lpfc_set_host_tm(struct lpfc_hba *phba)
8086 LPFC_MBOXQ_t *mboxq;
8088 struct timespec64 cur_time;
8090 uint32_t month, day, year;
8091 uint32_t hour, minute, second;
8092 struct lpfc_mbx_set_host_date_time *tm;
8094 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8098 len = sizeof(struct lpfc_mbx_set_host_data) -
8099 sizeof(struct lpfc_sli4_cfg_mhdr);
8100 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8101 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
8102 LPFC_SLI4_MBX_EMBED);
8104 mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
8105 mboxq->u.mqe.un.set_host_data.param_len =
8106 sizeof(struct lpfc_mbx_set_host_date_time);
8107 tm = &mboxq->u.mqe.un.set_host_data.un.tm;
8108 ktime_get_real_ts64(&cur_time);
8109 time64_to_tm(cur_time.tv_sec, 0, &broken);
8110 month = broken.tm_mon + 1;
8111 day = broken.tm_mday;
8112 year = broken.tm_year - 100;
8113 hour = broken.tm_hour;
8114 minute = broken.tm_min;
8115 second = broken.tm_sec;
8116 bf_set(lpfc_mbx_set_host_month, tm, month);
8117 bf_set(lpfc_mbx_set_host_day, tm, day);
8118 bf_set(lpfc_mbx_set_host_year, tm, year);
8119 bf_set(lpfc_mbx_set_host_hour, tm, hour);
8120 bf_set(lpfc_mbx_set_host_min, tm, minute);
8121 bf_set(lpfc_mbx_set_host_sec, tm, second);
8123 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8124 mempool_free(mboxq, phba->mbox_mem_pool);
8125 return rc;
8126 }
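/* Worked example (illustrative): for 2021-09-03 14:07:05 UTC,
 * time64_to_tm() yields tm_mon = 8, tm_mday = 3 and tm_year = 121, so the
 * fields programmed above are month = 9, day = 3 and year = 121 - 100 = 21,
 * i.e. the firmware takes a two-digit year relative to 2000.
 */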
8129 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
8130 * @phba: Pointer to HBA context object.
8132 * This function is the main SLI4 device initialization PCI function. This
8133 * function is called by the HBA initialization code, HBA reset code and
8134 * HBA error attention handler code. Caller is not required to hold any
8135 * locks.
8136 */
8137 int
8138 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
8139 {
8140 int rc, i, cnt, len, dd;
8141 LPFC_MBOXQ_t *mboxq;
8142 struct lpfc_mqe *mqe;
8143 uint8_t *vpd;
8144 uint32_t vpd_size;
8145 uint32_t ftr_rsp = 0;
8146 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
8147 struct lpfc_vport *vport = phba->pport;
8148 struct lpfc_dmabuf *mp;
8149 struct lpfc_rqb *rqbp;
8150 u32 flg;
8152 /* Perform a PCI function reset to start from clean */
8153 rc = lpfc_pci_function_reset(phba);
8154 if (unlikely(rc))
8155 return -ENODEV;
8157 /* Check the HBA Host Status Register for readiness */
8158 rc = lpfc_sli4_post_status_check(phba);
8159 if (unlikely(rc))
8160 return -ENODEV;
8162 spin_lock_irq(&phba->hbalock);
8163 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
8164 flg = phba->sli.sli_flag;
8165 spin_unlock_irq(&phba->hbalock);
8166 /* Allow a little time after setting SLI_ACTIVE for any polled
8167 * MBX commands to complete via BSG.
8169 for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
8170 msleep(100);
8171 spin_lock_irq(&phba->hbalock);
8172 flg = phba->sli.sli_flag;
8173 spin_unlock_irq(&phba->hbalock);
8177 lpfc_sli4_dip(phba);
8180 * Allocate a single mailbox container for initializing the
8181 * port.
8182 */
8183 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8184 if (!mboxq)
8185 return -ENOMEM;
8187 /* Issue READ_REV to collect vpd and FW information. */
8188 vpd_size = SLI4_PAGE_SIZE;
8189 vpd = kzalloc(vpd_size, GFP_KERNEL);
8190 if (!vpd) {
8191 rc = -ENOMEM;
8192 goto out_free_mbox;
8193 }
8195 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
8196 if (unlikely(rc)) {
8197 kfree(vpd);
8198 goto out_free_mbox;
8199 }
8201 mqe = &mboxq->u.mqe;
8202 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
8203 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
8204 phba->hba_flag |= HBA_FCOE_MODE;
8205 phba->fcp_embed_io = 0; /* SLI4 FC support only */
8207 phba->hba_flag &= ~HBA_FCOE_MODE;
8210 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
8212 phba->hba_flag |= HBA_FIP_SUPPORT;
8214 phba->hba_flag &= ~HBA_FIP_SUPPORT;
8216 phba->hba_flag &= ~HBA_IOQ_FLUSH;
8218 if (phba->sli_rev != LPFC_SLI_REV4) {
8219 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8220 "0376 READ_REV Error. SLI Level %d "
8221 "FCoE enabled %d\n",
8222 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
8223 rc = -EIO;
8224 kfree(vpd);
8225 goto out_free_mbox;
8226 }
8228 rc = lpfc_set_host_tm(phba);
8229 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
8230 "6468 Set host date / time: Status x%x:\n", rc);
8233 * Continue initialization with default values even if driver failed
8234 * to read FCoE param config regions, only read parameters if the
8235 * board is FCoE
8236 */
8237 if (phba->hba_flag & HBA_FCOE_MODE &&
8238 lpfc_sli4_read_fcoe_params(phba))
8239 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
8240 "2570 Failed to read FCoE parameters\n");
8243 * Retrieve the SLI4 device physical port name; a failure here is
8244 * considered non-fatal.
8245 */
8246 rc = lpfc_sli4_retrieve_pport_name(phba);
8247 if (!rc)
8248 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8249 "3080 Successful retrieving SLI4 device "
8250 "physical port name: %s.\n", phba->Port);
8252 rc = lpfc_sli4_get_ctl_attr(phba);
8253 if (!rc)
8254 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8255 "8351 Successful retrieving SLI4 device "
8256 "CTL ATTR\n");
8259 * Evaluate the read rev and vpd data. Populate the driver
8260 * state with the results. If this routine fails, the failure
8261 * is not fatal as the driver will use generic values.
8263 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
8264 if (unlikely(!rc)) {
8265 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8266 "0377 Error %d parsing vpd. "
8267 "Using defaults.\n", rc);
8272 /* Save information as VPD data */
8273 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
8274 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
8277 * This is because first G7 ASIC doesn't support the standard
8278 * 0x5a NVME cmd descriptor type/subtype
8280 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8281 LPFC_SLI_INTF_IF_TYPE_6) &&
8282 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
8283 (phba->vpd.rev.smRev == 0) &&
8284 (phba->cfg_nvme_embed_cmd == 1))
8285 phba->cfg_nvme_embed_cmd = 0;
8287 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
8288 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
8289 &mqe->un.read_rev);
8290 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
8291 &mqe->un.read_rev);
8292 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
8293 &mqe->un.read_rev);
8294 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
8295 &mqe->un.read_rev);
8296 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
8297 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
8298 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
8299 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
8300 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
8301 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
8302 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8303 "(%d):0380 READ_REV Status x%x "
8304 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
8305 mboxq->vport ? mboxq->vport->vpi : 0,
8306 bf_get(lpfc_mqe_status, mqe),
8307 phba->vpd.rev.opFwName,
8308 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
8309 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
8311 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8312 LPFC_SLI_INTF_IF_TYPE_0) {
8313 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
8314 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8315 if (rc == MBX_SUCCESS) {
8316 phba->hba_flag |= HBA_RECOVERABLE_UE;
8317 /* Set 1Sec interval to detect UE */
8318 phba->eratt_poll_interval = 1;
8319 phba->sli4_hba.ue_to_sr = bf_get(
8320 lpfc_mbx_set_feature_UESR,
8321 &mboxq->u.mqe.un.set_feature);
8322 phba->sli4_hba.ue_to_rp = bf_get(
8323 lpfc_mbx_set_feature_UERP,
8324 &mboxq->u.mqe.un.set_feature);
8328 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
8329 /* Enable MDS Diagnostics only if the SLI Port supports it */
8330 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
8331 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8332 if (rc != MBX_SUCCESS)
8333 phba->mds_diags_support = 0;
8337 * Discover the port's supported feature set and match it against the
8338 * host's requests.
8339 */
8340 lpfc_request_features(phba, mboxq);
8341 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8342 if (unlikely(rc)) {
8343 rc = -EIO;
8344 goto out_free_mbox;
8345 }
8347 /* Disable VMID if app header is not supported */
8348 if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
8349 &mqe->un.req_ftrs))) {
8350 bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
8351 phba->cfg_vmid_app_header = 0;
8352 lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
8353 "1242 vmid feature not supported\n");
8357 * The port must support FCP initiator mode as this is the
8358 * only mode running in the host.
8360 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
8361 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8362 "0378 No support for fcpi mode.\n");
8366 /* Performance Hints are ONLY for FCoE */
8367 if (phba->hba_flag & HBA_FCOE_MODE) {
8368 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
8369 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
8371 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
8375 * If the port cannot support the host's requested features
8376 * then turn off the global config parameters to disable the
8377 * feature in the driver. This is not a fatal error.
8379 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8380 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
8381 phba->cfg_enable_bg = 0;
8382 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
8387 if (phba->max_vpi && phba->cfg_enable_npiv &&
8388 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8389 ftr_rsp++;
8391 if (ftr_rsp) {
8392 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8393 "0379 Feature Mismatch Data: x%08x %08x "
8394 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
8395 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
8396 phba->cfg_enable_npiv, phba->max_vpi);
8397 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
8398 phba->cfg_enable_bg = 0;
8399 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8400 phba->cfg_enable_npiv = 0;
8403 /* These SLI3 features are assumed in SLI4 */
8404 spin_lock_irq(&phba->hbalock);
8405 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
8406 spin_unlock_irq(&phba->hbalock);
8408 /* Always try to enable dual dump feature if we can */
8409 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
8410 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8411 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
8412 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
8413 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8414 "6448 Dual Dump is enabled\n");
8416 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
8417 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
8419 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8420 lpfc_sli_config_mbox_subsys_get(
8422 lpfc_sli_config_mbox_opcode_get(
8426 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
8427 * calls depends on these resources to complete port setup.
8429 rc = lpfc_sli4_alloc_resource_identifiers(phba);
8431 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8432 "2920 Failed to alloc Resource IDs "
8437 lpfc_set_host_data(phba, mboxq);
8439 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8440 if (rc)
8441 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8442 "2134 Failed to set host os driver version %x",
8443 rc);
8446 /* Read the port's service parameters. */
8447 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
8448 if (rc) {
8449 phba->link_state = LPFC_HBA_ERROR;
8450 rc = -ENOMEM;
8451 goto out_free_mbox;
8452 }
8454 mboxq->vport = vport;
8455 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8456 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
8457 if (rc == MBX_SUCCESS) {
8458 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
8459 rc = 0;
8460 }
8463 * This memory was allocated by the lpfc_read_sparam routine. Release
8464 * it to the mbuf pool.
8466 lpfc_mbuf_free(phba, mp->virt, mp->phys);
8467 kfree(mp);
8468 mboxq->ctx_buf = NULL;
8469 if (unlikely(rc)) {
8470 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8471 "0382 READ_SPARAM command failed "
8472 "status %d, mbxStatus x%x\n",
8473 rc, bf_get(lpfc_mqe_status, mqe));
8474 phba->link_state = LPFC_HBA_ERROR;
8475 rc = -EIO;
8476 goto out_free_mbox;
8477 }
8479 lpfc_update_vport_wwn(vport);
8481 /* Update the fc_host data structures with new wwn. */
8482 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
8483 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
8485 /* Create all the SLI4 queues */
8486 rc = lpfc_sli4_queue_create(phba);
8488 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8489 "3089 Failed to allocate queues\n");
8493 /* Set up all the queues to the device */
8494 rc = lpfc_sli4_queue_setup(phba);
8496 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8497 "0381 Error %d during queue setup.\n ", rc);
8498 goto out_stop_timers;
8500 /* Initialize the driver internal SLI layer lists. */
8501 lpfc_sli4_setup(phba);
8502 lpfc_sli4_queue_init(phba);
8504 /* update host els xri-sgl sizes and mappings */
8505 rc = lpfc_sli4_els_sgl_update(phba);
8507 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8508 "1400 Failed to update xri-sgl size and "
8509 "mapping: %d\n", rc);
8510 goto out_destroy_queue;
8513 /* register the els sgl pool to the port */
8514 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
8515 phba->sli4_hba.els_xri_cnt);
8516 if (unlikely(rc < 0)) {
8517 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8518 "0582 Error %d during els sgl post "
8521 goto out_destroy_queue;
8523 phba->sli4_hba.els_xri_cnt = rc;
8525 if (phba->nvmet_support) {
8526 /* update host nvmet xri-sgl sizes and mappings */
8527 rc = lpfc_sli4_nvmet_sgl_update(phba);
8529 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8530 "6308 Failed to update nvmet-sgl size "
8531 "and mapping: %d\n", rc);
8532 goto out_destroy_queue;
8535 /* register the nvmet sgl pool to the port */
8536 rc = lpfc_sli4_repost_sgl_list(
8538 &phba->sli4_hba.lpfc_nvmet_sgl_list,
8539 phba->sli4_hba.nvmet_xri_cnt);
8540 if (unlikely(rc < 0)) {
8541 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8542 "3117 Error %d during nvmet "
8545 goto out_destroy_queue;
8547 phba->sli4_hba.nvmet_xri_cnt = rc;
8549 /* We allocate an iocbq for every receive context SGL.
8550 * The additional allocation is for abort and ls handling.
8552 cnt = phba->sli4_hba.nvmet_xri_cnt +
8553 phba->sli4_hba.max_cfg_param.max_xri;
8555 /* update host common xri-sgl sizes and mappings */
8556 rc = lpfc_sli4_io_sgl_update(phba);
8558 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8559 "6082 Failed to update nvme-sgl size "
8560 "and mapping: %d\n", rc);
8561 goto out_destroy_queue;
8564 /* register the allocated common sgl pool to the port */
8565 rc = lpfc_sli4_repost_io_sgl_list(phba);
8567 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8568 "6116 Error %d during nvme sgl post "
8570 /* Some NVME buffers were moved to abort nvme list */
8571 /* A pci function reset will repost them */
8572 rc = -ENODEV;
8573 goto out_destroy_queue;
8574 }
8575 /* Each lpfc_io_buf job structure has an iocbq element.
8576 * This cnt provides for abort, els, ct and ls requests.
8578 cnt = phba->sli4_hba.max_cfg_param.max_xri;
8581 if (!phba->sli.iocbq_lookup) {
8582 /* Initialize and populate the iocb list per host */
8583 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8584 "2821 initialize iocb list with %d entries\n",
8586 rc = lpfc_init_iocb_list(phba, cnt);
8588 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8589 "1413 Failed to init iocb list.\n");
8590 goto out_destroy_queue;
8594 if (phba->nvmet_support)
8595 lpfc_nvmet_create_targetport(phba);
8597 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
8598 /* Post initial buffers to all RQs created */
8599 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
8600 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
8601 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
8602 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
8603 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
8604 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
8605 rqbp->buffer_count = 0;
8607 lpfc_post_rq_buffer(
8608 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
8609 phba->sli4_hba.nvmet_mrq_data[i],
8610 phba->cfg_nvmet_mrq_post, i);
8614 /* Post the rpi header region to the device. */
8615 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
8617 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8618 "0393 Error %d during rpi post operation\n",
8621 goto out_free_iocblist;
8623 lpfc_sli4_node_prep(phba);
8625 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
8626 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
8628 * The FC Port needs to register FCFI (index 0)
8630 lpfc_reg_fcfi(phba, mboxq);
8631 mboxq->vport = phba->pport;
8632 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8633 if (rc != MBX_SUCCESS)
8634 goto out_unset_queue;
8636 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
8637 &mboxq->u.mqe.un.reg_fcfi);
8639 /* We are in NVME Target mode with MRQ > 1 */
8641 /* First register the FCFI */
8642 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
8643 mboxq->vport = phba->pport;
8644 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8645 if (rc != MBX_SUCCESS)
8646 goto out_unset_queue;
8648 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
8649 &mboxq->u.mqe.un.reg_fcfi_mrq);
8651 /* Next register the MRQs */
8652 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
8653 mboxq->vport = phba->pport;
8654 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8655 if (rc != MBX_SUCCESS)
8656 goto out_unset_queue;
8659 /* Check if the port is configured to be disabled */
8660 lpfc_sli_read_link_ste(phba);
8663 /* Don't post more new bufs if repost already recovered
8664 * the nvme sgls.
8665 */
8666 if (phba->nvmet_support == 0) {
8667 if (phba->sli4_hba.io_xri_cnt == 0) {
8668 len = lpfc_new_io_buf(
8669 phba, phba->sli4_hba.io_xri_max);
8670 if (len == 0) {
8671 rc = -ENOMEM;
8672 goto out_unset_queue;
8673 }
8675 if (phba->cfg_xri_rebalancing)
8676 lpfc_create_multixri_pools(phba);
8677 } else {
8679 phba->cfg_xri_rebalancing = 0;
8680 }
8682 /* Allow asynchronous mailbox command to go through */
8683 spin_lock_irq(&phba->hbalock);
8684 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8685 spin_unlock_irq(&phba->hbalock);
8687 /* Post receive buffers to the device */
8688 lpfc_sli4_rb_setup(phba);
8690 /* Reset HBA FCF states after HBA reset */
8691 phba->fcf.fcf_flag = 0;
8692 phba->fcf.current_rec.flag = 0;
8694 /* Start the ELS watchdog timer */
8695 mod_timer(&vport->els_tmofunc,
8696 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
8698 /* Start heart beat timer */
8699 mod_timer(&phba->hb_tmofunc,
8700 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
8701 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
8702 phba->last_completion_time = jiffies;
8704 /* start eq_delay heartbeat */
8705 if (phba->cfg_auto_imax)
8706 queue_delayed_work(phba->wq, &phba->eq_delay_work,
8707 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
8709 /* start per phba idle_stat_delay heartbeat */
8710 lpfc_init_idle_stat_hb(phba);
8712 /* Start error attention (ERATT) polling timer */
8713 mod_timer(&phba->eratt_poll,
8714 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
8716 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
8717 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
8718 rc = pci_enable_pcie_error_reporting(phba->pcidev);
8719 if (!rc) {
8720 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8721 "2829 This device supports "
8722 "Advanced Error Reporting (AER)\n");
8723 spin_lock_irq(&phba->hbalock);
8724 phba->hba_flag |= HBA_AER_ENABLED;
8725 spin_unlock_irq(&phba->hbalock);
8726 } else {
8727 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8728 "2830 This device does not support "
8729 "Advanced Error Reporting (AER)\n");
8730 phba->cfg_aer_support = 0;
8731 }
8732 rc = 0;
8733 }
8736 * The port is ready, set the host's link state to LINK_DOWN
8737 * in preparation for link interrupts.
8739 spin_lock_irq(&phba->hbalock);
8740 phba->link_state = LPFC_LINK_DOWN;
8742 /* Check if physical ports are trunked */
8743 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
8744 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
8745 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
8746 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
8747 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
8748 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
8749 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
8750 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
8751 spin_unlock_irq(&phba->hbalock);
8753 /* Arm the CQs and then EQs on device */
8754 lpfc_sli4_arm_cqeq_intr(phba);
8756 /* Indicate device interrupt mode */
8757 phba->sli4_hba.intr_enable = 1;
8759 /* Setup CMF after HBA is initialized */
8760 lpfc_cmf_setup(phba);
8762 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
8763 (phba->hba_flag & LINK_DISABLED)) {
8764 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8765 "3103 Adapter Link is disabled.\n");
8766 lpfc_down_link(phba, mboxq);
8767 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8768 if (rc != MBX_SUCCESS) {
8769 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8770 "3104 Adapter failed to issue "
8771 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
8772 goto out_io_buff_free;
8774 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
8775 /* don't perform init_link on SLI4 FC port loopback test */
8776 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
8777 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
8779 goto out_io_buff_free;
8782 mempool_free(mboxq, phba->mbox_mem_pool);
8784 phba->hba_flag |= HBA_SETUP;
8785 return rc;
8787 out_io_buff_free:
8788 /* Free allocated IO Buffers */
8789 lpfc_io_free(phba);
8790 out_unset_queue:
8791 /* Unset all the queues set up in this routine when error out */
8792 lpfc_sli4_queue_unset(phba);
8793 out_free_iocblist:
8794 lpfc_free_iocb_list(phba);
8795 out_destroy_queue:
8796 lpfc_sli4_queue_destroy(phba);
8797 out_stop_timers:
8798 lpfc_stop_hba_timers(phba);
8799 out_free_mbox:
8800 mempool_free(mboxq, phba->mbox_mem_pool);
8801 return rc;
8802 }
8805 * lpfc_mbox_timeout - Timeout call back function for mbox timer
8806 * @t: Context to fetch pointer to hba structure from.
8808 * This is the callback function for mailbox timer. The mailbox
8809 * timer is armed when a new mailbox command is issued and the timer
8810 * is deleted when the mailbox completes. The function is called by
8811 * the kernel timer code when a mailbox does not complete within the
8812 * expected time. This function wakes up the worker thread to
8813 * process the mailbox timeout and returns. All the processing is
8814 * done by the worker thread function lpfc_mbox_timeout_handler.
8815 */
8816 void
8817 lpfc_mbox_timeout(struct timer_list *t)
8818 {
8819 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
8820 unsigned long iflag;
8821 uint32_t tmo_posted;
8823 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
8824 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
8825 if (!tmo_posted)
8826 phba->pport->work_port_events |= WORKER_MBOX_TMO;
8827 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
8829 if (!tmo_posted)
8830 lpfc_worker_wake_up(phba);
8831 return;
8832 }
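/* Illustrative sketch (mirrors the arming site later in this file): the
 * timer that fires this callback is armed when a command is issued, e.g.
 *
 *	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 1000);
 *	mod_timer(&psli->mbox_tmo, jiffies + timeout);
 *
 * and only when that timer is never deleted by a completion does the
 * WORKER_MBOX_TMO event above reach the worker thread.
 */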
8835 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
8836 * are pending
8837 * @phba: Pointer to HBA context object.
8839 * This function checks if any mailbox completions are present on the mailbox
8840 * completion queue.
8841 */
8842 static bool
8843 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
8844 {
8846 uint32_t idx;
8847 struct lpfc_queue *mcq;
8848 struct lpfc_mcqe *mcqe;
8849 bool pending_completions = false;
8850 uint8_t qe_valid;
8852 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8853 return false;
8855 /* Check for completions on mailbox completion queue */
8857 mcq = phba->sli4_hba.mbx_cq;
8858 idx = mcq->hba_index;
8859 qe_valid = mcq->qe_valid;
8860 while (bf_get_le32(lpfc_cqe_valid,
8861 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
8862 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
8863 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
8864 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
8865 pending_completions = true;
8866 break;
8867 }
8868 idx = (idx + 1) % mcq->entry_count;
8869 if (mcq->hba_index == idx)
8870 break;
8872 /* if the index wrapped around, toggle the valid bit */
8873 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
8874 qe_valid = (qe_valid) ? 0 : 1;
8875 }
8876 return pending_completions;
8877 }
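/* Illustrative outline (generic pattern, not additional driver code): the
 * scan above is the standard valid-bit walk of a completion queue:
 *
 *	idx = cq->hba_index;
 *	while (CQE at idx has its valid bit equal to the expected phase) {
 *		// examine the CQE, stop on a synchronous completion
 *		idx = (idx + 1) % cq->entry_count;
 *		if (idx == 0 && the hardware uses phase tracking)
 *			expected phase flips;
 *	}
 *
 * Nothing is consumed here: hba_index is never advanced, so the real CQ
 * processing path will still see and process every entry found.
 */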
8881 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
8883 * @phba: Pointer to HBA context object.
8885 * For sli4, it is possible to miss an interrupt. As such, mbox completions
8886 * may be missed, causing erroneous mailbox timeouts to occur. This function
8887 * checks to see if mbox completions are on the mailbox completion queue
8888 * and will process all the completions associated with the eq for the
8889 * mailbox completion queue.
8890 */
8891 static bool
8892 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
8893 {
8894 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
8895 uint32_t eqidx;
8896 struct lpfc_queue *fpeq = NULL;
8897 struct lpfc_queue *eq;
8898 bool mbox_pending;
8900 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8901 return false;
8903 /* Find the EQ associated with the mbox CQ */
8904 if (sli4_hba->hdwq) {
8905 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
8906 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
8907 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
8908 fpeq = eq;
8909 break;
8910 }
8911 }
8912 }
8913 if (!fpeq)
8914 return false;
8916 /* Turn off interrupts from this EQ */
8918 sli4_hba->sli4_eq_clr_intr(fpeq);
8920 /* Check to see if a mbox completion is pending */
8922 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
8925 * If a mbox completion is pending, process all the events on EQ
8926 * associated with the mbox completion queue (this could include
8927 * mailbox commands, async events, els commands, receive queue data
8928 * and fcp commands)
8929 */
8930 if (mbox_pending)
8932 /* process and rearm the EQ */
8933 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
8934 else
8935 /* Always clear and re-arm the EQ */
8936 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
8938 return mbox_pending;
8939 }
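/* Illustrative usage (sketch; the real caller is the timeout handler just
 * below): a timeout path first reconciles any missed completion before
 * escalating:
 *
 *	if (lpfc_sli4_process_missed_mbox_completions(phba))
 *		return;	// the completion was only missed, not lost
 *	// otherwise treat it as a genuine mailbox timeout
 */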
8943 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
8944 * @phba: Pointer to HBA context object.
8946 * This function is called from worker thread when a mailbox command times out.
8947 * The caller is not required to hold any locks. This function will reset the
8948 * HBA and recover all the pending commands.
8951 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
8953 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
8954 MAILBOX_t *mb = NULL;
8956 struct lpfc_sli *psli = &phba->sli;
8958 /* If the mailbox completed, process the completion */
8959 lpfc_sli4_process_missed_mbox_completions(phba);
8961 if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
8962 return;
8964 if (pmbox != NULL)
8965 mb = &pmbox->u.mb;
8966 /* Check the pmbox pointer first. There is a race condition
8967 * between the mbox timeout handler getting executed in the
8968 * worklist and the mailbox actually completing. When this
8969 * race condition occurs, the mbox_active will be NULL.
8971 spin_lock_irq(&phba->hbalock);
8972 if (pmbox == NULL) {
8973 lpfc_printf_log(phba, KERN_WARNING,
8974 LOG_MBOX | LOG_SLI,
8975 "0353 Active Mailbox cleared - mailbox timeout "
8976 "exiting\n");
8977 spin_unlock_irq(&phba->hbalock);
8978 return;
8979 }
8981 /* Mbox cmd <mbxCommand> timeout */
8982 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8983 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
8984 mb->mbxCommand,
8985 phba->pport->port_state,
8986 phba->sli.sli_flag,
8987 phba->sli.mbox_active);
8988 spin_unlock_irq(&phba->hbalock);
8990 /* Setting state unknown so lpfc_sli_abort_iocb_ring
8991 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
8992 * it to fail all outstanding SCSI IO.
8994 spin_lock_irq(&phba->pport->work_port_lock);
8995 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8996 spin_unlock_irq(&phba->pport->work_port_lock);
8997 spin_lock_irq(&phba->hbalock);
8998 phba->link_state = LPFC_LINK_UNKNOWN;
8999 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9000 spin_unlock_irq(&phba->hbalock);
9002 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9003 "0345 Resetting board due to mailbox timeout\n");
9005 /* Reset the HBA device */
9006 lpfc_reset_hba(phba);
9007 }
9010 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
9011 * @phba: Pointer to HBA context object.
9012 * @pmbox: Pointer to mailbox object.
9013 * @flag: Flag indicating how the mailbox need to be processed.
9015 * This function is called by discovery code and HBA management code
9016 * to submit a mailbox command to firmware with SLI-3 interface spec. This
9017 * function gets the hbalock to protect the data structures.
9018 * The mailbox command can be submitted in polling mode, in which case
9019 * this function will wait in a polling loop for the completion of the
9020 * mailbox.
9021 * If the mailbox is submitted in no_wait mode (not polling) the
9022 * function will submit the command and returns immediately without waiting
9023 * for the mailbox completion. The no_wait is supported only when HBA
9024 * is in SLI2/SLI3 mode - interrupts are enabled.
9025 * The SLI interface allows only one mailbox pending at a time. If the
9026 * mailbox is issued in polling mode and there is already a mailbox
9027 * pending, then the function will return an error. If the mailbox is issued
9028 * in NO_WAIT mode and there is a mailbox pending already, the function
9029 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
9030 * The sli layer owns the mailbox object until the completion of mailbox
9031 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
9032 * return codes the caller owns the mailbox command after the return of
9033 * the function.
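*
* Example (illustrative sketch only; the command choice is arbitrary and
* error handling is elided). A typical polled submission looks like:
*	LPFC_MBOXQ_t *mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
*	if (!mb)
*		return -ENOMEM;
*	lpfc_read_rev(phba, mb);	// build the command
*	rc = lpfc_sli_issue_mbox(phba, mb, MBX_POLL);
*	// in polling mode the command has completed on MBX_SUCCESS,
*	// so the caller may free the container here
*	mempool_free(mb, phba->mbox_mem_pool);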
9034 */
9035 static int
9036 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
9037 uint32_t flag)
9038 {
9039 MAILBOX_t *mbx;
9040 struct lpfc_sli *psli = &phba->sli;
9041 uint32_t status, evtctr;
9042 uint32_t ha_copy, hc_copy;
9044 unsigned long timeout;
9045 unsigned long drvr_flag = 0;
9046 uint32_t word0, ldata;
9047 void __iomem *to_slim;
9048 int processing_queue = 0;
9050 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9051 if (!pmbox) {
9052 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9053 /* processing mbox queue from intr_handler */
9054 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9055 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9056 return MBX_SUCCESS;
9057 }
9058 processing_queue = 1;
9059 pmbox = lpfc_mbox_get(phba);
9060 if (!pmbox) {
9061 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9062 return MBX_SUCCESS;
9063 }
9064 }
9066 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
9067 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
9068 if (!pmbox->vport) {
9069 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9070 lpfc_printf_log(phba, KERN_ERR,
9071 LOG_MBOX | LOG_VPORT,
9072 "1806 Mbox x%x failed. No vport\n",
9073 pmbox->u.mb.mbxCommand);
9074 dump_stack();
9075 goto out_not_finished;
9076 }
9077 }
9079 /* If the PCI channel is in offline state, do not post mbox. */
9080 if (unlikely(pci_channel_offline(phba->pcidev))) {
9081 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9082 goto out_not_finished;
9085 /* If HBA has a deferred error attention, fail the mailbox command. */
9086 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
9087 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9088 goto out_not_finished;
9092 mbx = &pmbox->u.mb;
9094 status = MBX_SUCCESS;
9096 if (phba->link_state == LPFC_HBA_ERROR) {
9097 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9099 /* Mbox command <mbxCommand> cannot issue */
9100 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9101 "(%d):0311 Mailbox command x%x cannot "
9102 "issue Data: x%x x%x\n",
9103 pmbox->vport ? pmbox->vport->vpi : 0,
9104 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9105 goto out_not_finished;
9108 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
9109 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
9110 !(hc_copy & HC_MBINT_ENA)) {
9111 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9112 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9113 "(%d):2528 Mailbox command x%x cannot "
9114 "issue Data: x%x x%x\n",
9115 pmbox->vport ? pmbox->vport->vpi : 0,
9116 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9117 goto out_not_finished;
9121 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9122 /* Polling for a mbox command when another one is already active
9123 * is not allowed in SLI. Also, the driver must have established
9124 * SLI2 mode to queue and process multiple mbox commands.
9127 if (flag & MBX_POLL) {
9128 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9130 /* Mbox command <mbxCommand> cannot issue */
9131 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9132 "(%d):2529 Mailbox command x%x "
9133 "cannot issue Data: x%x x%x\n",
9134 pmbox->vport ? pmbox->vport->vpi : 0,
9135 pmbox->u.mb.mbxCommand,
9136 psli->sli_flag, flag);
9137 goto out_not_finished;
9140 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
9141 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9142 /* Mbox command <mbxCommand> cannot issue */
9143 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9144 "(%d):2530 Mailbox command x%x "
9145 "cannot issue Data: x%x x%x\n",
9146 pmbox->vport ? pmbox->vport->vpi : 0,
9147 pmbox->u.mb.mbxCommand,
9148 psli->sli_flag, flag);
9149 goto out_not_finished;
9152 /* Another mailbox command is still being processed, queue this
9153 * command to be processed later.
9155 lpfc_mbox_put(phba, pmbox);
9157 /* Mbox cmd issue - BUSY */
9158 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9159 "(%d):0308 Mbox cmd issue - BUSY Data: "
9160 "x%x x%x x%x x%x\n",
9161 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
9162 mbx->mbxCommand,
9163 phba->pport ? phba->pport->port_state : 0xff,
9164 psli->sli_flag, flag);
9166 psli->slistat.mbox_busy++;
9167 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9169 if (pmbox->vport) {
9170 lpfc_debugfs_disc_trc(pmbox->vport,
9171 LPFC_DISC_TRC_MBOX_VPORT,
9172 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
9173 (uint32_t)mbx->mbxCommand,
9174 mbx->un.varWords[0], mbx->un.varWords[1]);
9175 } else {
9177 lpfc_debugfs_disc_trc(phba->pport,
9178 LPFC_DISC_TRC_MBOX,
9179 "MBOX Bsy: cmd:x%x mb:x%x x%x",
9180 (uint32_t)mbx->mbxCommand,
9181 mbx->un.varWords[0], mbx->un.varWords[1]);
9182 }
9184 return MBX_BUSY;
9185 }
9187 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9189 /* If we are not polling, we MUST be in SLI2 mode */
9190 if (flag != MBX_POLL) {
9191 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
9192 (mbx->mbxCommand != MBX_KILL_BOARD)) {
9193 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9194 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9195 /* Mbox command <mbxCommand> cannot issue */
9196 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9197 "(%d):2531 Mailbox command x%x "
9198 "cannot issue Data: x%x x%x\n",
9199 pmbox->vport ? pmbox->vport->vpi : 0,
9200 pmbox->u.mb.mbxCommand,
9201 psli->sli_flag, flag);
9202 goto out_not_finished;
9204 /* timeout active mbox command */
9205 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9206 1000);
9207 mod_timer(&psli->mbox_tmo, jiffies + timeout);
9208 }
9210 /* Mailbox cmd <cmd> issue */
9211 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9212 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
9214 pmbox->vport ? pmbox->vport->vpi : 0,
9216 phba->pport ? phba->pport->port_state : 0xff,
9217 psli->sli_flag, flag);
9219 if (mbx->mbxCommand != MBX_HEARTBEAT) {
9220 if (pmbox->vport) {
9221 lpfc_debugfs_disc_trc(pmbox->vport,
9222 LPFC_DISC_TRC_MBOX_VPORT,
9223 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9224 (uint32_t)mbx->mbxCommand,
9225 mbx->un.varWords[0], mbx->un.varWords[1]);
9226 } else {
9228 lpfc_debugfs_disc_trc(phba->pport,
9229 LPFC_DISC_TRC_MBOX,
9230 "MBOX Send: cmd:x%x mb:x%x x%x",
9231 (uint32_t)mbx->mbxCommand,
9232 mbx->un.varWords[0], mbx->un.varWords[1]);
9233 }
9234 }
9236 psli->slistat.mbox_cmd++;
9237 evtctr = psli->slistat.mbox_event;
9239 /* next set own bit for the adapter and copy over command word */
9240 mbx->mbxOwner = OWN_CHIP;
9242 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9243 /* Populate mbox extension offset word. */
9244 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
9245 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9246 = (uint8_t *)phba->mbox_ext
9247 - (uint8_t *)phba->mbox;
9250 /* Copy the mailbox extension data */
9251 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
9252 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
9253 (uint8_t *)phba->mbox_ext,
9254 pmbox->in_ext_byte_len);
9256 /* Copy command data to host SLIM area */
9257 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
9259 /* Populate mbox extension offset word. */
9260 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
9261 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9262 = MAILBOX_HBA_EXT_OFFSET;
9264 /* Copy the mailbox extension data */
9265 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
9266 lpfc_memcpy_to_slim(phba->MBslimaddr +
9267 MAILBOX_HBA_EXT_OFFSET,
9268 pmbox->ctx_buf, pmbox->in_ext_byte_len);
9270 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9271 /* copy command data into host mbox for cmpl */
9272 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
9273 MAILBOX_CMD_SIZE);
9275 /* First copy mbox command data to HBA SLIM, skip past first
9276 word */
9277 to_slim = phba->MBslimaddr + sizeof(uint32_t);
9278 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
9279 MAILBOX_CMD_SIZE - sizeof (uint32_t));
9281 /* Next copy over first word, with mbxOwner set */
9282 ldata = *((uint32_t *)mbx);
9283 to_slim = phba->MBslimaddr;
9284 writel(ldata, to_slim);
9285 readl(to_slim); /* flush */
9287 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9288 /* switch over to host mailbox */
9289 psli->sli_flag |= LPFC_SLI_ACTIVE;
9290 }
9292 wmb();
9294 switch (flag) {
9295 case MBX_NOWAIT:
9296 /* Set up reference to mailbox command */
9297 psli->mbox_active = pmbox;
9298 /* Interrupt board to do it */
9299 writel(CA_MBATT, phba->CAregaddr);
9300 readl(phba->CAregaddr); /* flush */
9301 /* Don't wait for it to finish, just return */
9302 break;
9304 case MBX_POLL:
9305 /* Set up null reference to mailbox command */
9306 psli->mbox_active = NULL;
9307 /* Interrupt board to do it */
9308 writel(CA_MBATT, phba->CAregaddr);
9309 readl(phba->CAregaddr); /* flush */
9311 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9312 /* First read mbox status word */
9313 word0 = *((uint32_t *)phba->mbox);
9314 word0 = le32_to_cpu(word0);
9316 /* First read mbox status word */
9317 if (lpfc_readl(phba->MBslimaddr, &word0)) {
9318 spin_unlock_irqrestore(&phba->hbalock,
9320 goto out_not_finished;
9324 /* Read the HBA Host Attention Register */
9325 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9326 spin_unlock_irqrestore(&phba->hbalock,
9328 goto out_not_finished;
9330 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9333 /* Wait for command to complete */
9334 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
9335 (!(ha_copy & HA_MBATT) &&
9336 (phba->link_state > LPFC_WARM_START))) {
9337 if (time_after(jiffies, timeout)) {
9338 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9339 spin_unlock_irqrestore(&phba->hbalock,
9341 goto out_not_finished;
9344 /* Check if we took a mbox interrupt while we were
9345 polling */
9346 if (((word0 & OWN_CHIP) != OWN_CHIP)
9347 && (evtctr != psli->slistat.mbox_event))
9348 break;
9351 spin_unlock_irqrestore(&phba->hbalock,
9352 drvr_flag);
9353 msleep(1);
9354 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9357 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9358 /* First copy command data */
9359 word0 = *((uint32_t *)phba->mbox);
9360 word0 = le32_to_cpu(word0);
9361 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
9362 MAILBOX_t *slimmb;
9363 uint32_t slimword0;
9364 /* Check real SLIM for any errors */
9365 slimword0 = readl(phba->MBslimaddr);
9366 slimmb = (MAILBOX_t *) & slimword0;
9367 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
9368 && slimmb->mbxStatus) {
9369 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9371 word0 = slimword0;
9372 }
9373 }
9375 /* First copy command data */
9376 word0 = readl(phba->MBslimaddr);
9378 /* Read the HBA Host Attention Register */
9379 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9380 spin_unlock_irqrestore(&phba->hbalock,
9382 goto out_not_finished;
9386 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9387 /* copy results back to user */
9388 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
9389 MAILBOX_CMD_SIZE);
9390 /* Copy the mailbox extension data */
9391 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9392 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
9393 pmbox->ctx_buf,
9394 pmbox->out_ext_byte_len);
9395 }
9397 /* First copy command data */
9398 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
9399 MAILBOX_CMD_SIZE);
9400 /* Copy the mailbox extension data */
9401 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9402 lpfc_memcpy_from_slim(
9403 pmbox->ctx_buf,
9404 phba->MBslimaddr +
9405 MAILBOX_HBA_EXT_OFFSET,
9406 pmbox->out_ext_byte_len);
9407 }
9408 }
9410 writel(HA_MBATT, phba->HAregaddr);
9411 readl(phba->HAregaddr); /* flush */
9413 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9414 status = mbx->mbxStatus;
9415 }
9417 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9418 return status;
9420 out_not_finished:
9421 if (processing_queue) {
9422 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
9423 lpfc_mbox_cmpl_put(phba, pmbox);
9424 }
9425 return MBX_NOT_FINISHED;
9426 }
9429 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
9430 * @phba: Pointer to HBA context object.
9432 * The function blocks the posting of SLI4 asynchronous mailbox commands from
9433 * the driver internal pending mailbox queue. It will then try to wait out the
9434 * possible outstanding mailbox command before returning.
9437 * 0 - the outstanding mailbox command completed; otherwise, the wait for
9438 * the outstanding mailbox command timed out.
9441 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
9443 struct lpfc_sli *psli = &phba->sli;
9444 LPFC_MBOXQ_t *mboxq;
9445 int rc = 0;
9446 unsigned long timeout = 0;
9447 u32 sli_flag;
9448 u8 cmd, subsys, opcode;
9450 /* Mark the asynchronous mailbox command posting as blocked */
9451 spin_lock_irq(&phba->hbalock);
9452 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9453 /* Determine how long we might wait for the active mailbox
9454 * command to be gracefully completed by firmware.
9456 if (phba->sli.mbox_active)
9457 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
9458 phba->sli.mbox_active) *
9459 1000) + jiffies;
9460 spin_unlock_irq(&phba->hbalock);
9462 /* Make sure the mailbox is really active */
9464 lpfc_sli4_process_missed_mbox_completions(phba);
9466 /* Wait for the outstanding mailbox command to complete */
9467 while (phba->sli.mbox_active) {
9468 /* Check active mailbox complete status every 2ms */
9469 msleep(2);
9470 if (time_after(jiffies, timeout)) {
9471 /* Timeout, mark the outstanding cmd not complete */
9473 /* Sanity check sli.mbox_active has not completed or
9474 * cancelled from another context during last 2ms sleep,
9475 * so take hbalock to be sure before logging.
9477 spin_lock_irq(&phba->hbalock);
9478 if (phba->sli.mbox_active) {
9479 mboxq = phba->sli.mbox_active;
9480 cmd = mboxq->u.mb.mbxCommand;
9481 subsys = lpfc_sli_config_mbox_subsys_get(phba,
9483 opcode = lpfc_sli_config_mbox_opcode_get(phba,
9485 sli_flag = psli->sli_flag;
9486 spin_unlock_irq(&phba->hbalock);
9487 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9488 "2352 Mailbox command x%x "
9489 "(x%x/x%x) sli_flag x%x could "
9491 cmd, subsys, opcode,
9494 spin_unlock_irq(&phba->hbalock);
9502 /* Cannot cleanly block the async mailbox command; fail it */
9503 if (rc) {
9504 spin_lock_irq(&phba->hbalock);
9505 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9506 spin_unlock_irq(&phba->hbalock);
9507 }
9509 return rc;
9510 }
9512 * lpfc_sli4_async_mbox_unblock - Unblock posting of SLI4 async mailbox commands
9513 * @phba: Pointer to HBA context object.
9515 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
9516 * commands from the driver internal pending mailbox queue. It makes sure
9517 * that there is no outstanding mailbox command before resuming posting
9518 * asynchronous mailbox commands. If, for any reason, there is an outstanding
9519 * mailbox command, it will try to wait it out before resuming asynchronous
9520 * mailbox command posting.
9522 static void
9523 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
9524 {
9525 struct lpfc_sli *psli = &phba->sli;
9527 spin_lock_irq(&phba->hbalock);
9528 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9529 /* Asynchronous mailbox posting is not blocked, do nothing */
9530 spin_unlock_irq(&phba->hbalock);
9531 return;
9532 }
9534 /* An outstanding synchronous mailbox command is guaranteed to be done,
9535 * either successfully or by timeout; after a timeout the outstanding
9536 * command is always removed. Just unblock posting of async mailbox
9537 * commands and resume.
9538 */
9539 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9540 spin_unlock_irq(&phba->hbalock);
9542 /* wake up worker thread to post asynchronous mailbox command */
9543 lpfc_worker_wake_up(phba);
9544 }
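/* Illustrative pairing (sketch; see lpfc_sli_issue_mbox_s4() below): the
 * block/unblock calls bracket a synchronous submission so async posting
 * cannot race with it:
 *
 *	rc = lpfc_sli4_async_mbox_block(phba);
 *	if (!rc) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 */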
9547 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
9548 * @phba: Pointer to HBA context object.
9549 * @mboxq: Pointer to mailbox object.
9551 * The function waits for the bootstrap mailbox register ready bit from
9552 * port for twice the regular mailbox command timeout value.
9554 * 0 - no timeout on waiting for bootstrap mailbox register ready.
9555 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
9557 static int
9558 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9559 {
9560 uint32_t db_ready;
9561 unsigned long timeout;
9562 struct lpfc_register bmbx_reg;
9564 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
9565 * 1000) + jiffies;
9567 do {
9568 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
9569 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
9570 if (!db_ready)
9571 mdelay(2);
9573 if (time_after(jiffies, timeout))
9574 return MBXERR_ERROR;
9575 } while (!db_ready);
9577 return 0;
9578 }
9581 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
9582 * @phba: Pointer to HBA context object.
9583 * @mboxq: Pointer to mailbox object.
9585 * The function posts a mailbox to the port. The mailbox is expected
9586 * to be completely filled in and ready for the port to operate on it.
9587 * This routine executes a synchronous completion operation on the
9588 * mailbox by polling for its completion.
9590 * The caller must not be holding any locks when calling this routine.
9593 * MBX_SUCCESS - mailbox posted successfully
9594 * Any of the MBX error values.
9596 static int
9597 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9598 {
9599 int rc = MBX_SUCCESS;
9600 unsigned long iflag;
9601 uint32_t mcqe_status;
9602 uint32_t mbx_cmnd;
9603 struct lpfc_sli *psli = &phba->sli;
9604 struct lpfc_mqe *mb = &mboxq->u.mqe;
9605 struct lpfc_bmbx_create *mbox_rgn;
9606 struct dma_address *dma_address;
9609 * Only one mailbox can be active to the bootstrap mailbox region
9610 * at a time and there is no queueing provided.
9612 spin_lock_irqsave(&phba->hbalock, iflag);
9613 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9614 spin_unlock_irqrestore(&phba->hbalock, iflag);
9615 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9616 "(%d):2532 Mailbox command x%x (x%x/x%x) "
9617 "cannot issue Data: x%x x%x\n",
9618 mboxq->vport ? mboxq->vport->vpi : 0,
9619 mboxq->u.mb.mbxCommand,
9620 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9621 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9622 psli->sli_flag, MBX_POLL);
9623 return MBXERR_ERROR;
9625 /* The server grabs the token and owns it until release */
9626 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9627 phba->sli.mbox_active = mboxq;
9628 spin_unlock_irqrestore(&phba->hbalock, iflag);
9630 /* wait for the bootstrap mbox register to become ready */
9631 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9632 if (rc)
9633 goto exit;
9635 * Initialize the bootstrap memory region to avoid stale data areas
9636 * in the mailbox post. Then copy the caller's mailbox contents to
9637 * the bmbx mailbox region.
9639 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
9640 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
9641 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
9642 sizeof(struct lpfc_mqe));
9644 /* Post the high mailbox dma address to the port and wait for ready. */
9645 dma_address = &phba->sli4_hba.bmbx.dma_address;
9646 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
9648 /* wait for the bootstrap mbox register hi-address write to complete */
9649 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9650 if (rc)
9651 goto exit;
9653 /* Post the low mailbox dma address to the port. */
9654 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
9656 /* wait for the bootstrap mbox register low-address write to complete */
9657 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9658 if (rc)
9659 goto exit;
9662 * Read the CQ to ensure the mailbox has completed.
9663 * If so, update the mailbox status so that the upper layers
9664 * can complete the request normally.
9666 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
9667 sizeof(struct lpfc_mqe));
9668 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
9669 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
9670 sizeof(struct lpfc_mcqe));
9671 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
9673 * When the CQE status indicates a failure and the mailbox status
9674 * indicates success then copy the CQE status into the mailbox status
9675 * (and prefix it with x4000).
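*
* Worked example (illustrative): an MCQE status of 0x2 would surface to
* the caller as mailbox status 0x4002, i.e. LPFC_MBX_ERROR_RANGE | 0x2.
*/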
9677 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
9678 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
9679 bf_set(lpfc_mqe_status, mb,
9680 (LPFC_MBX_ERROR_RANGE | mcqe_status));
9681 rc = MBXERR_ERROR;
9682 } else
9683 lpfc_sli4_swap_str(phba, mboxq);
9685 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9686 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
9687 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
9688 " x%x x%x CQ: x%x x%x x%x x%x\n",
9689 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9690 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9691 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9692 bf_get(lpfc_mqe_status, mb),
9693 mb->un.mb_words[0], mb->un.mb_words[1],
9694 mb->un.mb_words[2], mb->un.mb_words[3],
9695 mb->un.mb_words[4], mb->un.mb_words[5],
9696 mb->un.mb_words[6], mb->un.mb_words[7],
9697 mb->un.mb_words[8], mb->un.mb_words[9],
9698 mb->un.mb_words[10], mb->un.mb_words[11],
9699 mb->un.mb_words[12], mboxq->mcqe.word0,
9700 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
9701 mboxq->mcqe.trailer);
9702 exit:
9703 /* We are holding the token; no lock is needed for the release */
9704 spin_lock_irqsave(&phba->hbalock, iflag);
9705 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9706 phba->sli.mbox_active = NULL;
9707 spin_unlock_irqrestore(&phba->hbalock, iflag);
9708 return rc;
9709 }
9712 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
9713 * @phba: Pointer to HBA context object.
9714 * @mboxq: Pointer to mailbox object.
9715 * @flag: Flag indicating how the mailbox need to be processed.
9717 * This function is called by discovery code and HBA management code to submit
9718 * a mailbox command to firmware with SLI-4 interface spec.
9720 * Return codes the caller owns the mailbox command after the return of the
9721 * function.
9722 */
9723 static int
9724 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
9725 uint32_t flag)
9726 {
9727 struct lpfc_sli *psli = &phba->sli;
9728 unsigned long iflags;
9729 int rc;
9731 /* dump from issue mailbox command if setup */
9732 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
9734 rc = lpfc_mbox_dev_check(phba);
9735 if (unlikely(rc)) {
9736 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9737 "(%d):2544 Mailbox command x%x (x%x/x%x) "
9738 "cannot issue Data: x%x x%x\n",
9739 mboxq->vport ? mboxq->vport->vpi : 0,
9740 mboxq->u.mb.mbxCommand,
9741 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9742 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9743 psli->sli_flag, flag);
9744 goto out_not_finished;
9745 }
9747 /* Detect polling mode and jump to a handler */
9748 if (!phba->sli4_hba.intr_enable) {
9749 if (flag == MBX_POLL)
9750 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9751 else
9752 rc = -EIO;
9753 if (rc != MBX_SUCCESS)
9754 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9755 "(%d):2541 Mailbox command x%x "
9756 "(x%x/x%x) failure: "
9757 "mqe_sta: x%x mcqe_sta: x%x/x%x "
9759 mboxq->vport ? mboxq->vport->vpi : 0,
9760 mboxq->u.mb.mbxCommand,
9761 lpfc_sli_config_mbox_subsys_get(phba,
9763 lpfc_sli_config_mbox_opcode_get(phba,
9765 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9766 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9767 bf_get(lpfc_mcqe_ext_status,
9769 psli->sli_flag, flag);
9770 return rc;
9771 } else if (flag == MBX_POLL) {
9772 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9773 "(%d):2542 Try to issue mailbox command "
9774 "x%x (x%x/x%x) synchronously ahead of async "
9775 "mailbox command queue: x%x x%x\n",
9776 mboxq->vport ? mboxq->vport->vpi : 0,
9777 mboxq->u.mb.mbxCommand,
9778 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9779 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9780 psli->sli_flag, flag);
9781 /* Try to block the asynchronous mailbox posting */
9782 rc = lpfc_sli4_async_mbox_block(phba);
9783 if (!rc) {
9784 /* Successfully blocked, now issue sync mbox cmd */
9785 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9786 if (rc != MBX_SUCCESS)
9787 lpfc_printf_log(phba, KERN_WARNING,
9789 "(%d):2597 Sync Mailbox command "
9790 "x%x (x%x/x%x) failure: "
9791 "mqe_sta: x%x mcqe_sta: x%x/x%x "
9793 mboxq->vport ? mboxq->vport->vpi : 0,
9794 mboxq->u.mb.mbxCommand,
9795 lpfc_sli_config_mbox_subsys_get(phba,
9797 lpfc_sli_config_mbox_opcode_get(phba,
9799 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9800 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9801 bf_get(lpfc_mcqe_ext_status,
9803 psli->sli_flag, flag);
9804 /* Unblock the async mailbox posting afterward */
9805 lpfc_sli4_async_mbox_unblock(phba);
9806 }
9807 return rc;
9808 }
9810 /* Now, interrupt mode asynchronous mailbox command */
9811 rc = lpfc_mbox_cmd_check(phba, mboxq);
9812 if (rc) {
9813 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9814 "(%d):2543 Mailbox command x%x (x%x/x%x) "
9815 "cannot issue Data: x%x x%x\n",
9816 mboxq->vport ? mboxq->vport->vpi : 0,
9817 mboxq->u.mb.mbxCommand,
9818 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9819 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9820 psli->sli_flag, flag);
9821 goto out_not_finished;
9822 }
9824 /* Put the mailbox command to the driver internal FIFO */
9825 psli->slistat.mbox_busy++;
9826 spin_lock_irqsave(&phba->hbalock, iflags);
9827 lpfc_mbox_put(phba, mboxq);
9828 spin_unlock_irqrestore(&phba->hbalock, iflags);
9829 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9830 "(%d):0354 Mbox cmd issue - Enqueue Data: "
9831 "x%x (x%x/x%x) x%x x%x x%x\n",
9832 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
9833 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
9834 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9835 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9836 phba->pport->port_state,
9837 psli->sli_flag, MBX_NOWAIT);
9838 /* Wake up worker thread to transport mailbox command from head */
9839 lpfc_worker_wake_up(phba);
9841 return MBX_BUSY;
9843 out_not_finished:
9844 return MBX_NOT_FINISHED;
9845 }
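/* Illustrative sketch (the completion handler name is hypothetical):
 * issuing a command asynchronously instead of polling looks like
 *
 *	mboxq->mbox_cmpl = my_mbox_cmpl;	// hypothetical handler
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 *
 * On MBX_BUSY or MBX_SUCCESS the SLI layer owns mboxq until the
 * completion handler runs.
 */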
9848 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
9849 * @phba: Pointer to HBA context object.
9851 * This function is called by worker thread to send a mailbox command to
9852 * SLI4 HBA firmware.
9855 int
9856 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
9857 {
9858 struct lpfc_sli *psli = &phba->sli;
9859 LPFC_MBOXQ_t *mboxq;
9860 int rc = MBX_SUCCESS;
9861 unsigned long iflags;
9862 struct lpfc_mqe *mqe;
9863 uint32_t mbx_cmnd;
9865 /* Check interrupt mode before post async mailbox command */
9866 if (unlikely(!phba->sli4_hba.intr_enable))
9867 return MBX_NOT_FINISHED;
9869 /* Check for mailbox command service token */
9870 spin_lock_irqsave(&phba->hbalock, iflags);
9871 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9872 spin_unlock_irqrestore(&phba->hbalock, iflags);
9873 return MBX_NOT_FINISHED;
9875 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9876 spin_unlock_irqrestore(&phba->hbalock, iflags);
9877 return MBX_NOT_FINISHED;
9879 if (unlikely(phba->sli.mbox_active)) {
9880 spin_unlock_irqrestore(&phba->hbalock, iflags);
9881 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9882 "0384 There is pending active mailbox cmd\n");
9883 return MBX_NOT_FINISHED;
9885 /* Take the mailbox command service token */
9886 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9888 /* Get the next mailbox command from head of queue */
9889 mboxq = lpfc_mbox_get(phba);
9891 /* If no more mailbox command waiting for post, we're done */
9892 if (!mboxq) {
9893 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9894 spin_unlock_irqrestore(&phba->hbalock, iflags);
9895 return MBX_SUCCESS;
9896 }
9897 phba->sli.mbox_active = mboxq;
9898 spin_unlock_irqrestore(&phba->hbalock, iflags);
9900 /* Check device readiness for posting mailbox command */
9901 rc = lpfc_mbox_dev_check(phba);
9902 if (unlikely(rc)) {
9903 /* Driver clean routine will clean up pending mailbox */
9904 goto out_not_finished;
9905 }
9906 /* Prepare the mbox command to be posted */
9907 mqe = &mboxq->u.mqe;
9908 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
9910 /* Start timer for the mbox_tmo and log some mailbox post messages */
9911 mod_timer(&psli->mbox_tmo, (jiffies +
9912 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
9914 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9915 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
9917 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9918 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9919 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9920 phba->pport->port_state, psli->sli_flag);
9922 if (mbx_cmnd != MBX_HEARTBEAT) {
9923 if (mboxq->vport) {
9924 lpfc_debugfs_disc_trc(mboxq->vport,
9925 LPFC_DISC_TRC_MBOX_VPORT,
9926 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9927 mbx_cmnd, mqe->un.mb_words[0],
9928 mqe->un.mb_words[1]);
9929 } else {
9930 lpfc_debugfs_disc_trc(phba->pport,
9931 LPFC_DISC_TRC_MBOX,
9932 "MBOX Send: cmd:x%x mb:x%x x%x",
9933 mbx_cmnd, mqe->un.mb_words[0],
9934 mqe->un.mb_words[1]);
9935 }
9936 }
9937 psli->slistat.mbox_cmd++;
9939 /* Post the mailbox command to the port */
9940 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
9941 if (rc != MBX_SUCCESS) {
9942 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9943 "(%d):2533 Mailbox command x%x (x%x/x%x) "
9944 "cannot issue Data: x%x x%x\n",
9945 mboxq->vport ? mboxq->vport->vpi : 0,
9946 mboxq->u.mb.mbxCommand,
9947 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9948 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9949 psli->sli_flag, MBX_NOWAIT);
9950 goto out_not_finished;
9951 }
9953 return rc;
9955 out_not_finished:
9956 spin_lock_irqsave(&phba->hbalock, iflags);
9957 if (phba->sli.mbox_active) {
9958 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
9959 __lpfc_mbox_cmpl_put(phba, mboxq);
9960 /* Release the token */
9961 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9962 phba->sli.mbox_active = NULL;
9963 }
9964 spin_unlock_irqrestore(&phba->hbalock, iflags);
9966 return MBX_NOT_FINISHED;
9970 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
9971 * @phba: Pointer to HBA context object.
9972 * @pmbox: Pointer to mailbox object.
9973 * @flag: Flag indicating how the mailbox need to be processed.
9975 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
9976 * the API jump table function pointer from the lpfc_hba struct.
9978 * Return codes the caller owns the mailbox command after the return of the
9979 * function.
9980 */
9981 int
9982 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
9983 {
9984 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
9985 }
9988 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
9989 * @phba: The hba struct for which this call is being executed.
9990 * @dev_grp: The HBA PCI-Device group number.
9992 * This routine sets up the mbox interface API function jump table in the
9993 * @phba struct.
9994 * Returns: 0 - success, -ENODEV - failure.
9995 */
9996 int
9997 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9998 {
10000 switch (dev_grp) {
10001 case LPFC_PCI_DEV_LP:
10002 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
10003 phba->lpfc_sli_handle_slow_ring_event =
10004 lpfc_sli_handle_slow_ring_event_s3;
10005 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
10006 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
10007 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
10008 break;
10009 case LPFC_PCI_DEV_OC:
10010 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
10011 phba->lpfc_sli_handle_slow_ring_event =
10012 lpfc_sli_handle_slow_ring_event_s4;
10013 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
10014 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
10015 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
10018 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10019 "1420 Invalid HBA PCI-device group: 0x%x\n",
10027 * __lpfc_sli_ringtx_put - Add an iocb to the txq
10028 * @phba: Pointer to HBA context object.
10029 * @pring: Pointer to driver SLI ring object.
10030 * @piocb: Pointer to address of newly added command iocb.
10032 * This function is called with the hbalock held for SLI3 ports or
10033 * the ring lock held for SLI4 ports to add a command
10034 * iocb to the txq when the SLI layer cannot submit the command iocb to the firmware.
10038 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10039 struct lpfc_iocbq *piocb)
10041 if (phba->sli_rev == LPFC_SLI_REV4)
10042 lockdep_assert_held(&pring->ring_lock);
10044 lockdep_assert_held(&phba->hbalock);
10045 /* Insert the caller's iocb in the txq tail for later processing. */
10046 list_add_tail(&piocb->list, &pring->txq);
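/*
 * Example (editor's illustrative sketch): callers must hold the correct
 * lock before queuing to the txq, matching the lockdep assertions
 * above.  For an SLI4 port:
 *
 *	unsigned long iflags;
 *
 *	spin_lock_irqsave(&pring->ring_lock, iflags);
 *	__lpfc_sli_ringtx_put(phba, pring, piocb);
 *	spin_unlock_irqrestore(&pring->ring_lock, iflags);
 *
 * An SLI3 caller takes phba->hbalock instead.
 */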
10050 * lpfc_sli_next_iocb - Get the next iocb in the txq
10051 * @phba: Pointer to HBA context object.
10052 * @pring: Pointer to driver SLI ring object.
10053 * @piocb: Pointer to address of newly added command iocb.
10055 * This function is called with hbalock held before a new
10056 * iocb is submitted to the firmware. This function checks the
10057 * txq so that any iocbs queued there are flushed to the firmware
10058 * before new iocbs are submitted.
10059 * If there are iocbs in the txq which need to be submitted
10060 * to firmware, lpfc_sli_next_iocb returns the first element
10061 * of the txq after dequeuing it from txq.
10062 * If there is no iocb in the txq then the function will return
10063 * *piocb and *piocb is set to NULL. Caller needs to check
10064 * *piocb to find if there are more commands in the txq.
10066 static struct lpfc_iocbq *
10067 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10068 struct lpfc_iocbq **piocb)
10070 struct lpfc_iocbq * nextiocb;
10072 lockdep_assert_held(&phba->hbalock);
10074 nextiocb = lpfc_sli_ringtx_get(phba, pring);
10084 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
10085 * @phba: Pointer to HBA context object.
10086 * @ring_number: SLI ring number to issue iocb on.
10087 * @piocb: Pointer to command iocb.
10088 * @flag: Flag indicating if this command can be put into txq.
10090 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
10091 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
10092 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
10093 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
10094 * this function allows only iocbs for posting buffers. This function finds
10095 * next available slot in the command ring and posts the command to the
10096 * available slot and writes the port attention register to request HBA start
10097 * processing new iocb. If there is no slot available in the ring and
10098 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
10099 * the function returns IOCB_BUSY.
10101 * This function is called with the hbalock held. The function returns success
10102 * after it successfully submits the iocb to the firmware or after adding it to the txq.
10106 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
10107 struct lpfc_iocbq *piocb, uint32_t flag)
10109 struct lpfc_iocbq *nextiocb;
10111 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
10113 lockdep_assert_held(&phba->hbalock);
10115 if (piocb->iocb_cmpl && (!piocb->vport) &&
10116 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
10117 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
10118 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10119 "1807 IOCB x%x failed. No vport\n",
10120 piocb->iocb.ulpCommand);
10126 /* If the PCI channel is in offline state, do not post iocbs. */
10127 if (unlikely(pci_channel_offline(phba->pcidev)))
10130 /* If HBA has a deferred error attention, fail the iocb. */
10131 if (unlikely(phba->hba_flag & DEFER_ERATT))
10135 * We should never get an IOCB if we are in a < LINK_DOWN state
10137 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10141 * Check to see if we are blocking IOCB processing because of an
10142 * outstanding event.
10144 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
10147 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
10149 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
10150 * can be issued if the link is not up.
10152 switch (piocb->iocb.ulpCommand) {
10153 case CMD_GEN_REQUEST64_CR:
10154 case CMD_GEN_REQUEST64_CX:
10155 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
10156 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
10157 FC_RCTL_DD_UNSOL_CMD) ||
10158 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
10159 MENLO_TRANSPORT_TYPE))
10163 case CMD_QUE_RING_BUF_CN:
10164 case CMD_QUE_RING_BUF64_CN:
10166 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
10167 * completion, iocb_cmpl MUST be 0.
10169 if (piocb->iocb_cmpl)
10170 piocb->iocb_cmpl = NULL;
10172 case CMD_CREATE_XRI_CR:
10173 case CMD_CLOSE_XRI_CN:
10174 case CMD_CLOSE_XRI_CX:
10181 * For FCP commands, we must be in a state where we can process link
10182 * attention events.
10184 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
10185 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
10189 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
10190 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
10191 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
10194 lpfc_sli_update_ring(phba, pring);
10196 lpfc_sli_update_full_ring(phba, pring);
10199 return IOCB_SUCCESS;
10204 pring->stats.iocb_cmd_delay++;
10208 if (!(flag & SLI_IOCB_RET_IOCB)) {
10209 __lpfc_sli_ringtx_put(phba, pring, piocb);
10210 return IOCB_SUCCESS;
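/*
 * Example (editor's illustrative sketch): SLI_IOCB_RET_IOCB decides who
 * keeps the iocb when no ring slot is available.  Without the flag the
 * iocb is parked on the txq and IOCB_SUCCESS is returned (above); with
 * the flag the caller gets IOCB_BUSY back and still owns the iocb:
 *
 *	rc = __lpfc_sli_issue_iocb_s3(phba, LPFC_ELS_RING, piocb,
 *				      SLI_IOCB_RET_IOCB);
 *	if (rc == IOCB_BUSY) {
 *		// caller may retry later or release the iocb itself
 *	}
 */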
10217 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
10218 * @phba: Pointer to HBA context object.
10219 * @piocbq: Pointer to command iocb.
10220 * @sglq: Pointer to the scatter gather queue object.
10222 * This routine converts the bpl or bde that is in the IOCB
10223 * to a sgl list for the sli4 hardware. The physical address
10224 * of the bpl/bde is converted back to a virtual address.
10225 * If the IOCB contains a BPL then the list of BDE's is
10226 * converted to sli4_sge's. If the IOCB contains a single
10227 * BDE then it is converted to a single sli4_sge.
10228 * The IOCB is still in cpu endianness so the contents of
10229 * the bpl can be used without byte swapping.
10231 * Returns valid XRI = Success, NO_XRI = Failure.
10234 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
10235 struct lpfc_sglq *sglq)
10237 uint16_t xritag = NO_XRI;
10238 struct ulp_bde64 *bpl = NULL;
10239 struct ulp_bde64 bde;
10240 struct sli4_sge *sgl = NULL;
10241 struct lpfc_dmabuf *dmabuf;
10245 uint32_t offset = 0; /* accumulated offset in the sg request list */
10246 int inbound = 0; /* number of sg reply entries inbound from firmware */
10248 if (!piocbq || !sglq)
10251 sgl = (struct sli4_sge *)sglq->sgl;
10252 icmd = &piocbq->iocb;
10253 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
10254 return sglq->sli4_xritag;
10255 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
10256 numBdes = icmd->un.genreq64.bdl.bdeSize /
10257 sizeof(struct ulp_bde64);
10258 /* The addrHigh and addrLow fields within the IOCB
10259 * have not been byteswapped yet so there is no
10260 * need to swap them back.
10262 if (piocbq->context3)
10263 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
10267 bpl = (struct ulp_bde64 *)dmabuf->virt;
10271 for (i = 0; i < numBdes; i++) {
10272 /* Should already be byte swapped. */
10273 sgl->addr_hi = bpl->addrHigh;
10274 sgl->addr_lo = bpl->addrLow;
10276 sgl->word2 = le32_to_cpu(sgl->word2);
10277 if ((i+1) == numBdes)
10278 bf_set(lpfc_sli4_sge_last, sgl, 1);
10280 bf_set(lpfc_sli4_sge_last, sgl, 0);
10281 /* swap the size field back to the cpu so we
10282 * can assign it to the sgl.
10284 bde.tus.w = le32_to_cpu(bpl->tus.w);
10285 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
10286 /* The offsets in the sgl need to be accumulated
10287 * separately for the request and reply lists.
10288 * The request is always first, the reply follows.
10290 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
10291 /* add up the reply sg entries */
10292 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
10294 /* first inbound? reset the offset */
10297 bf_set(lpfc_sli4_sge_offset, sgl, offset);
10298 bf_set(lpfc_sli4_sge_type, sgl,
10299 LPFC_SGE_TYPE_DATA);
10300 offset += bde.tus.f.bdeSize;
10302 sgl->word2 = cpu_to_le32(sgl->word2);
10306 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
10307 /* The addrHigh and addrLow fields of the BDE have not
10308 * been byteswapped yet so they need to be swapped
10309 * before putting them in the sgl.
10312 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
10314 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
10315 sgl->word2 = le32_to_cpu(sgl->word2);
10316 bf_set(lpfc_sli4_sge_last, sgl, 1);
10317 sgl->word2 = cpu_to_le32(sgl->word2);
10319 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
10321 return sglq->sli4_xritag;
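/*
 * Worked example (editor's sketch): converting a single 64-bit BDE into
 * an sli4_sge mirrors the loop above.  The BPL words are little-endian
 * in memory, so only the length word is swapped to host order for the
 * bit-field assignment, and word2 is swapped back before posting:
 *
 *	sgl->addr_hi = bpl->addrHigh;            // already little-endian
 *	sgl->addr_lo = bpl->addrLow;             // already little-endian
 *	sgl->word2   = le32_to_cpu(sgl->word2);
 *	bf_set(lpfc_sli4_sge_last, sgl, 1);
 *	sgl->word2   = cpu_to_le32(sgl->word2);
 *	bde.tus.w    = le32_to_cpu(bpl->tus.w);
 *	sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
 */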
10325 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
10326 * @phba: Pointer to HBA context object.
10327 * @iocbq: Pointer to command iocb.
10328 * @wqe: Pointer to the work queue entry.
10330 * This routine converts the iocb command to its Work Queue Entry
10331 * equivalent. The wqe pointer should not have any fields set when
10332 * this routine is called because it will memcpy over them.
10333 * This routine does not set the CQ_ID or the WQEC bits in the wqe.
10336 * Returns: 0 = Success, IOCB_ERROR = Failure.
10339 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
10340 union lpfc_wqe128 *wqe)
10342 uint32_t xmit_len = 0, total_len = 0;
10345 uint32_t abort_tag;
10346 uint8_t command_type = ELS_COMMAND_NON_FIP;
10349 uint16_t abrt_iotag;
10350 struct lpfc_iocbq *abrtiocbq;
10351 struct ulp_bde64 *bpl = NULL;
10352 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
10354 struct ulp_bde64 bde;
10355 struct lpfc_nodelist *ndlp;
10359 fip = phba->hba_flag & HBA_FIP_SUPPORT;
10360 /* The fcp commands will set command type */
10361 if (iocbq->iocb_flag & LPFC_IO_FCP)
10362 command_type = FCP_COMMAND;
10363 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
10364 command_type = ELS_COMMAND_FIP;
10366 command_type = ELS_COMMAND_NON_FIP;
10368 if (phba->fcp_embed_io)
10369 memset(wqe, 0, sizeof(union lpfc_wqe128));
10370 /* Some of the fields are in the right position already */
10371 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
10372 /* The ct field has moved so reset */
10373 wqe->generic.wqe_com.word7 = 0;
10374 wqe->generic.wqe_com.word10 = 0;
10376 abort_tag = (uint32_t) iocbq->iotag;
10377 xritag = iocbq->sli4_xritag;
10378 /* words 0-2 bpl convert bde */
10379 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
10380 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
10381 sizeof(struct ulp_bde64);
10382 bpl = (struct ulp_bde64 *)
10383 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
10387 /* Should already be byte swapped. */
10388 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
10389 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
10390 /* swap the size field back to the cpu so we
10391 * can assign it to the sgl.
10393 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
10394 xmit_len = wqe->generic.bde.tus.f.bdeSize;
10396 for (i = 0; i < numBdes; i++) {
10397 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
10398 total_len += bde.tus.f.bdeSize;
10401 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
10403 iocbq->iocb.ulpIoTag = iocbq->iotag;
10404 cmnd = iocbq->iocb.ulpCommand;
10406 switch (iocbq->iocb.ulpCommand) {
10407 case CMD_ELS_REQUEST64_CR:
10408 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
10409 ndlp = iocbq->context_un.ndlp;
10411 ndlp = (struct lpfc_nodelist *)iocbq->context1;
10412 if (!iocbq->iocb.ulpLe) {
10413 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10414 "2007 Only Limited Edition cmd Format"
10415 " supported 0x%x\n",
10416 iocbq->iocb.ulpCommand);
10420 wqe->els_req.payload_len = xmit_len;
10421 /* Els_request64 has a TMO */
10422 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
10423 iocbq->iocb.ulpTimeout);
10424 /* Need a VF for word 4: set the vf bit */
10425 bf_set(els_req64_vf, &wqe->els_req, 0);
10426 /* And a VFID for word 12 */
10427 bf_set(els_req64_vfid, &wqe->els_req, 0);
10428 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
10429 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10430 iocbq->iocb.ulpContext);
10431 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
10432 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
10433 /* CCP CCPE PV PRI in word10 were set in the memcpy */
10434 if (command_type == ELS_COMMAND_FIP)
10435 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
10436 >> LPFC_FIP_ELS_ID_SHIFT);
10437 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
10438 iocbq->context2)->virt);
10439 if_type = bf_get(lpfc_sli_intf_if_type,
10440 &phba->sli4_hba.sli_intf);
10441 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10442 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
10443 *pcmd == ELS_CMD_SCR ||
10444 *pcmd == ELS_CMD_RDF ||
10445 *pcmd == ELS_CMD_EDC ||
10446 *pcmd == ELS_CMD_RSCN_XMT ||
10447 *pcmd == ELS_CMD_FDISC ||
10448 *pcmd == ELS_CMD_LOGO ||
10449 *pcmd == ELS_CMD_QFPA ||
10450 *pcmd == ELS_CMD_UVEM ||
10451 *pcmd == ELS_CMD_PLOGI)) {
10452 bf_set(els_req64_sp, &wqe->els_req, 1);
10453 bf_set(els_req64_sid, &wqe->els_req,
10454 iocbq->vport->fc_myDID);
10455 if ((*pcmd == ELS_CMD_FLOGI) &&
10456 !(phba->fc_topology ==
10457 LPFC_TOPOLOGY_LOOP))
10458 bf_set(els_req64_sid, &wqe->els_req, 0);
10459 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
10460 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10461 phba->vpi_ids[iocbq->vport->vpi]);
10462 } else if (pcmd && iocbq->context1) {
10463 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
10464 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10465 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
10468 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
10469 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
10470 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
10471 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
10472 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
10473 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
10474 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
10475 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
10476 wqe->els_req.max_response_payload_len = total_len - xmit_len;
10478 case CMD_XMIT_SEQUENCE64_CX:
10479 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
10480 iocbq->iocb.un.ulpWord[3]);
10481 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
10482 iocbq->iocb.unsli3.rcvsli3.ox_id);
10483 /* The entire sequence is transmitted for this IOCB */
10484 xmit_len = total_len;
10485 cmnd = CMD_XMIT_SEQUENCE64_CR;
10486 if (phba->link_flag & LS_LOOPBACK_MODE)
10487 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
10489 case CMD_XMIT_SEQUENCE64_CR:
10490 /* word3 iocb=io_tag32 wqe=reserved */
10491 wqe->xmit_sequence.rsvd3 = 0;
10492 /* word4 relative_offset memcpy */
10493 /* word5 r_ctl/df_ctl memcpy */
10494 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
10495 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
10496 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
10497 LPFC_WQE_IOD_WRITE);
10498 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
10499 LPFC_WQE_LENLOC_WORD12);
10500 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
10501 wqe->xmit_sequence.xmit_len = xmit_len;
10502 command_type = OTHER_COMMAND;
10504 case CMD_XMIT_BCAST64_CN:
10505 /* word3 iocb=iotag32 wqe=seq_payload_len */
10506 wqe->xmit_bcast64.seq_payload_len = xmit_len;
10507 /* word4 iocb=rsvd wqe=rsvd */
10508 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
10509 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
10510 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
10511 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10512 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
10513 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
10514 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
10515 LPFC_WQE_LENLOC_WORD3);
10516 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
10518 case CMD_FCP_IWRITE64_CR:
10519 command_type = FCP_COMMAND_DATA_OUT;
10520 /* word3 iocb=iotag wqe=payload_offset_len */
10521 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
10522 bf_set(payload_offset_len, &wqe->fcp_iwrite,
10523 xmit_len + sizeof(struct fcp_rsp));
10524 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
10526 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
10527 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
10528 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
10529 iocbq->iocb.ulpFCP2Rcvy);
10530 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
10531 /* Always open the exchange */
10532 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
10533 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
10534 LPFC_WQE_LENLOC_WORD4);
10535 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
10536 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
10537 if (iocbq->iocb_flag & LPFC_IO_OAS) {
10538 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
10539 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
10540 if (iocbq->priority) {
10541 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10542 (iocbq->priority << 1));
10544 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10545 (phba->cfg_XLanePriority << 1));
10548 /* Note, word 10 is already initialized to 0 */
10550 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
10551 if (phba->cfg_enable_pbde)
10552 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
10554 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
10556 if (phba->fcp_embed_io) {
10557 struct lpfc_io_buf *lpfc_cmd;
10558 struct sli4_sge *sgl;
10559 struct fcp_cmnd *fcp_cmnd;
10562 /* 128 byte wqe support here */
10564 lpfc_cmd = iocbq->context1;
10565 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10566 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10568 /* Word 0-2 - FCP_CMND */
10569 wqe->generic.bde.tus.f.bdeFlags =
10570 BUFF_TYPE_BDE_IMMED;
10571 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10572 wqe->generic.bde.addrHigh = 0;
10573 wqe->generic.bde.addrLow = 88; /* Word 22 */
10575 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10576 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10578 /* Word 22-29 FCP CMND Payload */
10579 ptr = &wqe->words[22];
10580 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10583 case CMD_FCP_IREAD64_CR:
10584 /* word3 iocb=iotag wqe=payload_offset_len */
10585 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
10586 bf_set(payload_offset_len, &wqe->fcp_iread,
10587 xmit_len + sizeof(struct fcp_rsp));
10588 bf_set(cmd_buff_len, &wqe->fcp_iread,
10590 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
10591 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
10592 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
10593 iocbq->iocb.ulpFCP2Rcvy);
10594 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
10595 /* Always open the exchange */
10596 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
10597 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
10598 LPFC_WQE_LENLOC_WORD4);
10599 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
10600 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
10601 if (iocbq->iocb_flag & LPFC_IO_OAS) {
10602 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
10603 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
10604 if (iocbq->priority) {
10605 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
10606 (iocbq->priority << 1));
10608 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
10609 (phba->cfg_XLanePriority << 1));
10612 /* Note, word 10 is already initialized to 0 */
10614 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
10615 if (phba->cfg_enable_pbde)
10616 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
10618 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
10620 if (phba->fcp_embed_io) {
10621 struct lpfc_io_buf *lpfc_cmd;
10622 struct sli4_sge *sgl;
10623 struct fcp_cmnd *fcp_cmnd;
10626 /* 128 byte wqe support here */
10628 lpfc_cmd = iocbq->context1;
10629 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10630 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10632 /* Word 0-2 - FCP_CMND */
10633 wqe->generic.bde.tus.f.bdeFlags =
10634 BUFF_TYPE_BDE_IMMED;
10635 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10636 wqe->generic.bde.addrHigh = 0;
10637 wqe->generic.bde.addrLow = 88; /* Word 22 */
10639 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
10640 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
10642 /* Word 22-29 FCP CMND Payload */
10643 ptr = &wqe->words[22];
10644 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10647 case CMD_FCP_ICMND64_CR:
10648 /* word3 iocb=iotag wqe=payload_offset_len */
10649 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
10650 bf_set(payload_offset_len, &wqe->fcp_icmd,
10651 xmit_len + sizeof(struct fcp_rsp));
10652 bf_set(cmd_buff_len, &wqe->fcp_icmd,
10654 /* word3 iocb=IO_TAG wqe=reserved */
10655 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
10656 /* Always open the exchange */
10657 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
10658 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
10659 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
10660 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
10661 LPFC_WQE_LENLOC_NONE);
10662 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
10663 iocbq->iocb.ulpFCP2Rcvy);
10664 if (iocbq->iocb_flag & LPFC_IO_OAS) {
10665 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
10666 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
10667 if (iocbq->priority) {
10668 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
10669 (iocbq->priority << 1));
10671 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
10672 (phba->cfg_XLanePriority << 1));
10675 /* Note, word 10 is already initialized to 0 */
10677 if (phba->fcp_embed_io) {
10678 struct lpfc_io_buf *lpfc_cmd;
10679 struct sli4_sge *sgl;
10680 struct fcp_cmnd *fcp_cmnd;
10683 /* 128 byte wqe support here */
10685 lpfc_cmd = iocbq->context1;
10686 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10687 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10689 /* Word 0-2 - FCP_CMND */
10690 wqe->generic.bde.tus.f.bdeFlags =
10691 BUFF_TYPE_BDE_IMMED;
10692 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10693 wqe->generic.bde.addrHigh = 0;
10694 wqe->generic.bde.addrLow = 88; /* Word 22 */
10696 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
10697 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
10699 /* Word 22-29 FCP CMND Payload */
10700 ptr = &wqe->words[22];
10701 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10704 case CMD_GEN_REQUEST64_CR:
10705 /* For this command calculate the xmit length of the request bde. */
10709 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
10710 sizeof(struct ulp_bde64);
10711 for (i = 0; i < numBdes; i++) {
10712 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
10713 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
10715 xmit_len += bde.tus.f.bdeSize;
10717 /* word3 iocb=IO_TAG wqe=request_payload_len */
10718 wqe->gen_req.request_payload_len = xmit_len;
10719 /* word4 iocb=parameter wqe=relative_offset memcpy */
10720 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
10721 /* word6 context tag copied in memcpy */
10722 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
10723 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
10724 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10725 "2015 Invalid CT %x command 0x%x\n",
10726 ct, iocbq->iocb.ulpCommand);
10729 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
10730 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
10731 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
10732 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
10733 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
10734 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
10735 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
10736 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
10737 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
10738 command_type = OTHER_COMMAND;
10740 case CMD_XMIT_ELS_RSP64_CX:
10741 ndlp = (struct lpfc_nodelist *)iocbq->context1;
10742 /* words 0-2 BDE memcpy */
10743 /* word3 iocb=iotag32 wqe=response_payload_len */
10744 wqe->xmit_els_rsp.response_payload_len = xmit_len;
10746 wqe->xmit_els_rsp.word4 = 0;
10747 /* word5 iocb=rsvd wqe=did */
10748 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
10749 iocbq->iocb.un.xseq64.xmit_els_remoteID);
10751 if_type = bf_get(lpfc_sli_intf_if_type,
10752 &phba->sli4_hba.sli_intf);
10753 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10754 if (iocbq->vport->fc_flag & FC_PT2PT) {
10755 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
10756 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
10757 iocbq->vport->fc_myDID);
10758 if (iocbq->vport->fc_myDID == Fabric_DID) {
10759 bf_set(wqe_els_did,
10760 &wqe->xmit_els_rsp.wqe_dest, 0);
10764 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
10765 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10766 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
10767 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
10768 iocbq->iocb.unsli3.rcvsli3.ox_id);
10769 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
10770 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
10771 phba->vpi_ids[iocbq->vport->vpi]);
10772 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
10773 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
10774 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
10775 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
10776 LPFC_WQE_LENLOC_WORD3);
10777 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
10778 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
10779 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
10780 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
10781 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
10782 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
10783 iocbq->vport->fc_myDID);
10784 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
10785 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
10786 phba->vpi_ids[phba->pport->vpi]);
10788 command_type = OTHER_COMMAND;
10790 case CMD_CLOSE_XRI_CN:
10791 case CMD_ABORT_XRI_CN:
10792 case CMD_ABORT_XRI_CX:
10793 /* words 0-2 memcpy should be 0 (reserved) */
10794 /* port will send abts */
10795 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
10796 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
10797 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
10798 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
10802 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
10804 * The link is down, or the command was ELS_FIP
10805 * so the fw does not need to send abts
10808 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
10810 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
10811 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
10812 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
10813 wqe->abort_cmd.rsrvd5 = 0;
10814 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
10815 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10816 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
10818 * The abort handler will send us CMD_ABORT_XRI_CN or
10819 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
10821 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
10822 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
10823 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
10824 LPFC_WQE_LENLOC_NONE);
10825 cmnd = CMD_ABORT_XRI_CX;
10826 command_type = OTHER_COMMAND;
10829 case CMD_XMIT_BLS_RSP64_CX:
10830 ndlp = (struct lpfc_nodelist *)iocbq->context1;
10831 /* As the BLS ABTS RSP WQE is very different from other WQEs,
10832 * we reconstruct this WQE here from scratch, based on the
10833 * information in the iocbq.
10835 memset(wqe, 0, sizeof(*wqe));
10836 /* OX_ID is the same regardless of who sent the ABTS on the CT exchange */
10837 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
10838 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
10839 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
10840 LPFC_ABTS_UNSOL_INT) {
10841 /* ABTS sent by initiator to CT exchange, the
10842 * RX_ID field will be filled with the newly
10843 * allocated responder XRI.
10845 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10846 iocbq->sli4_xritag);
10848 /* ABTS sent by responder to CT exchange, the
10849 * RX_ID field will be filled with the responder
10852 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10853 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
10855 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
10856 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
10859 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
10861 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
10862 iocbq->iocb.ulpContext);
10863 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
10864 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
10865 phba->vpi_ids[phba->pport->vpi]);
10866 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
10867 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
10868 LPFC_WQE_LENLOC_NONE);
10869 /* Overwrite the pre-set command type with OTHER_COMMAND */
10870 command_type = OTHER_COMMAND;
10871 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
10872 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
10873 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
10874 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
10875 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
10876 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
10877 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
10881 case CMD_SEND_FRAME:
10882 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
10883 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
10884 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
10885 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
10886 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
10887 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10888 bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
10889 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
10890 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10891 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10892 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10894 case CMD_XRI_ABORTED_CX:
10895 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
10896 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
10897 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
10898 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
10899 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
10901 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10902 "2014 Invalid command 0x%x\n",
10903 iocbq->iocb.ulpCommand);
10907 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
10908 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
10909 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
10910 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
10911 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
10912 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
10913 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
10914 LPFC_IO_DIF_INSERT);
10915 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10916 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10917 wqe->generic.wqe_com.abort_tag = abort_tag;
10918 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
10919 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
10920 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
10921 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
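/*
 * Editor's note (illustrative): whichever case was taken above, every
 * WQE leaves this routine with the common fields filled in (XRI tag,
 * request tag, abort tag, command, type, class, default CQ id, and any
 * LPFC_IO_DIF_* T10 PI setting).  The request tag is what ties a later
 * completion back to the iocb; conceptually the reverse lookup is:
 *
 *	piocbq = phba->sli.iocbq_lookup[iotag];
 */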
10926 * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
10927 * @phba: Pointer to HBA context object.
10928 * @ring_number: SLI ring number to issue the iocb on.
10929 * @piocb: Pointer to command iocb.
10930 * @flag: Flag indicating if this command can be put into txq.
10932 * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
10933 * routine to send an iocb command to an HBA with SLI-3 interface spec.
10935 * This function takes the hbalock before invoking the lockless version.
10936 * The function returns success after it successfully submits the iocb to
10937 * the firmware or after adding it to the txq.
10940 __lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
10941 struct lpfc_iocbq *piocb, uint32_t flag)
10943 unsigned long iflags;
10946 spin_lock_irqsave(&phba->hbalock, iflags);
10947 rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
10948 spin_unlock_irqrestore(&phba->hbalock, iflags);
10954 * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
10955 * @phba: Pointer to HBA context object.
10956 * @ring_number: SLI ring number to issue wqe on.
10957 * @piocb: Pointer to command iocb.
10958 * @flag: Flag indicating if this command can be put into txq.
10960 * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
10961 * a wqe command to an HBA with SLI-4 interface spec.
10963 * This function is a lockless version. The function returns success
10964 * after it successfully submits the wqe to the firmware or after adding it to the txq.
10968 __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
10969 struct lpfc_iocbq *piocb, uint32_t flag)
10972 struct lpfc_io_buf *lpfc_cmd =
10973 (struct lpfc_io_buf *)piocb->context1;
10974 union lpfc_wqe128 *wqe = &piocb->wqe;
10975 struct sli4_sge *sgl;
10977 /* 128 byte wqe support here */
10978 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10980 if (phba->fcp_embed_io) {
10981 struct fcp_cmnd *fcp_cmnd;
10984 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10986 /* Word 0-2 - FCP_CMND */
10987 wqe->generic.bde.tus.f.bdeFlags =
10988 BUFF_TYPE_BDE_IMMED;
10989 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10990 wqe->generic.bde.addrHigh = 0;
10991 wqe->generic.bde.addrLow = 88; /* Word 22 */
10993 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10994 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10996 /* Word 22-29 FCP CMND Payload */
10997 ptr = &wqe->words[22];
10998 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
11000 /* Word 0-2 - Inline BDE */
11001 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
11002 wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
11003 wqe->generic.bde.addrHigh = sgl->addr_hi;
11004 wqe->generic.bde.addrLow = sgl->addr_lo;
11007 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
11008 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
11011 /* add the VMID tags as per switch response */
11012 if (unlikely(piocb->iocb_flag & LPFC_IO_VMID)) {
11013 if (phba->pport->vmid_priority_tagging) {
11014 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
11015 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
11016 (piocb->vmid_tag.cs_ctl_vmid));
11018 bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
11019 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
11020 wqe->words[31] = piocb->vmid_tag.app_id;
11023 rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
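/*
 * Editor's note (illustrative): in the embedded path above, the
 * immediate BDE's addrLow of 88 is simply the byte offset of WQE
 * word 22, where the FCP_CMND payload is copied:
 *
 *	88 == 22 * sizeof(uint32_t)
 *
 * In the non-embedded path, words 0-2 instead carry a 64-bit BDE whose
 * address is taken from the first external sgl entry.
 */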
11028 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
11029 * @phba: Pointer to HBA context object.
11030 * @ring_number: SLI ring number to issue iocb on.
11031 * @piocb: Pointer to command iocb.
11032 * @flag: Flag indicating if this command can be put into txq.
11034 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
11035 * an iocb command to an HBA with SLI-4 interface spec.
11037 * This function is called with the ring lock held. The function returns success
11038 * after it successfully submits the iocb to the firmware or after adding it to the txq.
11042 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
11043 struct lpfc_iocbq *piocb, uint32_t flag)
11045 struct lpfc_sglq *sglq;
11046 union lpfc_wqe128 wqe;
11047 struct lpfc_queue *wq;
11048 struct lpfc_sli_ring *pring;
11051 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
11052 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
11053 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
11055 wq = phba->sli4_hba.els_wq;
11058 /* Get corresponding ring */
11062 * The WQE can be either 64 or 128 bytes,
11065 lockdep_assert_held(&pring->ring_lock);
11067 if (piocb->sli4_xritag == NO_XRI) {
11068 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
11069 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
11072 if (!list_empty(&pring->txq)) {
11073 if (!(flag & SLI_IOCB_RET_IOCB)) {
11074 __lpfc_sli_ringtx_put(phba,
11076 return IOCB_SUCCESS;
11081 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
11083 if (!(flag & SLI_IOCB_RET_IOCB)) {
11084 __lpfc_sli_ringtx_put(phba,
11087 return IOCB_SUCCESS;
11093 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
11094 /* These IO's already have an XRI and a mapped sgl. */
11099 * This is a continuation of a command (CX), so this
11100 * sglq is on the active list.
11102 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
11108 piocb->sli4_lxritag = sglq->sli4_lxritag;
11109 piocb->sli4_xritag = sglq->sli4_xritag;
11110 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
11114 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
11117 if (lpfc_sli4_wq_put(wq, &wqe))
11119 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
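/*
 * Editor's flow summary (illustrative pseudocode of the XRI logic
 * above):
 *
 *	if (piocb->sli4_xritag == NO_XRI)
 *		abort/close commands need no sglq;
 *		otherwise take an ELS sglq or park the iocb on the txq
 *	else if (LPFC_IO_FCP)
 *		XRI and sgl were mapped when the buffer was set up
 *	else
 *		reuse the active sglq of the continued (CX) exchange
 */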
11125 * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
11127 * This routine wraps the actual FCP I/O routine for issuing a WQE (SLI-4)
11128 * or an IOCB (SLI-3) through the API jump table function
11129 * pointer from the lpfc_hba struct.
11132 * IOCB_ERROR - Error
11133 * IOCB_SUCCESS - Success
11137 lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
11138 struct lpfc_iocbq *piocb, uint32_t flag)
11140 return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
11144 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
11146 * This routine wraps the actual lockless IOCB issue routine through the
11147 * API jump table function pointer from the lpfc_hba struct.
11150 * IOCB_ERROR - Error
11151 * IOCB_SUCCESS - Success
11155 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
11156 struct lpfc_iocbq *piocb, uint32_t flag)
11158 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11162 * lpfc_sli_api_table_setup - Set up sli api function jump table
11163 * @phba: The hba struct for which this call is being executed.
11164 * @dev_grp: The HBA PCI-Device group number.
11166 * This routine sets up the SLI interface API function jump table in the @phba struct.
11168 * Returns: 0 - success, -ENODEV - failure.
11171 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
11175 case LPFC_PCI_DEV_LP:
11176 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
11177 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
11178 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
11180 case LPFC_PCI_DEV_OC:
11181 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
11182 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
11183 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
11186 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11187 "1419 Invalid HBA PCI-device group: 0x%x\n",
11191 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
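/*
 * Example (editor's illustrative sketch): once the table is set up,
 * callers stay SLI-rev agnostic by dispatching through the pointers:
 *
 *	if (lpfc_sli_api_table_setup(phba, dev_grp))
 *		return -ENODEV;
 *	...
 *	rc = phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
 */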
11196 * lpfc_sli4_calc_ring - Calculates which ring to use
11197 * @phba: Pointer to HBA context object.
11198 * @piocb: Pointer to command iocb.
11200 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
11201 * hba_wqidx, thus we need to calculate the corresponding ring.
11202 * Since aborts must go on the same WQ as the command they are
11203 * aborting, we use the command's hba_wqidx.
11205 struct lpfc_sli_ring *
11206 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
11208 struct lpfc_io_buf *lpfc_cmd;
11210 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
11211 if (unlikely(!phba->sli4_hba.hdwq))
11214 * For an abort iocb, hba_wqidx should already
11215 * be set up based on which work queue was used.
11217 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
11218 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
11219 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
11221 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
11223 if (unlikely(!phba->sli4_hba.els_wq))
11225 piocb->hba_wqidx = 0;
11226 return phba->sli4_hba.els_wq->pring;
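/*
 * Example (editor's illustrative sketch): an abort for an FCP command
 * inherits the original command's hba_wqidx so the abort lands on the
 * same work queue:
 *
 *	abtsiocbq->hba_wqidx = cmdiocbq->hba_wqidx;
 *	abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
 *	pring = lpfc_sli4_calc_ring(phba, abtsiocbq);
 */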
11231 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
11232 * @phba: Pointer to HBA context object.
11233 * @ring_number: Ring number
11234 * @piocb: Pointer to command iocb.
11235 * @flag: Flag indicating if this command can be put into txq.
11237 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
11238 * function. This function takes the appropriate lock, calls
11239 * __lpfc_sli_issue_iocb, and returns whatever error that
11240 * function returns. This wrapper is used by
11241 * functions which do not hold the required lock.
11244 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
11245 struct lpfc_iocbq *piocb, uint32_t flag)
11247 struct lpfc_sli_ring *pring;
11248 struct lpfc_queue *eq;
11249 unsigned long iflags;
11252 if (phba->sli_rev == LPFC_SLI_REV4) {
11253 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
11255 pring = lpfc_sli4_calc_ring(phba, piocb);
11256 if (unlikely(pring == NULL))
11259 spin_lock_irqsave(&pring->ring_lock, iflags);
11260 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11261 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11263 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
11265 /* For now, SLI2/3 will still use hbalock */
11266 spin_lock_irqsave(&phba->hbalock, iflags);
11267 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11268 spin_unlock_irqrestore(&phba->hbalock, iflags);
11274 * lpfc_extra_ring_setup - Extra ring setup function
11275 * @phba: Pointer to HBA context object.
11277 * This function is called while the driver attaches to the
11278 * HBA to set up the extra ring. The extra ring is used
11279 * only when the driver needs to support target mode
11280 * or IP over FC functionality.
11282 * This function is called with no lock held. SLI3 only.
11285 lpfc_extra_ring_setup(struct lpfc_hba *phba)
11287 struct lpfc_sli *psli;
11288 struct lpfc_sli_ring *pring;
11292 /* Adjust cmd/rsp ring iocb entries more evenly */
11294 /* Take some away from the FCP ring */
11295 pring = &psli->sli3_ring[LPFC_FCP_RING];
11296 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11297 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11298 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11299 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11301 /* and give them to the extra ring */
11302 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
11304 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11305 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11306 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11307 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11309 /* Setup default profile for this ring */
11310 pring->iotag_max = 4096;
11311 pring->num_mask = 1;
11312 pring->prt[0].profile = 0; /* Mask 0 */
11313 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
11314 pring->prt[0].type = phba->cfg_multi_ring_type;
11315 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
11320 lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
11321 struct lpfc_nodelist *ndlp)
11323 unsigned long iflags;
11324 struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
11326 spin_lock_irqsave(&phba->hbalock, iflags);
11327 if (!list_empty(&evtp->evt_listp)) {
11328 spin_unlock_irqrestore(&phba->hbalock, iflags);
11332 /* Incrementing the reference count until the queued work is done. */
11333 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
11334 if (!evtp->evt_arg1) {
11335 spin_unlock_irqrestore(&phba->hbalock, iflags);
11338 evtp->evt = LPFC_EVT_RECOVER_PORT;
11339 list_add_tail(&evtp->evt_listp, &phba->work_list);
11340 spin_unlock_irqrestore(&phba->hbalock, iflags);
11342 lpfc_worker_wake_up(phba);
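/*
 * Editor's note (illustrative sketch of the consumer side): the worker
 * thread later dequeues this evtp from phba->work_list and, for
 * LPFC_EVT_RECOVER_PORT, recovers the port and drops the node
 * reference taken above, roughly:
 *
 *	ndlp = (struct lpfc_nodelist *)evtp->evt_arg1;
 *	lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
 *	lpfc_nlp_put(ndlp);
 */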
11345 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
11346 * @phba: Pointer to HBA context object.
11347 * @iocbq: Pointer to iocb object.
11349 * The async_event handler calls this routine when it receives
11350 * an ASYNC_STATUS_CN event from the port. The port generates
11351 * this event when an Abort Sequence request to an rport fails
11352 * twice in succession. The abort could be originated by the
11353 * driver or by the port. The ABTS could have been for an ELS
11354 * or FCP IO. The port only generates this event when an ABTS
11355 * fails to complete after one retry.
11358 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
11359 struct lpfc_iocbq *iocbq)
11361 struct lpfc_nodelist *ndlp = NULL;
11362 uint16_t rpi = 0, vpi = 0;
11363 struct lpfc_vport *vport = NULL;
11365 /* The rpi in the ulpContext is vport-sensitive. */
11366 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
11367 rpi = iocbq->iocb.ulpContext;
11369 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11370 "3092 Port generated ABTS async event "
11371 "on vpi %d rpi %d status 0x%x\n",
11372 vpi, rpi, iocbq->iocb.ulpStatus);
11374 vport = lpfc_find_vport_by_vpid(phba, vpi);
11377 ndlp = lpfc_findnode_rpi(vport, rpi);
11381 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
11382 lpfc_sli_abts_recover_port(vport, ndlp);
11386 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11387 "3095 Event Context not found, no "
11388 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
11389 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
11393 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
11394 * @phba: pointer to HBA context object.
11395 * @ndlp: nodelist pointer for the impacted rport.
11396 * @axri: pointer to the wcqe containing the failed exchange.
11398 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
11399 * port. The port generates this event when an abort exchange request to an
11400 * rport fails twice in succession with no reply. The abort could be originated
11401 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
11404 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
11405 struct lpfc_nodelist *ndlp,
11406 struct sli4_wcqe_xri_aborted *axri)
11408 uint32_t ext_status = 0;
11411 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11412 "3115 Node Context not found, driver "
11413 "ignoring abts err event\n");
11417 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11418 "3116 Port generated FCP XRI ABORT event on "
11419 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
11420 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
11421 bf_get(lpfc_wcqe_xa_xri, axri),
11422 bf_get(lpfc_wcqe_xa_status, axri),
11426 * Catch the ABTS protocol failure case. Older OCe FW releases returned
11427 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
11428 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
11430 ext_status = axri->parameter & IOERR_PARAM_MASK;
11431 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
11432 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
11433 lpfc_sli_post_recovery_event(phba, ndlp);
11437 * lpfc_sli_async_event_handler - ASYNC iocb handler function
11438 * @phba: Pointer to HBA context object.
11439 * @pring: Pointer to driver SLI ring object.
11440 * @iocbq: Pointer to iocb object.
11442 * This function is called by the slow ring event handler
11443 * function when there is an ASYNC event iocb in the ring.
11444 * This function is called with no lock held.
11445 * Currently this function handles only temperature related
11446 * ASYNC events. The function decodes the temperature sensor
11447 * event message and posts events for the management applications.
11450 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
11451 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
11455 struct temp_event temp_event_data;
11456 struct Scsi_Host *shost;
11459 icmd = &iocbq->iocb;
11460 evt_code = icmd->un.asyncstat.evt_code;
11462 switch (evt_code) {
11463 case ASYNC_TEMP_WARN:
11464 case ASYNC_TEMP_SAFE:
11465 temp_event_data.data = (uint32_t) icmd->ulpContext;
11466 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
11467 if (evt_code == ASYNC_TEMP_WARN) {
11468 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
11469 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11470 "0347 Adapter is very hot, please take "
11471 "corrective action. temperature : %d Celsius\n",
11472 (uint32_t) icmd->ulpContext);
11474 temp_event_data.event_code = LPFC_NORMAL_TEMP;
11475 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11476 "0340 Adapter temperature is OK now. "
11477 "temperature : %d Celsius\n",
11478 (uint32_t) icmd->ulpContext);
11481 /* Send temperature change event to applications */
11482 shost = lpfc_shost_from_vport(phba->pport);
11483 fc_host_post_vendor_event(shost, fc_get_event_number(),
11484 sizeof(temp_event_data), (char *) &temp_event_data,
11485 LPFC_NL_VENDOR_ID);
11487 case ASYNC_STATUS_CN:
11488 lpfc_sli_abts_err_handler(phba, iocbq);
11491 iocb_w = (uint32_t *) icmd;
11492 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11493 "0346 Ring %d handler: unexpected ASYNC_STATUS"
11495 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
11496 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
11497 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
11498 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
11499 pring->ringno, icmd->un.asyncstat.evt_code,
11500 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
11501 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
11502 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
11503 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
11511 * lpfc_sli4_setup - SLI ring setup function
11512 * @phba: Pointer to HBA context object.
11514 * lpfc_sli4_setup sets up the rings of the SLI interface with
11515 * the number of iocbs per ring and iotags. This function is
11516 * called while the driver attaches to the HBA and before the
11517 * interrupts are enabled, so there is no need for locking.
11519 * This function always returns 0.
11522 lpfc_sli4_setup(struct lpfc_hba *phba)
11524 struct lpfc_sli_ring *pring;
11526 pring = phba->sli4_hba.els_wq->pring;
11527 pring->num_mask = LPFC_MAX_RING_MASK;
11528 pring->prt[0].profile = 0; /* Mask 0 */
11529 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11530 pring->prt[0].type = FC_TYPE_ELS;
11531 pring->prt[0].lpfc_sli_rcv_unsol_event =
11532 lpfc_els_unsol_event;
11533 pring->prt[1].profile = 0; /* Mask 1 */
11534 pring->prt[1].rctl = FC_RCTL_ELS_REP;
11535 pring->prt[1].type = FC_TYPE_ELS;
11536 pring->prt[1].lpfc_sli_rcv_unsol_event =
11537 lpfc_els_unsol_event;
11538 pring->prt[2].profile = 0; /* Mask 2 */
11539 /* NameServer Inquiry */
11540 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11542 pring->prt[2].type = FC_TYPE_CT;
11543 pring->prt[2].lpfc_sli_rcv_unsol_event =
11544 lpfc_ct_unsol_event;
11545 pring->prt[3].profile = 0; /* Mask 3 */
11546 /* NameServer response */
11547 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11549 pring->prt[3].type = FC_TYPE_CT;
11550 pring->prt[3].lpfc_sli_rcv_unsol_event =
11551 lpfc_ct_unsol_event;
11556 * lpfc_sli_setup - SLI ring setup function
11557 * @phba: Pointer to HBA context object.
11559 * lpfc_sli_setup sets up the rings of the SLI interface with
11560 * the number of iocbs per ring and iotags. This function is
11561 * called while the driver attaches to the HBA and before the
11562 * interrupts are enabled, so there is no need for locking.
11564 * This function always returns 0. SLI3 only.
11567 lpfc_sli_setup(struct lpfc_hba *phba)
11569 int i, totiocbsize = 0;
11570 struct lpfc_sli *psli = &phba->sli;
11571 struct lpfc_sli_ring *pring;
11573 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
11574 psli->sli_flag = 0;
11576 psli->iocbq_lookup = NULL;
11577 psli->iocbq_lookup_len = 0;
11578 psli->last_iotag = 0;
11580 for (i = 0; i < psli->num_rings; i++) {
11581 pring = &psli->sli3_ring[i];
11583 case LPFC_FCP_RING: /* ring 0 - FCP */
11584 /* numCiocb and numRiocb are used in config_port */
11585 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
11586 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
11587 pring->sli.sli3.numCiocb +=
11588 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11589 pring->sli.sli3.numRiocb +=
11590 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11591 pring->sli.sli3.numCiocb +=
11592 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11593 pring->sli.sli3.numRiocb +=
11594 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11595 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11596 SLI3_IOCB_CMD_SIZE :
11597 SLI2_IOCB_CMD_SIZE;
11598 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11599 SLI3_IOCB_RSP_SIZE :
11600 SLI2_IOCB_RSP_SIZE;
11601 pring->iotag_ctr = 0;
11603 pring->iotag_max = (phba->cfg_hba_queue_depth * 2);
11604 pring->fast_iotag = pring->iotag_max;
11605 pring->num_mask = 0;
11607 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
11608 /* numCiocb and numRiocb are used in config_port */
11609 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
11610 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
11611 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11612 SLI3_IOCB_CMD_SIZE :
11613 SLI2_IOCB_CMD_SIZE;
11614 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11615 SLI3_IOCB_RSP_SIZE :
11616 SLI2_IOCB_RSP_SIZE;
11617 pring->iotag_max = phba->cfg_hba_queue_depth;
11618 pring->num_mask = 0;
11620 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
11621 /* numCiocb and numRiocb are used in config_port */
11622 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
11623 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
11624 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11625 SLI3_IOCB_CMD_SIZE :
11626 SLI2_IOCB_CMD_SIZE;
11627 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11628 SLI3_IOCB_RSP_SIZE :
11629 SLI2_IOCB_RSP_SIZE;
11630 pring->fast_iotag = 0;
11631 pring->iotag_ctr = 0;
11632 pring->iotag_max = 4096;
11633 pring->lpfc_sli_rcv_async_status =
11634 lpfc_sli_async_event_handler;
11635 pring->num_mask = LPFC_MAX_RING_MASK;
11636 pring->prt[0].profile = 0; /* Mask 0 */
11637 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11638 pring->prt[0].type = FC_TYPE_ELS;
11639 pring->prt[0].lpfc_sli_rcv_unsol_event =
11640 lpfc_els_unsol_event;
11641 pring->prt[1].profile = 0; /* Mask 1 */
11642 pring->prt[1].rctl = FC_RCTL_ELS_REP;
11643 pring->prt[1].type = FC_TYPE_ELS;
11644 pring->prt[1].lpfc_sli_rcv_unsol_event =
11645 lpfc_els_unsol_event;
11646 pring->prt[2].profile = 0; /* Mask 2 */
11647 /* NameServer Inquiry */
11648 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11650 pring->prt[2].type = FC_TYPE_CT;
11651 pring->prt[2].lpfc_sli_rcv_unsol_event =
11652 lpfc_ct_unsol_event;
11653 pring->prt[3].profile = 0; /* Mask 3 */
11654 /* NameServer response */
11655 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11657 pring->prt[3].type = FC_TYPE_CT;
11658 pring->prt[3].lpfc_sli_rcv_unsol_event =
11659 lpfc_ct_unsol_event;
11662 totiocbsize += (pring->sli.sli3.numCiocb *
11663 pring->sli.sli3.sizeCiocb) +
11664 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
11666 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
11667 /* Too many cmd / rsp ring entries in SLI2 SLIM */
11668 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
11669 "SLI2 SLIM Data: x%x x%lx\n",
11670 phba->brd_no, totiocbsize,
11671 (unsigned long) MAX_SLIM_IOCB_SIZE);
11673 if (phba->cfg_multi_ring_support == 2)
11674 lpfc_extra_ring_setup(phba);
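/*
 * Worked example (editor's sketch; entry counts are hypothetical): the
 * overflow check above just sums command and response iocb bytes
 * across all rings.  For one ring with 128 cmd and 128 rsp entries on
 * an SLI-3 port:
 *
 *	totiocbsize += 128 * SLI3_IOCB_CMD_SIZE
 *		     + 128 * SLI3_IOCB_RSP_SIZE;
 *
 * and the layout is flagged if the total exceeds MAX_SLIM_IOCB_SIZE.
 */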
11680 * lpfc_sli4_queue_init - Queue initialization function
11681 * @phba: Pointer to HBA context object.
11683 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
11684 * ring. This function also initializes ring indices of each ring.
11685 * This function is called during the initialization of the SLI
11686 * interface of an HBA.
11687 * This function is called with no lock held.
11691 lpfc_sli4_queue_init(struct lpfc_hba *phba)
11693 struct lpfc_sli *psli;
11694 struct lpfc_sli_ring *pring;
11698 spin_lock_irq(&phba->hbalock);
11699 INIT_LIST_HEAD(&psli->mboxq);
11700 INIT_LIST_HEAD(&psli->mboxq_cmpl);
11701 /* Initialize list headers for txq and txcmplq as doubly linked lists */
11702 for (i = 0; i < phba->cfg_hdw_queue; i++) {
11703 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
11705 pring->ringno = LPFC_FCP_RING;
11706 pring->txcmplq_cnt = 0;
11707 INIT_LIST_HEAD(&pring->txq);
11708 INIT_LIST_HEAD(&pring->txcmplq);
11709 INIT_LIST_HEAD(&pring->iocb_continueq);
11710 spin_lock_init(&pring->ring_lock);
11712 pring = phba->sli4_hba.els_wq->pring;
11714 pring->ringno = LPFC_ELS_RING;
11715 pring->txcmplq_cnt = 0;
11716 INIT_LIST_HEAD(&pring->txq);
11717 INIT_LIST_HEAD(&pring->txcmplq);
11718 INIT_LIST_HEAD(&pring->iocb_continueq);
11719 spin_lock_init(&pring->ring_lock);
11721 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11722 pring = phba->sli4_hba.nvmels_wq->pring;
11724 pring->ringno = LPFC_ELS_RING;
11725 pring->txcmplq_cnt = 0;
11726 INIT_LIST_HEAD(&pring->txq);
11727 INIT_LIST_HEAD(&pring->txcmplq);
11728 INIT_LIST_HEAD(&pring->iocb_continueq);
11729 spin_lock_init(&pring->ring_lock);
11732 spin_unlock_irq(&phba->hbalock);
11736 * lpfc_sli_queue_init - Queue initialization function
11737 * @phba: Pointer to HBA context object.
11739 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
11740 * ring. This function also initializes ring indices of each ring.
11741 * This function is called during the initialization of the SLI
11742 * interface of an HBA.
 * This function is called with no lock held.
 **/
void
11747 lpfc_sli_queue_init(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
11755 INIT_LIST_HEAD(&psli->mboxq);
11756 INIT_LIST_HEAD(&psli->mboxq_cmpl);
11757 /* Initialize list headers for txq and txcmplq as double linked lists */
11758 for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		pring->ringno = i;
11761 pring->sli.sli3.next_cmdidx = 0;
11762 pring->sli.sli3.local_getidx = 0;
11763 pring->sli.sli3.cmdidx = 0;
11764 INIT_LIST_HEAD(&pring->iocb_continueq);
11765 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
11766 INIT_LIST_HEAD(&pring->postbufq);
11768 INIT_LIST_HEAD(&pring->txq);
11769 INIT_LIST_HEAD(&pring->txcmplq);
		spin_lock_init(&pring->ring_lock);
	}

	spin_unlock_irq(&phba->hbalock);
}
11776 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
11777 * @phba: Pointer to HBA context object.
11779 * This routine flushes the mailbox command subsystem. It will unconditionally
11780 * flush all the mailbox commands in the three possible stages in the mailbox
11781 * command sub-system: pending mailbox command queue; the outstanding mailbox
 * command; and the completed mailbox command queue. It is the caller's responsibility
11783 * to make sure that the driver is in the proper state to flush the mailbox
11784 * command sub-system. Namely, the posting of mailbox commands into the
11785 * pending mailbox command queue from the various clients must be stopped;
 * either the HBA is in a state that it will never work on the outstanding
11787 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
11788 * mailbox command has been completed.
11791 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	unsigned long iflag;
11798 /* Disable softirqs, including timers from obtaining phba->hbalock */
11799 local_bh_disable();
11801 /* Flush all the mailbox commands in the mbox system */
11802 spin_lock_irqsave(&phba->hbalock, iflag);
11804 /* The pending mailbox command queue */
11805 list_splice_init(&phba->sli.mboxq, &completions);
11806 /* The outstanding active mailbox command */
11807 if (psli->mbox_active) {
11808 list_add_tail(&psli->mbox_active->list, &completions);
11809 psli->mbox_active = NULL;
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}
11812 /* The completed mailbox command queue */
11813 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11814 spin_unlock_irqrestore(&phba->hbalock, iflag);
	/* Enable softirqs again, done with phba->hbalock */
	local_bh_enable();
11819 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
11820 while (!list_empty(&completions)) {
11821 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11822 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11823 if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}
11829 * lpfc_sli_host_down - Vport cleanup function
11830 * @vport: Pointer to virtual port object.
11832 * lpfc_sli_host_down is called to clean up the resources
11833 * associated with a vport before destroying virtual
11834 * port data structures.
 * This function does the following operations:
 * - Free discovery resources associated with this virtual
 *   port.
 * - Free iocbs associated with this virtual port in
 *   the txq.
 * - Send abort for all iocb commands associated with this
 *   vport in txcmplq.
 *
11843 * This function is called with no lock held and always returns 1.
11846 lpfc_sli_host_down(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	int i;
	unsigned long flags = 0;
	uint16_t prev_pring_flag;
11858 lpfc_cleanup_discovery_resources(vport);
11860 spin_lock_irqsave(&phba->hbalock, flags);
11863 * Error everything on the txq since these iocbs
11864 * have not been given to the FW yet.
11865 * Also issue ABTS for everything on the txcmplq
11867 if (phba->sli_rev != LPFC_SLI_REV4) {
11868 for (i = 0; i < psli->num_rings; i++) {
11869 pring = &psli->sli3_ring[i];
11870 prev_pring_flag = pring->flag;
11871 /* Only slow rings */
11872 if (pring->ringno == LPFC_ELS_RING) {
11873 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11874 /* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
11877 list_for_each_entry_safe(iocb, next_iocb,
11878 &pring->txq, list) {
				if (iocb->vport != vport)
					continue;
				list_move_tail(&iocb->list, &completions);
11883 list_for_each_entry_safe(iocb, next_iocb,
11884 &pring->txcmplq, list) {
				if (iocb->vport != vport)
					continue;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
							   NULL);
			pring->flag = prev_pring_flag;
		}
	} else {
		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
			pring = qp->pring;
			if (!pring)
				continue;
11897 if (pring == phba->sli4_hba.els_wq->pring) {
11898 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11899 /* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
11902 prev_pring_flag = pring->flag;
11903 spin_lock(&pring->ring_lock);
11904 list_for_each_entry_safe(iocb, next_iocb,
11905 &pring->txq, list) {
			if (iocb->vport != vport)
				continue;
			list_move_tail(&iocb->list, &completions);
11910 spin_unlock(&pring->ring_lock);
11911 list_for_each_entry_safe(iocb, next_iocb,
11912 &pring->txcmplq, list) {
			if (iocb->vport != vport)
				continue;
			lpfc_sli_issue_abort_iotag(phba, pring, iocb,
						   NULL);
			pring->flag = prev_pring_flag;
		}
	}
11921 spin_unlock_irqrestore(&phba->hbalock, flags);
11923 /* Make sure HBA is alive */
11924 lpfc_issue_hb_tmo(phba);
11926 /* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
	return 1;
}
11933 * lpfc_sli_hba_down - Resource cleanup function for the HBA
11934 * @phba: Pointer to HBA context object.
11936 * This function cleans up all iocb, buffers, mailbox commands
11937 * while shutting down the HBA. This function is called with no
11938 * lock held and always returns 1.
11939 * This function does the following to cleanup driver resources:
11940 * - Free discovery resources for each virtual port
11941 * - Cleanup any pending fabric iocbs
11942 * - Iterate through the iocb txq and free each entry
11944 * - Free up any buffer posted to the HBA
11945 * - Free mailbox commands in the mailbox queue.
11948 lpfc_sli_hba_down(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *buf_ptr;
	unsigned long flags = 0;
	int i;
11958 /* Shutdown the mailbox command sub-system */
11959 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
11961 lpfc_hba_down_prep(phba);
11963 /* Disable softirqs, including timers from obtaining phba->hbalock */
11964 local_bh_disable();
11966 lpfc_fabric_abort_hba(phba);
11968 spin_lock_irqsave(&phba->hbalock, flags);
11971 * Error everything on the txq since these iocbs
11972 * have not been given to the FW yet.
11974 if (phba->sli_rev != LPFC_SLI_REV4) {
11975 for (i = 0; i < psli->num_rings; i++) {
11976 pring = &psli->sli3_ring[i];
11977 /* Only slow rings */
11978 if (pring->ringno == LPFC_ELS_RING) {
11979 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11980 /* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			list_splice_init(&pring->txq, &completions);
		}
	} else {
		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
			pring = qp->pring;
			if (!pring)
				continue;
11990 spin_lock(&pring->ring_lock);
11991 list_splice_init(&pring->txq, &completions);
11992 spin_unlock(&pring->ring_lock);
11993 if (pring == phba->sli4_hba.els_wq->pring) {
11994 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11995 /* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
		}
	}
12000 spin_unlock_irqrestore(&phba->hbalock, flags);
12002 /* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
12006 spin_lock_irqsave(&phba->hbalock, flags);
12007 list_splice_init(&phba->elsbuf, &completions);
12008 phba->elsbuf_cnt = 0;
12009 phba->elsbuf_prev_cnt = 0;
12010 spin_unlock_irqrestore(&phba->hbalock, flags);
12012 while (!list_empty(&completions)) {
12013 list_remove_head(&completions, buf_ptr,
12014 struct lpfc_dmabuf, list);
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}
	/* Enable softirqs again, done with phba->hbalock */
	local_bh_enable();
12022 /* Return any active mbox cmds */
12023 del_timer_sync(&psli->mbox_tmo);
12025 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
12026 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	return 1;
}
12033 * lpfc_sli_pcimem_bcopy - SLI memory copy function
12034 * @srcp: Source memory pointer.
12035 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to be copied.
12038 * This function is used for copying data between driver memory
12039 * and the SLI memory. This function also changes the endianness
12040 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}
12063 * lpfc_sli_bemem_bcopy - SLI memory copy function
12064 * @srcp: Source memory pointer.
12065 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to be copied.
 * This function is used for copying data from a data structure
 * with big endian representation to local endianness.
12070 * This function can be called with or without lock.
 **/
void
lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = be32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}
12090 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
12091 * @phba: Pointer to HBA context object.
12092 * @pring: Pointer to driver SLI ring object.
12093 * @mp: Pointer to driver buffer object.
12095 * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq
 * ring.
12100 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12101 struct lpfc_dmabuf *mp)
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
	 * later.
	 */
12105 spin_lock_irq(&phba->hbalock);
12106 list_add_tail(&mp->list, &pring->postbufq);
12107 pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}
12113 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
12114 * @phba: Pointer to HBA context object.
12116 * When HBQ is enabled, buffers are searched based on tags. This function
12117 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
12118 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
12119 * does not conflict with tags of buffer posted for unsolicited events.
 * The function returns the allocated tag. The function is called with
 * no locks held.
 **/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
12127 phba->buffer_tag_count++;
	/*
	 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
	 * a tag assigned by HBQ.
	 */
12132 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
12133 spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}
12138 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
12139 * @phba: Pointer to HBA context object.
12140 * @pring: Pointer to driver SLI ring object.
12141 * @tag: Buffer tag.
12143 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
12144 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
12145 * iocb is posted to the response ring with the tag of the buffer.
12146 * This function searches the pring->postbufq list using the tag
12147 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
12148 * iocb. If the buffer is found then lpfc_dmabuf object of the
12149 * buffer is returned to the caller else NULL is returned.
12150 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    uint32_t tag)
{
12156 struct lpfc_dmabuf *mp, *next_mp;
12157 struct list_head *slp = &pring->postbufq;
12159 /* Search postbufq, from the beginning, looking for a match on tag */
12160 spin_lock_irq(&phba->hbalock);
12161 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
12162 if (mp->buffer_tag == tag) {
12163 list_del_init(&mp->list);
12164 pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}
12170 spin_unlock_irq(&phba->hbalock);
12171 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12172 "0402 Cannot find virtual addr for buffer tag on "
12173 "ring %d Data x%lx x%px x%px x%x\n",
12174 pring->ringno, (unsigned long) tag,
			slp->next, slp->prev, pring->postbufq_cnt);

	return NULL;
}
12181 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
12182 * @phba: Pointer to HBA context object.
12183 * @pring: Pointer to driver SLI ring object.
12184 * @phys: DMA address of the buffer.
12186 * This function searches the buffer list using the dma_address
12187 * of unsolicited event to find the driver's lpfc_dmabuf object
12188 * corresponding to the dma_address. The function returns the
12189 * lpfc_dmabuf object if a buffer is found else it returns NULL.
12190 * This function is called by the ct and els unsolicited event
 * handlers to get the buffer associated with the unsolicited
 * event.
 *
12194 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 dma_addr_t phys)
{
12200 struct lpfc_dmabuf *mp, *next_mp;
12201 struct list_head *slp = &pring->postbufq;
12203 /* Search postbufq, from the beginning, looking for a match on phys */
12204 spin_lock_irq(&phba->hbalock);
12205 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
12206 if (mp->phys == phys) {
12207 list_del_init(&mp->list);
12208 pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}
12214 spin_unlock_irq(&phba->hbalock);
12215 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12216 "0410 Cannot find virtual addr for mapped buf on "
12217 "ring %d Data x%llx x%px x%px x%x\n",
12218 pring->ringno, (unsigned long long)phys,
			slp->next, slp->prev, pring->postbufq_cnt);

	return NULL;
}
12224 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
12225 * @phba: Pointer to HBA context object.
12226 * @cmdiocb: Pointer to driver command iocb object.
12227 * @rspiocb: Pointer to driver response iocb object.
12229 * This function is the completion handler for the abort iocbs for
12230 * ELS commands. This function is called from the ELS ring event
12231 * handler with no lock held. This function frees memory resources
12232 * associated with the abort iocb.
12235 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12236 struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;
12239 uint16_t abort_iotag, abort_context;
12240 struct lpfc_iocbq *abort_iocb = NULL;
	if (irsp->ulpStatus) {

		/*
		 * Assume that the port already completed and returned, or
		 * will return the iocb. Just log the message.
		 */
12248 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
12249 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
12251 spin_lock_irq(&phba->hbalock);
12252 if (phba->sli_rev < LPFC_SLI_REV4) {
12253 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
12254 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
12255 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
				spin_unlock_irq(&phba->hbalock);
				goto release_iocb;
			}
			if (abort_iotag != 0 &&
				abort_iotag <= phba->sli.last_iotag)
				abort_iocb =
					phba->sli.iocbq_lookup[abort_iotag];
		} else
			/* For sli4 the abort_tag is the XRI,
			 * so the abort routine puts the iotag of the iocb
			 * being aborted in the context field of the abort
			 * IOCB.
			 */
12269 abort_iocb = phba->sli.iocbq_lookup[abort_context];
12271 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
12272 "0327 Cannot abort els iocb x%px "
12273 "with tag %x context %x, abort status %x, "
				"abort code %x\n",
				abort_iocb, abort_iotag, abort_context,
12276 irsp->ulpStatus, irsp->un.ulpWord[4]);
		spin_unlock_irq(&phba->hbalock);
	}

release_iocb:
	lpfc_sli_release_iocbq(phba, cmdiocb);
}
12286 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
12287 * @phba: Pointer to HBA context object.
12288 * @cmdiocb: Pointer to driver command iocb object.
12289 * @rspiocb: Pointer to driver response iocb object.
12291 * The function is called from SLI ring event handler with no
12292 * lock held. This function is the completion handler for ELS commands
12293 * which are aborted. The function frees memory resources used for
12294 * the aborted ELS commands.
12297 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12298 struct lpfc_iocbq *rspiocb)
{
	struct lpfc_nodelist *ndlp = NULL;
12301 IOCB_t *irsp = &rspiocb->iocb;
12303 /* ELS cmd tag <ulpIoTag> completes */
12304 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
12305 "0139 Ignoring ELS cmd code x%x completion Data: "
			"x%x x%x x%x x%x\n",
			irsp->ulpIoTag, irsp->ulpStatus,
12308 irsp->un.ulpWord[4], irsp->ulpTimeout);
	/*
	 * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
	 * if exchange is busy.
	 */
12313 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
12314 ndlp = cmdiocb->context_un.ndlp;
		lpfc_ct_free_iocb(phba, cmdiocb);
	} else {
		ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
		lpfc_els_free_iocb(phba, cmdiocb);
	}

	lpfc_nlp_put(ndlp);
}
12325 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
12326 * @phba: Pointer to HBA context object.
12327 * @pring: Pointer to driver SLI ring object.
12328 * @cmdiocb: Pointer to driver command iocb object.
12329 * @cmpl: completion function.
12331 * This function issues an abort iocb for the provided command iocb. In case
12332 * of unloading, the abort iocb will not be issued to commands on the ELS
 * ring. Instead, the completion callback is changed for those commands
 * so that nothing happens when they finish. This function is called with
 * hbalock held and no ring_lock held (SLI4). The function returns IOCB_SUCCESS
12336 * when the command iocb is an abort request.
12340 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12341 struct lpfc_iocbq *cmdiocb, void *cmpl)
{
	struct lpfc_vport *vport = cmdiocb->vport;
12344 struct lpfc_iocbq *abtsiocbp;
12345 IOCB_t *icmd = NULL;
12346 IOCB_t *iabt = NULL;
12347 int retval = IOCB_ERROR;
12348 unsigned long iflags;
12349 struct lpfc_nodelist *ndlp;
	/*
	 * There are certain command types we don't want to abort. And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
12356 icmd = &cmdiocb->iocb;
12357 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
12358 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
12359 cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED)
12360 return IOCB_ABORTING;
	if (!pring) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		return retval;
	}
12371 * If we're unloading, don't abort iocb on the ELS ring, but change
12372 * the callback so that nothing happens when it finishes.
12374 if ((vport->load_flag & FC_UNLOADING) &&
12375 pring->ringno == LPFC_ELS_RING) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		return retval;
	}
12383 /* issue ABTS for this IOCB based on iotag */
12384 abtsiocbp = __lpfc_sli_get_iocbq(phba);
12385 if (abtsiocbp == NULL)
12386 return IOCB_NORESOURCE;
12388 /* This signals the response to set the correct status
12389 * before calling the completion handler
12391 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
12393 iabt = &abtsiocbp->iocb;
12394 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
12395 iabt->un.acxri.abortContextTag = icmd->ulpContext;
12396 if (phba->sli_rev == LPFC_SLI_REV4) {
12397 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
12398 if (pring->ringno == LPFC_ELS_RING)
12399 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
	} else {
		iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
12402 if (pring->ringno == LPFC_ELS_RING) {
12403 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
			iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
		}
	}
	iabt->ulpLe = 1;
	iabt->ulpClass = icmd->ulpClass;
12410 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12411 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
12412 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
12413 abtsiocbp->iocb_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
12414 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
12415 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
12417 if (phba->link_state < LPFC_LINK_UP ||
12418 (phba->sli_rev == LPFC_SLI_REV4 &&
12419 phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN))
12420 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
	else
		iabt->ulpCommand = CMD_ABORT_XRI_CN;
	if (cmpl)
		abtsiocbp->iocb_cmpl = cmpl;
	else
		abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
12428 abtsiocbp->vport = vport;
12430 if (phba->sli_rev == LPFC_SLI_REV4) {
12431 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
12432 if (unlikely(pring == NULL))
12433 goto abort_iotag_exit;
12434 /* Note: both hbalock and ring_lock need to be set here */
12435 spin_lock_irqsave(&pring->ring_lock, iflags);
		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
					       abtsiocbp, 0);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	} else {
		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
					       abtsiocbp, 0);
	}
abort_iotag_exit:

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
12447 "0339 Abort xri x%x, original iotag x%x, "
12448 "abort cmd iotag x%x retval x%x\n",
12449 iabt->un.acxri.abortIoTag,
12450 iabt->un.acxri.abortContextTag,
12451 abtsiocbp->iotag, retval);
	if (retval) {
		cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		__lpfc_sli_release_iocbq(phba, abtsiocbp);
	}
	/*
	 * Caller to this routine should check for IOCB_ERROR
	 * and handle it properly. This routine no longer removes
	 * the iocb from the txcmplq or calls compl in case of IOCB_ERROR.
	 */
	return retval;
}
12467 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
12468 * @phba: pointer to lpfc HBA data structure.
12470 * This routine will abort all pending and outstanding iocbs to an HBA.
12473 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
12475 struct lpfc_sli *psli = &phba->sli;
12476 struct lpfc_sli_ring *pring;
12477 struct lpfc_queue *qp = NULL;
12480 if (phba->sli_rev != LPFC_SLI_REV4) {
12481 for (i = 0; i < psli->num_rings; i++) {
12482 pring = &psli->sli3_ring[i];
12483 lpfc_sli_abort_iocb_ring(phba, pring);
12487 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12491 lpfc_sli_abort_iocb_ring(phba, pring);
12496 * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
12497 * @iocbq: Pointer to iocb object.
12498 * @vport: Pointer to driver virtual port object.
12500 * This function acts as an iocb filter for functions which abort FCP iocbs.
12503 * -ENODEV, if a null iocb or vport ptr is encountered
 * -EINVAL, if the iocb is not an FCP I/O, is not on the TX cmpl queue, is
 *          already marked by the driver as being aborted, or is an abort
 *          iocb itself
12506 * 0, passes criteria for aborting the FCP I/O iocb
12509 lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
12510 struct lpfc_vport *vport)
{
	IOCB_t *icmd = NULL;
12514 /* No null ptr vports */
	if (!iocbq || iocbq->vport != vport)
		return -ENODEV;
12518 /* iocb must be for FCP IO, already exists on the TX cmpl queue,
12519 * can't be premarked as driver aborted, nor be an ABORT iocb itself
12521 icmd = &iocbq->iocb;
12522 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
12523 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
12524 (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
12525 (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	     icmd->ulpCommand == CMD_CLOSE_XRI_CN))
		return -EINVAL;

	return 0;
}
12533 * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
12534 * @iocbq: Pointer to driver iocb object.
12535 * @vport: Pointer to driver virtual port object.
12536 * @tgt_id: SCSI ID of the target.
12537 * @lun_id: LUN ID of the scsi device.
12538 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
 * This function acts as an iocb filter for validating a lun/SCSI target/SCSI
 * host. It will return
 * 0 if the filtering criteria are met for the given iocb and will return
 * 1 if the filtering criteria are not met.
12546 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
12547 * given iocb is for the SCSI device specified by vport, tgt_id and
12548 * lun_id parameter.
12549 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
 * given iocb is for the SCSI target specified by vport and tgt_id
 * parameters.
12552 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
12553 * given iocb is for the SCSI host associated with the given vport.
12554 * This function is called with no locks held.
12557 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
12558 uint16_t tgt_id, uint64_t lun_id,
12559 lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_io_buf *lpfc_cmd;
	int rc = 1;

	lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);

	if (lpfc_cmd->pCmd == NULL)
		return rc;

	switch (ctx_cmd) {
	case LPFC_CTX_LUN:
		if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
			rc = 0;
		break;
	case LPFC_CTX_TGT:
		if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
			rc = 0;
		break;
	case LPFC_CTX_HOST:
		rc = 0;
		break;
	default:
		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
		       __func__, ctx_cmd);
		break;
	}

	return rc;
}
12594 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
12595 * @vport: Pointer to virtual port.
12596 * @tgt_id: SCSI ID of the target.
12597 * @lun_id: LUN ID of the scsi device.
12598 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12600 * This function returns number of FCP commands pending for the vport.
12601 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
12602 * commands pending on the vport associated with SCSI device specified
12603 * by tgt_id and lun_id parameters.
12604 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
12605 * commands pending on the vport associated with SCSI target specified
12606 * by tgt_id parameter.
12607 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
12608 * commands pending on the vport.
12609 * This function returns the number of iocbs which satisfy the filter.
12610 * This function is called without any lock held.
12613 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
12614 lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	IOCB_t *icmd = NULL;
	int sum, i;
	unsigned long iflags;
12622 spin_lock_irqsave(&phba->hbalock, iflags);
12623 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
12624 iocbq = phba->sli.iocbq_lookup[i];
		if (!iocbq || iocbq->vport != vport)
			continue;
		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
		    !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
			continue;

		/* Include counting outstanding aborts */
		icmd = &iocbq->iocb;
		if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
		    icmd->ulpCommand == CMD_CLOSE_XRI_CN) {
			sum++;
			continue;
		}

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       ctx_cmd) == 0)
			sum++;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return sum;
}
12650 * lpfc_sli4_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
12651 * @phba: Pointer to HBA context object
12652 * @cmdiocb: Pointer to command iocb object.
12653 * @wcqe: pointer to the complete wcqe
12655 * This function is called when an aborted FCP iocb completes. This
12656 * function is called by the ring event handler with no lock held.
12657 * This function frees the iocb. It is called for sli-4 adapters.
12660 lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12661 struct lpfc_wcqe_complete *wcqe)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12664 "3017 ABORT_XRI_CN completing on rpi x%x "
12665 "original iotag x%x, abort cmd iotag x%x "
12666 "status 0x%x, reason 0x%x\n",
12667 cmdiocb->iocb.un.acxri.abortContextTag,
12668 cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag,
			(bf_get(lpfc_wcqe_c_status, wcqe)
			 & LPFC_IOCB_STATUS_MASK),
			wcqe->parameter);
	lpfc_sli_release_iocbq(phba, cmdiocb);
}
12677 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
12678 * @phba: Pointer to HBA context object
12679 * @cmdiocb: Pointer to command iocb object.
12680 * @rspiocb: Pointer to response iocb object.
12682 * This function is called when an aborted FCP iocb completes. This
12683 * function is called by the ring event handler with no lock held.
12684 * This function frees the iocb.
12687 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12688 struct lpfc_iocbq *rspiocb)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12691 "3096 ABORT_XRI_CN completing on rpi x%x "
12692 "original iotag x%x, abort cmd iotag x%x "
12693 "status 0x%x, reason 0x%x\n",
12694 cmdiocb->iocb.un.acxri.abortContextTag,
12695 cmdiocb->iocb.un.acxri.abortIoTag,
12696 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
12697 rspiocb->iocb.un.ulpWord[4]);
	lpfc_sli_release_iocbq(phba, cmdiocb);
}
12703 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
12704 * @vport: Pointer to virtual port.
12705 * @tgt_id: SCSI ID of the target.
12706 * @lun_id: LUN ID of the scsi device.
12707 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12709 * This function sends an abort command for every SCSI command
12710 * associated with the given virtual port pending on the ring
12711 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
12712 * lpfc_sli_validate_fcp_iocb function. The ordering for validation before
12713 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12714 * followed by lpfc_sli_validate_fcp_iocb.
12716 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
12717 * FCP iocbs associated with lun specified by tgt_id and lun_id
12719 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
12720 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12721 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
12722 * FCP iocbs associated with virtual port.
12723 * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
12724 * lpfc_sli4_calc_ring is used.
 * This function returns the number of iocbs it failed to abort.
12726 * This function is called with no locks held.
12729 lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
12730 lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring = NULL;
	struct lpfc_iocbq *iocbq;
	int errcnt = 0, ret_val = 0;
	unsigned long iflags;
	int i;
	void *fcp_cmpl = NULL;
12740 /* all I/Os are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH)
		return errcnt;
12744 for (i = 1; i <= phba->sli.last_iotag; i++) {
12745 iocbq = phba->sli.iocbq_lookup[i];
		if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
			continue;

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       abort_cmd))
			continue;
12754 spin_lock_irqsave(&phba->hbalock, iflags);
12755 if (phba->sli_rev == LPFC_SLI_REV3) {
12756 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12757 fcp_cmpl = lpfc_sli_abort_fcp_cmpl;
12758 } else if (phba->sli_rev == LPFC_SLI_REV4) {
12759 pring = lpfc_sli4_calc_ring(phba, iocbq);
12760 fcp_cmpl = lpfc_sli4_abort_fcp_cmpl;
		}
		ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
						     fcp_cmpl);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		if (ret_val != IOCB_SUCCESS)
			errcnt++;
	}

	return errcnt;
}
12773 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
12774 * @vport: Pointer to virtual port.
12775 * @pring: Pointer to driver SLI ring object.
12776 * @tgt_id: SCSI ID of the target.
12777 * @lun_id: LUN ID of the scsi device.
12778 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12780 * This function sends an abort command for every SCSI command
12781 * associated with the given virtual port pending on the ring
12782 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
12783 * lpfc_sli_validate_fcp_iocb function. The ordering for validation before
12784 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12785 * followed by lpfc_sli_validate_fcp_iocb.
12787 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
12788 * FCP iocbs associated with lun specified by tgt_id and lun_id
12790 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
12791 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12792 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
12793 * FCP iocbs associated with virtual port.
 * This function returns the number of iocbs it aborted.
 * This function is called with no locks held right after a taskmgmt
 * command is sent.
12799 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
12800 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_iocbq *abtsiocbq;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *iocbq;
	IOCB_t *icmd;
	int sum, i, ret_val;
12809 unsigned long iflags;
12810 struct lpfc_sli_ring *pring_s4 = NULL;
12812 spin_lock_irqsave(&phba->hbalock, iflags);
12814 /* all I/Os are in process of being flushed */
12815 if (phba->hba_flag & HBA_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return 0;
	}

	sum = 0;
12821 for (i = 1; i <= phba->sli.last_iotag; i++) {
12822 iocbq = phba->sli.iocbq_lookup[i];
		if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
			continue;

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       cmd))
			continue;
12831 /* Guard against IO completion being called at same time */
12832 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12833 spin_lock(&lpfc_cmd->buf_lock);
12835 if (!lpfc_cmd->pCmd) {
			spin_unlock(&lpfc_cmd->buf_lock);
			continue;
		}
		if (phba->sli_rev == LPFC_SLI_REV4) {
			pring_s4 =
			    phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
			if (!pring_s4) {
				spin_unlock(&lpfc_cmd->buf_lock);
				continue;
			}
			/* Note: both hbalock and ring_lock must be set here */
			spin_lock(&pring_s4->ring_lock);
		}
12852 * If the iocbq is already being aborted, don't take a second
12853 * action, but do count it.
12855 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
12856 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
12857 if (phba->sli_rev == LPFC_SLI_REV4)
12858 spin_unlock(&pring_s4->ring_lock);
			spin_unlock(&lpfc_cmd->buf_lock);
			continue;
		}
12863 /* issue ABTS for this IOCB based on iotag */
		abtsiocbq = __lpfc_sli_get_iocbq(phba);
		if (!abtsiocbq) {
			if (phba->sli_rev == LPFC_SLI_REV4)
				spin_unlock(&pring_s4->ring_lock);
			spin_unlock(&lpfc_cmd->buf_lock);
			continue;
		}
12872 icmd = &iocbq->iocb;
12873 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
12874 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
12875 if (phba->sli_rev == LPFC_SLI_REV4)
12876 abtsiocbq->iocb.un.acxri.abortIoTag =
12877 iocbq->sli4_xritag;
		else
			abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
12880 abtsiocbq->iocb.ulpLe = 1;
12881 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
12882 abtsiocbq->vport = vport;
12884 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12885 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
12886 if (iocbq->iocb_flag & LPFC_IO_FCP)
12887 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
12888 if (iocbq->iocb_flag & LPFC_IO_FOF)
12889 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
12891 ndlp = lpfc_cmd->rdata->pnode;
12893 if (lpfc_is_link_up(phba) &&
12894 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
12895 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
12899 /* Setup callback routine and issue the command. */
12900 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		/*
		 * Indicate the IO is being aborted by the driver and set
		 * the caller's flag into the aborted IO.
		 */
12906 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
12908 if (phba->sli_rev == LPFC_SLI_REV4) {
			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
							abtsiocbq, 0);
			spin_unlock(&pring_s4->ring_lock);
		} else {
			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
							abtsiocbq, 0);
		}
12917 spin_unlock(&lpfc_cmd->buf_lock);
12919 if (ret_val == IOCB_ERROR)
			__lpfc_sli_release_iocbq(phba, abtsiocbq);
		else
			sum++;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return sum;
}
12929 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
12930 * @phba: Pointer to HBA context object.
12931 * @cmdiocbq: Pointer to command iocb.
12932 * @rspiocbq: Pointer to response iocb.
12934 * This function is the completion handler for iocbs issued using
12935 * lpfc_sli_issue_iocb_wait function. This function is called by the
12936 * ring event handler function without any lock held. This function
12937 * can be called from both worker thread context and interrupt
 * context. This function can also be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
12941 * response iocb memory object provided by the caller of
12942 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
12943 * sleeps for the iocb completion.
12946 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
12947 struct lpfc_iocbq *cmdiocbq,
12948 struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
12951 unsigned long iflags;
12952 struct lpfc_io_buf *lpfc_cmd;
12954 spin_lock_irqsave(&phba->hbalock, iflags);
12955 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
		/*
		 * A time out has occurred for the iocb. If a time out
		 * completion handler has been supplied, call it. Otherwise,
		 * just free the iocbq.
		 */
12963 spin_unlock_irqrestore(&phba->hbalock, iflags);
12964 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
12965 cmdiocbq->wait_iocb_cmpl = NULL;
		if (cmdiocbq->iocb_cmpl)
			(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
		else
			lpfc_sli_release_iocbq(phba, cmdiocbq);
		return;
	}
12973 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
12974 if (cmdiocbq->context2 && rspiocbq)
12975 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
12976 &rspiocbq->iocb, sizeof(IOCB_t));
12978 /* Set the exchange busy flag for task management commands */
12979 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
12980 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
					cur_iocbq);
12983 if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
12984 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
12986 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}
12997 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
12998 * @phba: Pointer to HBA context object..
12999 * @piocbq: Pointer to command iocb.
13000 * @flag: Flag to test.
 * This routine grabs the hbalock and then tests the iocb_flag to
 * see if the passed in flag is set.
 * Returns:
13005 * 1 if flag is set.
13006 * 0 if flag is not set.
13009 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
13010 struct lpfc_iocbq *piocbq, uint32_t flag)
{
	unsigned long iflags;
	int ret;
13015 spin_lock_irqsave(&phba->hbalock, iflags);
13016 ret = piocbq->iocb_flag & flag;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}
13023 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
13024 * @phba: Pointer to HBA context object..
13025 * @ring_number: Ring number
13026 * @piocb: Pointer to command iocb.
13027 * @prspiocbq: Pointer to response iocb.
13028 * @timeout: Timeout in number of seconds.
13030 * This function issues the iocb to firmware and waits for the
 * iocb to complete. The iocb_cmpl field of the iocb shall be used
13032 * to handle iocbs which time out. If the field is NULL, the
13033 * function shall free the iocbq structure. If more clean up is
13034 * needed, the caller is expected to provide a completion function
13035 * that will provide the needed clean up. If the iocb command is
13036 * not completed within timeout seconds, the function will either
13037 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
13038 * completion function set in the iocb_cmpl field and then return
13039 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
13040 * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
13042 * non-interruptible wait.
13043 * This function will sleep while waiting for iocb completion.
13044 * So, this function should not be called from any context which
13045 * does not allow sleeping. Due to the same reason, this function
13046 * cannot be called with interrupt disabled.
 * This function assumes that the iocb completions occur while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completion for this ring.
13050 * This function clears the iocb_flag of the iocb object before
13051 * issuing the iocb and the iocb completion handler sets this
13052 * flag and wakes this thread when the iocb completes.
13053 * The contents of the response iocb will be copied to prspiocbq
13054 * by the completion handler when the command completes.
13055 * This function returns IOCB_SUCCESS when success.
13056 * This function is called with no lock held.
13059 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
13060 uint32_t ring_number,
13061 struct lpfc_iocbq *piocb,
13062 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
13066 long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
13069 struct lpfc_iocbq *iocb;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
13072 struct lpfc_sli_ring *pring;
13073 unsigned long iflags;
13074 bool iocb_completed = true;
13076 if (phba->sli_rev >= LPFC_SLI_REV4)
13077 pring = lpfc_sli4_calc_ring(phba, piocb);
13079 pring = &phba->sli.sli3_ring[ring_number];
	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * is NULL or it's an error.
	 */
	if (prspiocbq) {
		if (piocb->context2)
			return IOCB_ERROR;
		piocb->context2 = prspiocbq;
	}
13090 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
13091 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
13092 piocb->context_un.wait_queue = &done_q;
13093 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
13095 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
13098 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
13099 writel(creg_val, phba->HCregaddr);
13100 readl(phba->HCregaddr); /* flush */
13103 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
13104 SLI_IOCB_RET_IOCB);
13105 if (retval == IOCB_SUCCESS) {
13106 timeout_req = msecs_to_jiffies(timeout * 1000);
13107 timeleft = wait_event_timeout(done_q,
					lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
					timeout_req);
13110 spin_lock_irqsave(&phba->hbalock, iflags);
13111 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
			/*
			 * IOCB timed out. Inform the wake iocb wait
			 * completion function and set local status.
			 */
13118 iocb_completed = false;
13119 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
13122 if (iocb_completed) {
13123 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13124 "0331 IOCB wake signaled\n");
13125 /* Note: we are not indicating if the IOCB has a success
13126 * status or not - that's for the caller to check.
13127 * IOCB_SUCCESS means just that the command was sent and
13128 * completed. Not that it completed successfully.
13130 } else if (timeleft == 0) {
13131 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13132 "0338 IOCB wait timeout error - no "
13133 "wake response Data x%x\n", timeout);
13134 retval = IOCB_TIMEDOUT;
13136 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13137 "0330 IOCB wake NOT set, "
13139 timeout, (timeleft / jiffies));
13140 retval = IOCB_TIMEDOUT;
13142 } else if (retval == IOCB_BUSY) {
13143 if (phba->cfg_log_verbose & LOG_SLI) {
			list_for_each_entry(iocb, &pring->txq, list) {
				txq_cnt++;
			}
			list_for_each_entry(iocb, &pring->txcmplq, list) {
				txcmplq_cnt++;
			}
13150 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13151 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
					phba->iocb_cnt, txq_cnt, txcmplq_cnt);
		}
		return IOCB_BUSY;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0332 IOCB wait issue failed, Data x%x\n",
				retval);
		retval = IOCB_ERROR;
	}
13162 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
13165 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
13166 writel(creg_val, phba->HCregaddr);
13167 readl(phba->HCregaddr); /* flush */
	if (prspiocbq)
		piocb->context2 = NULL;

	piocb->context_un.wait_queue = NULL;
	piocb->iocb_cmpl = NULL;
	return retval;
}
13179 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
13180 * @phba: Pointer to HBA context object.
13181 * @pmboxq: Pointer to driver mailbox object.
13182 * @timeout: Timeout in number of seconds.
13184 * This function issues the mailbox to firmware and waits for the
13185 * mailbox command to complete. If the mailbox command is not
13186 * completed within timeout seconds, it returns MBX_TIMEOUT.
13187 * The function waits for the mailbox completion using an
13188 * interruptible wait. If the thread is woken up due to a
13189 * signal, MBX_TIMEOUT error is returned to the caller. Caller
 * should not free the mailbox resources, if this function returns
 * MBX_TIMEOUT.
13192 * This function will sleep while waiting for mailbox completion.
13193 * So, this function should not be called from any context which
13194 * does not allow sleeping. Due to the same reason, this function
13195 * cannot be called with interrupt disabled.
13196 * This function assumes that the mailbox completion occurs while
 * this function sleeps. So, this function cannot be called from
13198 * the worker thread which processes mailbox completion.
 * This function is called in the context of HBA management
 * applications.
13201 * This function returns MBX_SUCCESS when successful.
13202 * This function is called with no lock held.
13205 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	struct completion mbox_done;
	int retval;
	unsigned long flag;
13212 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
13213 /* setup wake call as IOCB callback */
13214 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
13216 /* setup context3 field to pass wait_queue pointer to wake function */
13217 init_completion(&mbox_done);
13218 pmboxq->context3 = &mbox_done;
13219 /* now issue the command */
13220 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
13221 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
13222 wait_for_completion_timeout(&mbox_done,
13223 msecs_to_jiffies(timeout * 1000));
13225 spin_lock_irqsave(&phba->hbalock, flag);
13226 pmboxq->context3 = NULL;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
13231 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
			retval = MBX_SUCCESS;
		} else {
			retval = MBX_TIMEOUT;
13235 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	}
	return retval;
}
13243 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
13244 * @phba: Pointer to HBA context.
13245 * @mbx_action: Mailbox shutdown options.
13247 * This function is called to shutdown the driver's mailbox sub-system.
 * It first marks the mailbox sub-system as in a blocked state to prevent
 * the asynchronous mailbox commands from being issued off the pending mailbox
13250 * command queue. If the mailbox command sub-system shutdown is due to
13251 * HBA error conditions such as EEH or ERATT, this routine shall invoke
13252 * the mailbox sub-system flush routine to forcefully bring down the
13253 * mailbox sub-system. Otherwise, if it is due to normal condition (such
13254 * as with offline or HBA function reset), this routine will wait for the
13255 * outstanding mailbox command to complete before invoking the mailbox
13256 * sub-system flush routine to gracefully bring down mailbox sub-system.
13259 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_sli *psli = &phba->sli;
13262 unsigned long timeout;
13264 if (mbx_action == LPFC_MBX_NO_WAIT) {
		/* delay 100ms for port state */
		msleep(100);
		lpfc_sli_mbox_sys_flush(phba);
		return;
	}
13270 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
13272 /* Disable softirqs, including timers from obtaining phba->hbalock */
13273 local_bh_disable();
13275 spin_lock_irq(&phba->hbalock);
13276 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13278 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
13279 /* Determine how long we might wait for the active mailbox
13280 * command to be gracefully completed by firmware.
13282 if (phba->sli.mbox_active)
13283 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
13286 spin_unlock_irq(&phba->hbalock);
		/* Enable softirqs again, done with phba->hbalock */
		local_bh_enable();
13291 while (phba->sli.mbox_active) {
			/* Check active mailbox complete status every 2ms */
			msleep(2);
			if (time_after(jiffies, timeout))
				/* Timeout, let the mailbox flush routine
				 * forcefully release the active mailbox
				 * command
				 */
				break;
		}
	} else {
13301 spin_unlock_irq(&phba->hbalock);
		/* Enable softirqs again, done with phba->hbalock */
		local_bh_enable();
	}

	lpfc_sli_mbox_sys_flush(phba);
}
13311 * lpfc_sli_eratt_read - read sli-3 error attention events
13312 * @phba: Pointer to HBA context.
13314 * This function is called to read the SLI3 device error attention registers
13315 * for possible error attention events. The caller must hold the hostlock
13316 * with spin_lock_irq().
13318 * This function returns 1 when there is Error Attention in the Host Attention
13319 * Register and returns 0 otherwise.
13322 lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* Read chip Host Attention (HA) register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		goto unplug_err;
13330 if (ha_copy & HA_ERATT) {
13331 /* Read host status register to retrieve error event */
		if (lpfc_sli_read_hs(phba))
			goto unplug_err;
		/* Check if a deferred error condition is active */
13336 if ((HS_FFER1 & phba->work_hs) &&
13337 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13338 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
13339 phba->hba_flag |= DEFER_ERATT;
13340 /* Clear all interrupt enable conditions */
13341 writel(0, phba->HCregaddr);
			readl(phba->HCregaddr);
		}
13345 /* Set the driver HA work bitmap */
13346 phba->work_ha |= HA_ERATT;
13347 /* Indicate polling handles this ERATT */
		phba->hba_flag |= HBA_ERATT_HANDLED;
		return 1;
	}
	return 0;

unplug_err:
	/* Set the driver HS work bitmap */
13355 phba->work_hs |= UNPLUG_ERR;
13356 /* Set the driver HA work bitmap */
13357 phba->work_ha |= HA_ERATT;
13358 /* Indicate polling handles this ERATT */
	phba->hba_flag |= HBA_ERATT_HANDLED;
	return 1;
}
13364 * lpfc_sli4_eratt_read - read sli-4 error attention events
13365 * @phba: Pointer to HBA context.
13367 * This function is called to read the SLI4 device error attention registers
13368 * for possible error attention events. The caller must hold the hostlock
13369 * with spin_lock_irq().
13371 * This function returns 1 when there is Error Attention in the Host Attention
13372 * Register and returns 0 otherwise.
13375 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
	uint32_t uerr_sta_hi, uerr_sta_lo;
13378 uint32_t if_type, portsmphr;
	struct lpfc_register portstat_reg;
	u32 logmask;
	/*
	 * For now, use the SLI4 device internal unrecoverable error
	 * registers for error attention. This can be changed later.
	 */
13386 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
			       &uerr_sta_lo) ||
		    lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
			       &uerr_sta_hi)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
13398 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
13399 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
13400 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13401 "1423 HBA Unrecoverable error: "
13402 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
13403 "ue_mask_lo_reg=0x%x, "
13404 "ue_mask_hi_reg=0x%x\n",
13405 uerr_sta_lo, uerr_sta_hi,
13406 phba->sli4_hba.ue_mask_lo,
13407 phba->sli4_hba.ue_mask_hi);
13408 phba->work_status[0] = uerr_sta_lo;
13409 phba->work_status[1] = uerr_sta_hi;
13410 phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
13415 case LPFC_SLI_INTF_IF_TYPE_2:
13416 case LPFC_SLI_INTF_IF_TYPE_6:
13417 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
13418 &portstat_reg.word0) ||
		    lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			       &portsmphr)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
13426 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
13427 phba->work_status[0] =
13428 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
13429 phba->work_status[1] =
13430 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
13431 logmask = LOG_TRACE_EVENT;
13432 if (phba->work_status[0] ==
13433 SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
				logmask = LOG_SLI;
13436 lpfc_printf_log(phba, KERN_ERR, logmask,
13437 "2885 Port Status Event: "
13438 "port status reg 0x%x, "
13439 "port smphr reg 0x%x, "
13440 "error 1=0x%x, error 2=0x%x\n",
				portstat_reg.word0,
				portsmphr,
13443 phba->work_status[0],
13444 phba->work_status[1]);
13445 phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
13452 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13453 "2886 HBA Error Attention on unsupported "
13454 "if type %d.", if_type);
13462 * lpfc_sli_check_eratt - check error attention events
13463 * @phba: Pointer to HBA context.
13465 * This function is called from timer soft interrupt context to check HBA's
13466 * error attention register bit for error attention events.
13468 * This function returns 1 when there is Error Attention in the Host Attention
13469 * Register and returns 0 otherwise.
13472 lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* If somebody is waiting to handle an eratt, don't process it
	 * here. The brdkill function will do this.
	 */
	if (phba->link_flag & LS_IGNORE_ERATT)
		return 0;
13482 /* Check if interrupt handler handles this ERATT */
13483 spin_lock_irq(&phba->hbalock);
13484 if (phba->hba_flag & HBA_ERATT_HANDLED) {
13485 /* Interrupt handler has handled ERATT */
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}
	/*
	 * If there is deferred error attention, do not check for error
	 * attention events.
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}
13499 /* If PCI channel is offline, don't process it */
13500 if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}
13505 switch (phba->sli_rev) {
13506 case LPFC_SLI_REV2:
13507 case LPFC_SLI_REV3:
13508 /* Read chip Host Attention (HA) register */
		ha_copy = lpfc_sli_eratt_read(phba);
		break;
13511 case LPFC_SLI_REV4:
		/* Read device Unrecoverable Error (UERR) registers */
		ha_copy = lpfc_sli4_eratt_read(phba);
		break;
	default:
13516 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13517 "0299 Invalid SLI revision (%d)\n",
13522 spin_unlock_irq(&phba->hbalock);
13528 * lpfc_intr_state_check - Check device state for interrupt handling
13529 * @phba: Pointer to HBA context.
13531 * This inline routine checks whether a device or its PCI slot is in a state
13532 * that the interrupt should be handled.
13534 * This function returns 0 if the device or the PCI slot is in a state that
13535 * interrupt should be handled, otherwise -EIO.
13538 lpfc_intr_state_check(struct lpfc_hba *phba)
{
	/* If the pci channel is offline, ignore all the interrupts */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return -EIO;
13544 /* Update device level interrupt statistics */
13545 phba->sli.slistat.sli_intr++;
13547 /* Ignore all interrupts during initialization. */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return -EIO;

	return 0;
}
13555 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
13556 * @irq: Interrupt number.
13557 * @dev_id: The device context pointer.
13559 * This function is directly called from the PCI layer as an interrupt
13560 * service routine when device with SLI-3 interface spec is enabled with
13561 * MSI-X multi-message interrupt mode and there are slow-path events in
13562 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
13563 * interrupt mode, this function is called as part of the device-level
13564 * interrupt handler. When the PCI slot is in error recovery or the HBA
13565 * is undergoing initialization, the interrupt handler will not process
13566 * the interrupt. The link attention and ELS ring attention events are
13567 * handled by the worker thread. The interrupt handler signals the worker
13568 * thread and returns for these events. This function is called without
13569 * any lock held. It gets the hbalock to access and update SLI data
13572 * This function returns IRQ_HANDLED when interrupt is handled else it
13573 * returns IRQ_NONE.
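*
* Ring attention bits in the HA register are packed four per ring, so
* the slow-path (ELS) ring status is extracted as (sketch mirroring the
* body below):
*
*   status = ha_copy & (HA_RXMASK << (4 * LPFC_ELS_RING));
*   status >>= (4 * LPFC_ELS_RING);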
13576 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
13578 struct lpfc_hba *phba;
13579 uint32_t ha_copy, hc_copy;
13580 uint32_t work_ha_copy;
13581 unsigned long status;
13582 unsigned long iflag;
13585 MAILBOX_t *mbox, *pmbox;
13586 struct lpfc_vport *vport;
13587 struct lpfc_nodelist *ndlp;
13588 struct lpfc_dmabuf *mp;
13593 * Get the driver's phba structure from the dev_id and
13594 * assume the HBA is not interrupting.
13596 phba = (struct lpfc_hba *)dev_id;
13598 if (unlikely(!phba))
13602 * Extra care is needed when this function is invoked as an
13603 * individual interrupt handler in MSI-X multi-message interrupt mode
13605 if (phba->intr_type == MSIX) {
13606 /* Check device state for handling interrupt */
13607 if (lpfc_intr_state_check(phba))
13609 /* Need to read HA REG for slow-path events */
13610 spin_lock_irqsave(&phba->hbalock, iflag);
13611 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13613 /* If somebody is waiting to handle an eratt don't process it
13614 * here. The brdkill function will do this.
13616 if (phba->link_flag & LS_IGNORE_ERATT)
13617 ha_copy &= ~HA_ERATT;
13618 /* Check the need for handling ERATT in interrupt handler */
13619 if (ha_copy & HA_ERATT) {
13620 if (phba->hba_flag & HBA_ERATT_HANDLED)
13621 /* ERATT polling has handled ERATT */
13622 ha_copy &= ~HA_ERATT;
13624 /* Indicate interrupt handler handles ERATT */
13625 phba->hba_flag |= HBA_ERATT_HANDLED;
13629 * If there is deferred error attention, do not check for any
13632 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13633 spin_unlock_irqrestore(&phba->hbalock, iflag);
13637 /* Clear up only attention source related to slow-path */
13638 if (lpfc_readl(phba->HCregaddr, &hc_copy))
13641 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
13642 HC_LAINT_ENA | HC_ERINT_ENA),
13644 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
13646 writel(hc_copy, phba->HCregaddr);
13647 readl(phba->HAregaddr); /* flush */
13648 spin_unlock_irqrestore(&phba->hbalock, iflag);
13650 ha_copy = phba->ha_copy;
13652 work_ha_copy = ha_copy & phba->work_ha_mask;
13654 if (work_ha_copy) {
13655 if (work_ha_copy & HA_LATT) {
13656 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
13658 * Turn off Link Attention interrupts
13659 * until CLEAR_LA done
13661 spin_lock_irqsave(&phba->hbalock, iflag);
13662 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
13663 if (lpfc_readl(phba->HCregaddr, &control))
13665 control &= ~HC_LAINT_ENA;
13666 writel(control, phba->HCregaddr);
13667 readl(phba->HCregaddr); /* flush */
13668 spin_unlock_irqrestore(&phba->hbalock, iflag);
13671 work_ha_copy &= ~HA_LATT;
13674 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
13676 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
13677 * the only slow ring.
13679 status = (work_ha_copy &
13680 (HA_RXMASK << (4*LPFC_ELS_RING)));
13681 status >>= (4*LPFC_ELS_RING);
13682 if (status & HA_RXMASK) {
13683 spin_lock_irqsave(&phba->hbalock, iflag);
13684 if (lpfc_readl(phba->HCregaddr, &control))
13687 lpfc_debugfs_slow_ring_trc(phba,
13688 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
13690 (uint32_t)phba->sli.slistat.sli_intr);
13692 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
13693 lpfc_debugfs_slow_ring_trc(phba,
13694 "ISR Disable ring:"
13695 "pwork:x%x hawork:x%x wait:x%x",
13696 phba->work_ha, work_ha_copy,
13697 (uint32_t)((unsigned long)
13698 &phba->work_waitq));
13701 ~(HC_R0INT_ENA << LPFC_ELS_RING);
13702 writel(control, phba->HCregaddr);
13703 readl(phba->HCregaddr); /* flush */
13706 lpfc_debugfs_slow_ring_trc(phba,
13707 "ISR slow ring: pwork:"
13708 "x%x hawork:x%x wait:x%x",
13709 phba->work_ha, work_ha_copy,
13710 (uint32_t)((unsigned long)
13711 &phba->work_waitq));
13713 spin_unlock_irqrestore(&phba->hbalock, iflag);
13716 spin_lock_irqsave(&phba->hbalock, iflag);
13717 if (work_ha_copy & HA_ERATT) {
13718 if (lpfc_sli_read_hs(phba))
13721 * Check if there is a deferred error condition
13724 if ((HS_FFER1 & phba->work_hs) &&
13725 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13726 HS_FFER6 | HS_FFER7 | HS_FFER8) &
13728 phba->hba_flag |= DEFER_ERATT;
13729 /* Clear all interrupt enable conditions */
13730 writel(0, phba->HCregaddr);
13731 readl(phba->HCregaddr);
13735 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
13736 pmb = phba->sli.mbox_active;
13737 pmbox = &pmb->u.mb;
13739 vport = pmb->vport;
13741 /* First check out the status word */
13742 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
13743 if (pmbox->mbxOwner != OWN_HOST) {
13744 spin_unlock_irqrestore(&phba->hbalock, iflag);
13746 * Stray Mailbox Interrupt, mbxCommand <cmd>
13747 * mbxStatus <status>
13749 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13750 "(%d):0304 Stray Mailbox "
13751 "Interrupt mbxCommand x%x "
13753 (vport ? vport->vpi : 0),
13756 /* clear mailbox attention bit */
13757 work_ha_copy &= ~HA_MBATT;
13759 phba->sli.mbox_active = NULL;
13760 spin_unlock_irqrestore(&phba->hbalock, iflag);
13761 phba->last_completion_time = jiffies;
13762 del_timer(&phba->sli.mbox_tmo);
13763 if (pmb->mbox_cmpl) {
13764 lpfc_sli_pcimem_bcopy(mbox, pmbox,
13766 if (pmb->out_ext_byte_len &&
13768 lpfc_sli_pcimem_bcopy(
13771 pmb->out_ext_byte_len);
13773 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13774 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13776 lpfc_debugfs_disc_trc(vport,
13777 LPFC_DISC_TRC_MBOX_VPORT,
13778 "MBOX dflt rpi: : "
13779 "status:x%x rpi:x%x",
13780 (uint32_t)pmbox->mbxStatus,
13781 pmbox->un.varWords[0], 0);
13783 if (!pmbox->mbxStatus) {
13784 mp = (struct lpfc_dmabuf *)
13786 ndlp = (struct lpfc_nodelist *)
13789 /* Reg_LOGIN of dflt RPI was
13790 * successful. Now let's get
13791 * rid of the RPI using the
13792 * same mbox buffer.
13794 lpfc_unreg_login(phba,
13796 pmbox->un.varWords[0],
13799 lpfc_mbx_cmpl_dflt_rpi;
13801 pmb->ctx_ndlp = ndlp;
13802 pmb->vport = vport;
13803 rc = lpfc_sli_issue_mbox(phba,
13806 if (rc != MBX_BUSY)
13807 lpfc_printf_log(phba,
13810 "0350 rc should have"
13811 "been MBX_BUSY\n");
13812 if (rc != MBX_NOT_FINISHED)
13813 goto send_current_mbox;
13817 &phba->pport->work_port_lock,
13819 phba->pport->work_port_events &=
13821 spin_unlock_irqrestore(
13822 &phba->pport->work_port_lock,
13825 /* Do NOT queue MBX_HEARTBEAT to the worker
13826 * thread for processing.
13828 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13829 /* Process mbox now */
13830 phba->sli.mbox_active = NULL;
13831 phba->sli.sli_flag &=
13832 ~LPFC_SLI_MBOX_ACTIVE;
13833 if (pmb->mbox_cmpl)
13834 pmb->mbox_cmpl(phba, pmb);
13836 /* Queue to worker thread to process */
13837 lpfc_mbox_cmpl_put(phba, pmb);
13841 spin_unlock_irqrestore(&phba->hbalock, iflag);
13843 if ((work_ha_copy & HA_MBATT) &&
13844 (phba->sli.mbox_active == NULL)) {
13846 /* Process next mailbox command if there is one */
13848 rc = lpfc_sli_issue_mbox(phba, NULL,
13850 } while (rc == MBX_NOT_FINISHED);
13851 if (rc != MBX_SUCCESS)
13852 lpfc_printf_log(phba, KERN_ERR,
13854 "0349 rc should be "
13858 spin_lock_irqsave(&phba->hbalock, iflag);
13859 phba->work_ha |= work_ha_copy;
13860 spin_unlock_irqrestore(&phba->hbalock, iflag);
13861 lpfc_worker_wake_up(phba);
13863 return IRQ_HANDLED;
13865 spin_unlock_irqrestore(&phba->hbalock, iflag);
13866 return IRQ_HANDLED;
13868 } /* lpfc_sli_sp_intr_handler */
13871 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
13872 * @irq: Interrupt number.
13873 * @dev_id: The device context pointer.
13875 * This function is directly called from the PCI layer as an interrupt
13876 * service routine when device with SLI-3 interface spec is enabled with
13877 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13878 * ring event in the HBA. However, when the device is enabled with either
13879 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13880 * device-level interrupt handler. When the PCI slot is in error recovery
13881 * or the HBA is undergoing initialization, the interrupt handler will not
13882 * process the interrupt. The SCSI FCP fast-path ring events are handled in
13883 * the interrupt context. This function is called without any lock held.
13884 * It gets the hbalock to access and update SLI data structures.
13886 * This function returns IRQ_HANDLED when interrupt is handled else it
13887 * returns IRQ_NONE.
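*
* Only the fast-path attention sources are acknowledged here, e.g.
* (sketch mirroring the body below):
*
*   writel(ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK), phba->HAregaddr);
*   readl(phba->HAregaddr);   (read back to flush the posted write)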
13890 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
13892 struct lpfc_hba *phba;
13894 unsigned long status;
13895 unsigned long iflag;
13896 struct lpfc_sli_ring *pring;
13898 /* Get the driver's phba structure from the dev_id and
13899 * assume the HBA is not interrupting.
13901 phba = (struct lpfc_hba *) dev_id;
13903 if (unlikely(!phba))
13907 * Extra care is needed when this function is invoked as an
13908 * individual interrupt handler in MSI-X multi-message interrupt mode
13910 if (phba->intr_type == MSIX) {
13911 /* Check device state for handling interrupt */
13912 if (lpfc_intr_state_check(phba))
13914 /* Need to read HA REG for FCP ring and other ring events */
13915 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13916 return IRQ_HANDLED;
13917 /* Clear up only attention source related to fast-path */
13918 spin_lock_irqsave(&phba->hbalock, iflag);
13920 * If there is deferred error attention, do not check for
13923 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13924 spin_unlock_irqrestore(&phba->hbalock, iflag);
13927 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
13929 readl(phba->HAregaddr); /* flush */
13930 spin_unlock_irqrestore(&phba->hbalock, iflag);
13932 ha_copy = phba->ha_copy;
13935 * Process all events on FCP ring. Take the optimized path for FCP IO.
13937 ha_copy &= ~(phba->work_ha_mask);
13939 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13940 status >>= (4*LPFC_FCP_RING);
13941 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
13942 if (status & HA_RXMASK)
13943 lpfc_sli_handle_fast_ring_event(phba, pring, status);
13945 if (phba->cfg_multi_ring_support == 2) {
13947 * Process all events on extra ring. Take the optimized path
13948 * for extra ring IO.
13950 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13951 status >>= (4*LPFC_EXTRA_RING);
13952 if (status & HA_RXMASK) {
13953 lpfc_sli_handle_fast_ring_event(phba,
13954 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
13958 return IRQ_HANDLED;
13959 } /* lpfc_sli_fp_intr_handler */
13962 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
13963 * @irq: Interrupt number.
13964 * @dev_id: The device context pointer.
13966 * This function is the HBA device-level interrupt handler to device with
13967 * SLI-3 interface spec, called from the PCI layer when either MSI or
13968 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
13969 * requires driver attention. This function invokes the slow-path interrupt
13970 * attention handling function and fast-path interrupt attention handling
13971 * function in turn to process the relevant HBA attention events. This
13972 * function is called without any lock held. It gets the hbalock to access
13973 * and update SLI data structures.
13975 * This function returns IRQ_HANDLED when interrupt is handled, else it
13976 * returns IRQ_NONE.
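*
* A sketch of how such a handler is typically wired up at attach time
* (assumed INTx/MSI setup shown for illustration):
*
*   rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
*                    IRQF_SHARED, LPFC_DRIVER_NAME, phba);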
13979 lpfc_sli_intr_handler(int irq, void *dev_id)
13981 struct lpfc_hba *phba;
13982 irqreturn_t sp_irq_rc, fp_irq_rc;
13983 unsigned long status1, status2;
13987 * Get the driver's phba structure from the dev_id and
13988 * assume the HBA is not interrupting.
13990 phba = (struct lpfc_hba *) dev_id;
13992 if (unlikely(!phba))
13995 /* Check device state for handling interrupt */
13996 if (lpfc_intr_state_check(phba))
13999 spin_lock(&phba->hbalock);
14000 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
14001 spin_unlock(&phba->hbalock);
14002 return IRQ_HANDLED;
14005 if (unlikely(!phba->ha_copy)) {
14006 spin_unlock(&phba->hbalock);
14008 } else if (phba->ha_copy & HA_ERATT) {
14009 if (phba->hba_flag & HBA_ERATT_HANDLED)
14010 /* ERATT polling has handled ERATT */
14011 phba->ha_copy &= ~HA_ERATT;
14013 /* Indicate interrupt handler handles ERATT */
14014 phba->hba_flag |= HBA_ERATT_HANDLED;
14018 * If there is deferred error attention, do not check for any interrupt.
14020 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
14021 spin_unlock(&phba->hbalock);
14025 /* Clear attention sources except link and error attentions */
14026 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
14027 spin_unlock(&phba->hbalock);
14028 return IRQ_HANDLED;
14030 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
14031 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
14033 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
14034 writel(hc_copy, phba->HCregaddr);
14035 readl(phba->HAregaddr); /* flush */
14036 spin_unlock(&phba->hbalock);
14039 * Invokes slow-path host attention interrupt handling as appropriate.
14042 /* status of events with mailbox and link attention */
14043 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
14045 /* status of events with ELS ring */
14046 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
14047 status2 >>= (4*LPFC_ELS_RING);
14049 if (status1 || (status2 & HA_RXMASK))
14050 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
14052 sp_irq_rc = IRQ_NONE;
14055 * Invoke fast-path host attention interrupt handling as appropriate.
14058 /* status of events with FCP ring */
14059 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
14060 status1 >>= (4*LPFC_FCP_RING);
14062 /* status of events with extra ring */
14063 if (phba->cfg_multi_ring_support == 2) {
14064 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
14065 status2 >>= (4*LPFC_EXTRA_RING);
14069 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
14070 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
14072 fp_irq_rc = IRQ_NONE;
14074 /* Return device-level interrupt handling status */
14075 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
14076 } /* lpfc_sli_intr_handler */
14079 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
14080 * @phba: pointer to lpfc hba data structure.
14082 * This routine is invoked by the worker thread to process all the pending
14083 * SLI4 ELS XRI abort events.
14085 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
14087 struct lpfc_cq_event *cq_event;
14088 unsigned long iflags;
14090 /* First, declare the els xri abort event has been handled */
14091 spin_lock_irqsave(&phba->hbalock, iflags);
14092 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
14093 spin_unlock_irqrestore(&phba->hbalock, iflags);
14095 /* Now, handle all the els xri abort events */
14096 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14097 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
14098 /* Get the first event from the head of the event queue */
14099 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
14100 cq_event, struct lpfc_cq_event, list);
14101 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14103 /* Notify aborted XRI for ELS work queue */
14104 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
14106 /* Free the event processed back to the free pool */
14107 lpfc_sli4_cq_event_release(phba, cq_event);
14108 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14111 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14115 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
14116 * @phba: pointer to lpfc hba data structure
14117 * @pIocbIn: pointer to the rspiocbq
14118 * @pIocbOut: pointer to the cmdiocbq
14119 * @wcqe: pointer to the complete wcqe
14121 * This routine transfers the fields of a command iocbq to a response iocbq
14122 * by copying all the IOCB fields from command iocbq and transferring the
14123 * completion status information from the complete wcqe.
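*
* The copy deliberately skips every field ahead of the iocb member so
* the response iocbq's list linkage survives, i.e. (sketch of the
* technique used in the body):
*
*   offset = offsetof(struct lpfc_iocbq, iocb);
*   memcpy((char *)rsp + offset, (char *)cmd + offset,
*          sizeof(struct lpfc_iocbq) - offset);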
14126 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
14127 struct lpfc_iocbq *pIocbIn,
14128 struct lpfc_iocbq *pIocbOut,
14129 struct lpfc_wcqe_complete *wcqe)
14132 unsigned long iflags;
14133 uint32_t status, max_response;
14134 struct lpfc_dmabuf *dmabuf;
14135 struct ulp_bde64 *bpl, bde;
14136 size_t offset = offsetof(struct lpfc_iocbq, iocb);
14138 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
14139 sizeof(struct lpfc_iocbq) - offset);
14140 /* Map WCQE parameters into irspiocb parameters */
14141 status = bf_get(lpfc_wcqe_c_status, wcqe);
14142 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
14143 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
14144 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
14145 pIocbIn->iocb.un.fcpi.fcpi_parm =
14146 pIocbOut->iocb.un.fcpi.fcpi_parm -
14147 wcqe->total_data_placed;
14149 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
14151 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
14152 switch (pIocbOut->iocb.ulpCommand) {
14153 case CMD_ELS_REQUEST64_CR:
14154 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
14155 bpl = (struct ulp_bde64 *)dmabuf->virt;
14156 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
14157 max_response = bde.tus.f.bdeSize;
14159 case CMD_GEN_REQUEST64_CR:
14161 if (!pIocbOut->context3)
14163 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
14164 sizeof(struct ulp_bde64);
14165 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
14166 bpl = (struct ulp_bde64 *)dmabuf->virt;
14167 for (i = 0; i < numBdes; i++) {
14168 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
14169 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
14170 max_response += bde.tus.f.bdeSize;
14174 max_response = wcqe->total_data_placed;
14177 if (max_response < wcqe->total_data_placed)
14178 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
14180 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
14181 wcqe->total_data_placed;
14184 /* Convert BG errors for completion status */
14185 if (status == CQE_STATUS_DI_ERROR) {
14186 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
14188 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
14189 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
14191 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
14193 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
14194 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
14195 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
14196 BGS_GUARD_ERR_MASK;
14197 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
14198 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
14199 BGS_APPTAG_ERR_MASK;
14200 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
14201 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
14202 BGS_REFTAG_ERR_MASK;
14204 /* Check to see if there was any good data before the error */
14205 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
14206 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
14207 BGS_HI_WATER_MARK_PRESENT_MASK;
14208 pIocbIn->iocb.unsli3.sli3_bg.bghm =
14209 wcqe->total_data_placed;
14213 * Set ALL the error bits to indicate we don't know what
14214 * type of error it is.
14216 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
14217 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
14218 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
14219 BGS_GUARD_ERR_MASK);
14222 /* Pick up HBA exchange busy condition */
14223 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
14224 spin_lock_irqsave(&phba->hbalock, iflags);
14225 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
14226 spin_unlock_irqrestore(&phba->hbalock, iflags);
14231 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
14232 * @phba: Pointer to HBA context object.
14233 * @irspiocbq: Pointer to work-queue completion queue entry.
14235 * This routine handles an ELS work-queue completion event and constructs
14236 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
14237 * discovery engine to handle.
14239 * Return: Pointer to the receive IOCBQ, NULL otherwise.
14241 static struct lpfc_iocbq *
14242 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
14243 struct lpfc_iocbq *irspiocbq)
14245 struct lpfc_sli_ring *pring;
14246 struct lpfc_iocbq *cmdiocbq;
14247 struct lpfc_wcqe_complete *wcqe;
14248 unsigned long iflags;
14250 pring = lpfc_phba_elsring(phba);
14251 if (unlikely(!pring))
14254 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
14255 pring->stats.iocb_event++;
14256 /* Look up the ELS command IOCB and create pseudo response IOCB */
14257 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14258 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14259 if (unlikely(!cmdiocbq)) {
14260 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14261 "0386 ELS complete with no corresponding "
14262 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
14263 wcqe->word0, wcqe->total_data_placed,
14264 wcqe->parameter, wcqe->word3);
14265 lpfc_sli_release_iocbq(phba, irspiocbq);
14269 spin_lock_irqsave(&pring->ring_lock, iflags);
14270 /* Put the iocb back on the txcmplq */
14271 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
14272 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14274 /* Fake the irspiocbq and copy necessary response information */
14275 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
14280 inline struct lpfc_cq_event *
14281 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
14283 struct lpfc_cq_event *cq_event;
14285 /* Allocate a new internal CQ_EVENT entry */
14286 cq_event = lpfc_sli4_cq_event_alloc(phba);
14288 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14289 "0602 Failed to alloc CQ_EVENT entry\n");
14293 /* Move the CQE into the event */
14294 memcpy(&cq_event->cqe, entry, size);
14299 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
14300 * @phba: Pointer to HBA context object.
14301 * @mcqe: Pointer to mailbox completion queue entry.
14303 * This routine processes a mailbox completion queue entry with asynchronous
14306 * Return: true if work posted to worker thread, otherwise false.
14309 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14311 struct lpfc_cq_event *cq_event;
14312 unsigned long iflags;
14314 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14315 "0392 Async Event: word0:x%x, word1:x%x, "
14316 "word2:x%x, word3:x%x\n", mcqe->word0,
14317 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
14319 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
14323 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
14324 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
14325 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
14327 /* Set the async event flag */
14328 spin_lock_irqsave(&phba->hbalock, iflags);
14329 phba->hba_flag |= ASYNC_EVENT;
14330 spin_unlock_irqrestore(&phba->hbalock, iflags);
14336 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
14337 * @phba: Pointer to HBA context object.
14338 * @mcqe: Pointer to mailbox completion queue entry.
14340 * This routine processes a mailbox completion queue entry with a mailbox
14341 * completion event.
14343 * Return: true if work posted to worker thread, otherwise false.
14346 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14348 uint32_t mcqe_status;
14349 MAILBOX_t *mbox, *pmbox;
14350 struct lpfc_mqe *mqe;
14351 struct lpfc_vport *vport;
14352 struct lpfc_nodelist *ndlp;
14353 struct lpfc_dmabuf *mp;
14354 unsigned long iflags;
14356 bool workposted = false;
14359 /* If not a mailbox completion MCQE, bail out via the mailbox-consume check */
14360 if (!bf_get(lpfc_trailer_completed, mcqe))
14361 goto out_no_mqe_complete;
14363 /* Get the reference to the active mbox command */
14364 spin_lock_irqsave(&phba->hbalock, iflags);
14365 pmb = phba->sli.mbox_active;
14366 if (unlikely(!pmb)) {
14367 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14368 "1832 No pending MBOX command to handle\n");
14369 spin_unlock_irqrestore(&phba->hbalock, iflags);
14370 goto out_no_mqe_complete;
14372 spin_unlock_irqrestore(&phba->hbalock, iflags);
14374 pmbox = (MAILBOX_t *)&pmb->u.mqe;
14376 vport = pmb->vport;
14378 /* Reset heartbeat timer */
14379 phba->last_completion_time = jiffies;
14380 del_timer(&phba->sli.mbox_tmo);
14382 /* Move mbox data to caller's mailbox region, do endian swapping */
14383 if (pmb->mbox_cmpl && mbox)
14384 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
14387 * For mcqe errors, conditionally move a modified error code to
14388 * the mbox so that the error will not be missed.
14390 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
14391 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
14392 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
14393 bf_set(lpfc_mqe_status, mqe,
14394 (LPFC_MBX_ERROR_RANGE | mcqe_status));
14396 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
14397 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
14398 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
14399 "MBOX dflt rpi: status:x%x rpi:x%x",
14401 pmbox->un.varWords[0], 0);
14402 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
14403 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
14404 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
14406 /* Reg_LOGIN of dflt RPI was successful. Mark the
14407 * node as having an UNREG_LOGIN in progress to stop
14408 * an unsolicited PLOGI from the same NPortId from
14409 * starting another mailbox transaction.
14411 spin_lock_irqsave(&ndlp->lock, iflags);
14412 ndlp->nlp_flag |= NLP_UNREG_INP;
14413 spin_unlock_irqrestore(&ndlp->lock, iflags);
14414 lpfc_unreg_login(phba, vport->vpi,
14415 pmbox->un.varWords[0], pmb);
14416 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
14419 /* No reference taken here. This is a default
14420 * RPI reg/immediate unreg cycle. The reference was
14421 * taken in the reg rpi path and is released when
14422 * this mailbox completes.
14424 pmb->ctx_ndlp = ndlp;
14425 pmb->vport = vport;
14426 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
14427 if (rc != MBX_BUSY)
14428 lpfc_printf_log(phba, KERN_ERR,
14431 "have been MBX_BUSY\n");
14432 if (rc != MBX_NOT_FINISHED)
14433 goto send_current_mbox;
14436 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
14437 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
14438 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
14440 /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
14441 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
14442 spin_lock_irqsave(&phba->hbalock, iflags);
14443 /* Release the mailbox command posting token */
14444 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14445 phba->sli.mbox_active = NULL;
14446 if (bf_get(lpfc_trailer_consumed, mcqe))
14447 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14448 spin_unlock_irqrestore(&phba->hbalock, iflags);
14450 /* Post the next mbox command, if there is one */
14451 lpfc_sli4_post_async_mbox(phba);
14453 /* Process cmpl now */
14454 if (pmb->mbox_cmpl)
14455 pmb->mbox_cmpl(phba, pmb);
14459 /* There is mailbox completion work to queue to the worker thread */
14460 spin_lock_irqsave(&phba->hbalock, iflags);
14461 __lpfc_mbox_cmpl_put(phba, pmb);
14462 phba->work_ha |= HA_MBATT;
14463 spin_unlock_irqrestore(&phba->hbalock, iflags);
14467 spin_lock_irqsave(&phba->hbalock, iflags);
14468 /* Release the mailbox command posting token */
14469 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14470 /* Setting active mailbox pointer need to be in sync to flag clear */
14471 phba->sli.mbox_active = NULL;
14472 if (bf_get(lpfc_trailer_consumed, mcqe))
14473 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14474 spin_unlock_irqrestore(&phba->hbalock, iflags);
14475 /* Wake up worker thread to post the next pending mailbox command */
14476 lpfc_worker_wake_up(phba);
14479 out_no_mqe_complete:
14480 spin_lock_irqsave(&phba->hbalock, iflags);
14481 if (bf_get(lpfc_trailer_consumed, mcqe))
14482 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14483 spin_unlock_irqrestore(&phba->hbalock, iflags);
14488 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
14489 * @phba: Pointer to HBA context object.
14490 * @cq: Pointer to associated CQ
14491 * @cqe: Pointer to mailbox completion queue entry.
14493 * This routine processes a mailbox completion queue entry and invokes the
14494 * proper mailbox completion handling or asynchronous event handling routine
14495 * according to the MCQE's async bit.
14497 * Return: true if work posted to worker thread, otherwise false.
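*
* Dispatch keys solely off the MCQE trailer's async bit (sketch of the
* body below):
*
*   if (bf_get(lpfc_trailer_async, &mcqe))
*           workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
*   else
*           workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);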
14500 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14501 struct lpfc_cqe *cqe)
14503 struct lpfc_mcqe mcqe;
14508 /* Copy the mailbox MCQE and convert endian order as needed */
14509 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
14511 /* Invoke the proper event handling routine */
14512 if (!bf_get(lpfc_trailer_async, &mcqe))
14513 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
14515 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
14520 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
14521 * @phba: Pointer to HBA context object.
14522 * @cq: Pointer to associated CQ
14523 * @wcqe: Pointer to work-queue completion queue entry.
14525 * This routine handles an ELS work-queue completion event.
14527 * Return: true if work posted to worker thread, otherwise false.
14530 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14531 struct lpfc_wcqe_complete *wcqe)
14533 struct lpfc_iocbq *irspiocbq;
14534 unsigned long iflags;
14535 struct lpfc_sli_ring *pring = cq->pring;
14537 int txcmplq_cnt = 0;
14539 /* Check for response status */
14540 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14541 /* Log the error status */
14542 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14543 "0357 ELS CQE error: status=x%x: "
14544 "CQE: %08x %08x %08x %08x\n",
14545 bf_get(lpfc_wcqe_c_status, wcqe),
14546 wcqe->word0, wcqe->total_data_placed,
14547 wcqe->parameter, wcqe->word3);
14550 /* Get an irspiocbq for later ELS response processing use */
14551 irspiocbq = lpfc_sli_get_iocbq(phba);
14553 if (!list_empty(&pring->txq))
14555 if (!list_empty(&pring->txcmplq))
14557 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14558 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
14559 "els_txcmplq_cnt=%d\n",
14560 txq_cnt, phba->iocb_cnt,
14565 /* Save off the slow-path queue event for work thread to process */
14566 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
14567 spin_lock_irqsave(&phba->hbalock, iflags);
14568 list_add_tail(&irspiocbq->cq_event.list,
14569 &phba->sli4_hba.sp_queue_event);
14570 phba->hba_flag |= HBA_SP_QUEUE_EVT;
14571 spin_unlock_irqrestore(&phba->hbalock, iflags);
14577 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
14578 * @phba: Pointer to HBA context object.
14579 * @wcqe: Pointer to work-queue completion queue entry.
14581 * This routine handles slow-path WQ entry consumed event by invoking the
14582 * proper WQ release routine to the slow-path WQ.
14585 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
14586 struct lpfc_wcqe_release *wcqe)
14588 /* sanity check on queue memory */
14589 if (unlikely(!phba->sli4_hba.els_wq))
14591 /* Check for the slow-path ELS work queue */
14592 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
14593 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
14594 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14596 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14597 "2579 Slow-path wqe consume event carries "
14598 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
14599 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
14600 phba->sli4_hba.els_wq->queue_id);
14604 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
14605 * @phba: Pointer to HBA context object.
14606 * @cq: Pointer to a WQ completion queue.
14607 * @wcqe: Pointer to work-queue completion queue entry.
14609 * This routine handles an XRI abort event.
14611 * Return: true if work posted to worker thread, otherwise false.
14614 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
14615 struct lpfc_queue *cq,
14616 struct sli4_wcqe_xri_aborted *wcqe)
14618 bool workposted = false;
14619 struct lpfc_cq_event *cq_event;
14620 unsigned long iflags;
14622 switch (cq->subtype) {
14624 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
14625 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14626 /* Notify aborted XRI for NVME work queue */
14627 if (phba->nvmet_support)
14628 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
14630 workposted = false;
14632 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
14634 cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
14636 workposted = false;
14639 cq_event->hdwq = cq->hdwq;
14640 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14642 list_add_tail(&cq_event->list,
14643 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
14644 /* Set the els xri abort event flag */
14645 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
14646 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14651 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14652 "0603 Invalid CQ subtype %d: "
14653 "%08x %08x %08x %08x\n",
14654 cq->subtype, wcqe->word0, wcqe->parameter,
14655 wcqe->word2, wcqe->word3);
14656 workposted = false;
14662 #define FC_RCTL_MDS_DIAGS 0xF4
14665 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
14666 * @phba: Pointer to HBA context object.
14667 * @rcqe: Pointer to receive-queue completion queue entry.
14669 * This routine processes a receive-queue completion queue entry.
14671 * Return: true if work posted to worker thread, otherwise false.
14674 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
14676 bool workposted = false;
14677 struct fc_frame_header *fc_hdr;
14678 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
14679 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
14680 struct lpfc_nvmet_tgtport *tgtp;
14681 struct hbq_dmabuf *dma_buf;
14682 uint32_t status, rq_id;
14683 unsigned long iflags;
14685 /* sanity check on queue memory */
14686 if (unlikely(!hrq) || unlikely(!drq))
14689 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14690 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14692 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14693 if (rq_id != hrq->queue_id)
14696 status = bf_get(lpfc_rcqe_status, rcqe);
14698 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14699 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14700 "2537 Receive Frame Truncated!!\n");
14702 case FC_STATUS_RQ_SUCCESS:
14703 spin_lock_irqsave(&phba->hbalock, iflags);
14704 lpfc_sli4_rq_release(hrq, drq);
14705 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
14707 hrq->RQ_no_buf_found++;
14708 spin_unlock_irqrestore(&phba->hbalock, iflags);
14712 hrq->RQ_buf_posted--;
14713 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
14715 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14717 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
14718 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
14719 spin_unlock_irqrestore(&phba->hbalock, iflags);
14720 /* Handle MDS Loopback frames */
14721 if (!(phba->pport->load_flag & FC_UNLOADING))
14722 lpfc_sli4_handle_mds_loopback(phba->pport,
14725 lpfc_in_buf_free(phba, &dma_buf->dbuf);
14729 /* save off the frame for the work thread to process */
14730 list_add_tail(&dma_buf->cq_event.list,
14731 &phba->sli4_hba.sp_queue_event);
14732 /* Frame received */
14733 phba->hba_flag |= HBA_SP_QUEUE_EVT;
14734 spin_unlock_irqrestore(&phba->hbalock, iflags);
14737 case FC_STATUS_INSUFF_BUF_FRM_DISC:
14738 if (phba->nvmet_support) {
14739 tgtp = phba->targetport->private;
14740 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14741 "6402 RQE Error x%x, posted %d err_cnt "
14743 status, hrq->RQ_buf_posted,
14744 hrq->RQ_no_posted_buf,
14745 atomic_read(&tgtp->rcv_fcp_cmd_in),
14746 atomic_read(&tgtp->rcv_fcp_cmd_out),
14747 atomic_read(&tgtp->xmt_fcp_release));
14751 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14752 hrq->RQ_no_posted_buf++;
14753 /* Post more buffers if possible */
14754 spin_lock_irqsave(&phba->hbalock, iflags);
14755 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
14756 spin_unlock_irqrestore(&phba->hbalock, iflags);
14765 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
14766 * @phba: Pointer to HBA context object.
14767 * @cq: Pointer to the completion queue.
14768 * @cqe: Pointer to a completion queue entry.
14770 * This routine processes a slow-path work-queue or receive-queue completion queue
14773 * Return: true if work posted to worker thread, otherwise false.
14776 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14777 struct lpfc_cqe *cqe)
14779 struct lpfc_cqe cqevt;
14780 bool workposted = false;
14782 /* Copy the work queue CQE and convert endian order if needed */
14783 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
14785 /* Check and process for different type of WCQE and dispatch */
14786 switch (bf_get(lpfc_cqe_code, &cqevt)) {
14787 case CQE_CODE_COMPL_WQE:
14788 /* Process the WQ/RQ complete event */
14789 phba->last_completion_time = jiffies;
14790 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
14791 (struct lpfc_wcqe_complete *)&cqevt);
14793 case CQE_CODE_RELEASE_WQE:
14794 /* Process the WQ release event */
14795 lpfc_sli4_sp_handle_rel_wcqe(phba,
14796 (struct lpfc_wcqe_release *)&cqevt);
14798 case CQE_CODE_XRI_ABORTED:
14799 /* Process the WQ XRI abort event */
14800 phba->last_completion_time = jiffies;
14801 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14802 (struct sli4_wcqe_xri_aborted *)&cqevt);
14804 case CQE_CODE_RECEIVE:
14805 case CQE_CODE_RECEIVE_V1:
14806 /* Process the RQ event */
14807 phba->last_completion_time = jiffies;
14808 workposted = lpfc_sli4_sp_handle_rcqe(phba,
14809 (struct lpfc_rcqe *)&cqevt);
14812 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14813 "0388 Not a valid WCQE code: x%x\n",
14814 bf_get(lpfc_cqe_code, &cqevt));
14821 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
14822 * @phba: Pointer to HBA context object.
14823 * @eqe: Pointer to fast-path event queue entry.
14824 * @speq: Pointer to slow-path event queue.
14826 * This routine processes an event queue entry from the slow-path event queue.
14827 * It checks the MajorCode and MinorCode to determine whether this is a
14828 * completion event on a completion queue; if not, an error is logged and the
14829 * routine returns. Otherwise, it finds the corresponding completion
14830 * queue, processes all the entries on that completion queue, rearms the
14831 * completion queue, and then returns.
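*
* CQ processing is deferred to the driver workqueue; under kdump the
* per-CPU placement is skipped (sketch mirroring the body below):
*
*   if (is_kdump_kernel())
*           queue_work(phba->wq, &cq->spwork);
*   else
*           queue_work_on(cq->chann, phba->wq, &cq->spwork);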
14835 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14836 struct lpfc_queue *speq)
14838 struct lpfc_queue *cq = NULL, *childq;
14842 /* Get the reference to the corresponding CQ */
14843 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14845 list_for_each_entry(childq, &speq->child_list, list) {
14846 if (childq->queue_id == cqid) {
14851 if (unlikely(!cq)) {
14852 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14853 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14854 "0365 Slow-path CQ identifier "
14855 "(%d) does not exist\n", cqid);
14859 /* Save EQ associated with this CQ */
14860 cq->assoc_qp = speq;
14862 if (is_kdump_kernel())
14863 ret = queue_work(phba->wq, &cq->spwork);
14865 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
14868 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14869 "0390 Cannot schedule queue work "
14870 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14871 cqid, cq->queue_id, raw_smp_processor_id());
14875 * __lpfc_sli4_process_cq - Process elements of a CQ
14876 * @phba: Pointer to HBA context object.
14877 * @cq: Pointer to CQ to be processed
14878 * @handler: Routine to process each cqe
14879 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
14880 * @poll_mode: Polling mode we were called from
14882 * This routine processes completion queue entries in a CQ. While a valid
14883 * queue element is found, the handler is called. During processing checks
14884 * are made for periodic doorbell writes to let the hardware know of
14885 * element consumption.
14887 * If the max limit on cqes to process is hit, or there are no more valid
14888 * entries, the loop stops. If we processed a sufficient number of elements,
14889 * meaning there is sufficient load, rather than rearming and generating
14890 * another interrupt, a cq rescheduling delay will be set. A delay of 0
14891 * indicates no rescheduling.
14893 * Returns True if work scheduled, False otherwise.
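*
* Exclusive ownership of the CQ is taken with a lock-free claim so two
* contexts never drain the same CQ concurrently (sketch of the pattern
* used in the body):
*
*   if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
*           goto rearm_and_exit;
*   ...consume entries...
*   xchg(&cq->queue_claimed, 0);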
14896 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
14897 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
14898 struct lpfc_cqe *), unsigned long *delay,
14899 enum lpfc_poll_mode poll_mode)
14901 struct lpfc_cqe *cqe;
14902 bool workposted = false;
14903 int count = 0, consumed = 0;
14906 /* default - no reschedule */
14909 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
14910 goto rearm_and_exit;
14912 /* Process all the entries to the CQ */
14914 cqe = lpfc_sli4_cq_get(cq);
14916 workposted |= handler(phba, cq, cqe);
14917 __lpfc_sli4_consume_cqe(phba, cq, cqe);
14920 if (!(++count % cq->max_proc_limit))
14923 if (!(count % cq->notify_interval)) {
14924 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14927 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
14930 if (count == LPFC_NVMET_CQ_NOTIFY)
14931 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
14933 cqe = lpfc_sli4_cq_get(cq);
14935 if (count >= phba->cfg_cq_poll_threshold) {
14940 /* Note: complete the irq_poll softirq before rearming CQ */
14941 if (poll_mode == LPFC_IRQ_POLL)
14942 irq_poll_complete(&cq->iop);
14944 /* Track the max number of CQEs processed in 1 EQ */
14945 if (count > cq->CQ_max_cqe)
14946 cq->CQ_max_cqe = count;
14948 cq->assoc_qp->EQ_cqe_cnt += count;
14950 /* Catch the no cq entry condition */
14951 if (unlikely(count == 0))
14952 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14953 "0369 No entry from completion queue "
14954 "qid=%d\n", cq->queue_id);
14956 xchg(&cq->queue_claimed, 0);
14959 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14960 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
14966 * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
14967 * @cq: pointer to CQ to process
14969 * This routine calls the cq processing routine with a handler specific
14970 * to the type of queue bound to it.
14972 * The CQ routine returns two values: the first is the calling status,
14973 * which indicates whether work was queued to the background discovery
14974 * thread. If true, the routine should wake up the discovery thread;
14975 * the second is the delay parameter. If non-zero, rather than rearming
14976 * the CQ and yet another interrupt, the CQ handler should be queued so
14977 * that it is processed in a subsequent polling action. The value of
14978 * the delay indicates when to reschedule it.
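*
* A non-zero delay is honored via the delayed-work API (sketch
* mirroring the body below):
*
*   if (delay)
*           queue_delayed_work_on(cq->chann, phba->wq,
*                                 &cq->sched_spwork, delay);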
14981 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
14983 struct lpfc_hba *phba = cq->phba;
14984 unsigned long delay;
14985 bool workposted = false;
14988 /* Process and rearm the CQ */
14989 switch (cq->type) {
14991 workposted |= __lpfc_sli4_process_cq(phba, cq,
14992 lpfc_sli4_sp_handle_mcqe,
14993 &delay, LPFC_QUEUE_WORK);
14996 if (cq->subtype == LPFC_IO)
14997 workposted |= __lpfc_sli4_process_cq(phba, cq,
14998 lpfc_sli4_fp_handle_cqe,
14999 &delay, LPFC_QUEUE_WORK);
15001 workposted |= __lpfc_sli4_process_cq(phba, cq,
15002 lpfc_sli4_sp_handle_cqe,
15003 &delay, LPFC_QUEUE_WORK);
15006 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15007 "0370 Invalid completion queue type (%d)\n",
15013 if (is_kdump_kernel())
15014 ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
15017 ret = queue_delayed_work_on(cq->chann, phba->wq,
15018 &cq->sched_spwork, delay);
15020 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15021 "0394 Cannot schedule queue work "
15022 "for cqid=%d on CPU %d\n",
15023 cq->queue_id, cq->chann);
15026 /* wake up worker thread if there are works to be done */
15028 lpfc_worker_wake_up(phba);
15032 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
15034 * @work: pointer to work element
15036 * Resolves the containing CQ from the work element and calls the slow-path handler.
15039 lpfc_sli4_sp_process_cq(struct work_struct *work)
15041 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
15043 __lpfc_sli4_sp_process_cq(cq);
15047 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
15048 * @work: pointer to work element
15050 * Resolves the containing CQ from the delayed work element and calls the slow-path handler.
15053 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
15055 struct lpfc_queue *cq = container_of(to_delayed_work(work),
15056 struct lpfc_queue, sched_spwork);
15058 __lpfc_sli4_sp_process_cq(cq);
15062 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
15063 * @phba: Pointer to HBA context object.
15064 * @cq: Pointer to associated CQ
15065 * @wcqe: Pointer to work-queue completion queue entry.
15067 * This routine processes a fast-path work queue completion entry from the
15068 * fast-path event queue for FCP command response completion.
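*
* HBA-reported resource exhaustion is used as the trigger to ramp down
* the SCSI queue depth, e.g. (sketch of the check in the body):
*
*   if (status == IOSTAT_LOCAL_REJECT &&
*       (wcqe->parameter & IOERR_PARAM_MASK) == IOERR_NO_RESOURCES)
*           phba->lpfc_rampdown_queue_depth(phba);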
15071 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15072 struct lpfc_wcqe_complete *wcqe)
15074 struct lpfc_sli_ring *pring = cq->pring;
15075 struct lpfc_iocbq *cmdiocbq;
15076 struct lpfc_iocbq irspiocbq;
15077 unsigned long iflags;
15079 /* Check for response status */
15080 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
15081 /* If resource errors reported from HBA, reduce queue
15082 * depth of the SCSI device.
15084 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
15085 IOSTAT_LOCAL_REJECT)) &&
15086 ((wcqe->parameter & IOERR_PARAM_MASK) ==
15087 IOERR_NO_RESOURCES))
15088 phba->lpfc_rampdown_queue_depth(phba);
15090 /* Log the cmpl status */
15091 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
15092 "0373 FCP CQE cmpl: status=x%x: "
15093 "CQE: %08x %08x %08x %08x\n",
15094 bf_get(lpfc_wcqe_c_status, wcqe),
15095 wcqe->word0, wcqe->total_data_placed,
15096 wcqe->parameter, wcqe->word3);
15099 /* Look up the FCP command IOCB and create pseudo response IOCB */
15100 spin_lock_irqsave(&pring->ring_lock, iflags);
15101 pring->stats.iocb_event++;
15102 spin_unlock_irqrestore(&pring->ring_lock, iflags);
15103 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
15104 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15105 if (unlikely(!cmdiocbq)) {
15106 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15107 "0374 FCP complete with no corresponding "
15108 "cmdiocb: iotag (%d)\n",
15109 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15112 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
15113 cmdiocbq->isr_timestamp = cq->isr_timestamp;
15115 if (cmdiocbq->iocb_cmpl == NULL) {
15116 if (cmdiocbq->wqe_cmpl) {
15117 /* For FCP the flag is cleared in wqe_cmpl */
15118 if (!(cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
15119 cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
15120 spin_lock_irqsave(&phba->hbalock, iflags);
15121 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
15122 spin_unlock_irqrestore(&phba->hbalock, iflags);
15125 /* Pass the cmd_iocb and the wcqe to the upper layer */
15126 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
15129 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15130 "0375 FCP cmdiocb not callback function "
15132 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15136 /* Only SLI4 non-IO commands still use IOCB */
15137 /* Fake the irspiocb and copy necessary response information */
15138 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
15140 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
15141 spin_lock_irqsave(&phba->hbalock, iflags);
15142 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
15143 spin_unlock_irqrestore(&phba->hbalock, iflags);
15146 /* Pass the cmd_iocb and the rsp state to the upper layer */
15147 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
15151 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
15152 * @phba: Pointer to HBA context object.
15153 * @cq: Pointer to completion queue.
15154 * @wcqe: Pointer to work-queue completion queue entry.
15156 * This routine handles a fast-path WQ entry consumed event by invoking the
15157 * proper WQ release routine on the matching fast-path WQ.
15160 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15161 struct lpfc_wcqe_release *wcqe)
15163 struct lpfc_queue *childwq;
15164 bool wqid_matched = false;
15167 /* Check for fast-path FCP work queue release */
15168 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
15169 list_for_each_entry(childwq, &cq->child_list, list) {
15170 if (childwq->queue_id == hba_wqid) {
15171 lpfc_sli4_wq_release(childwq,
15172 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
15173 if (childwq->q_flag & HBA_NVMET_WQFULL)
15174 lpfc_nvmet_wqfull_process(phba, childwq);
15175 wqid_matched = true;
15179 /* Report warning log message if no match found */
15180 if (!wqid_matched)
15181 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15182 "2580 Fast-path wqe consume event carries "
15183 "mismatched qid: wcqe-qid=x%x\n", hba_wqid);
15187 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
15188 * @phba: Pointer to HBA context object.
15189 * @cq: Pointer to completion queue.
15190 * @rcqe: Pointer to receive-queue completion queue entry.
15192 * This routine processes a receive-queue completion queue entry.
15194 * Return: true if work posted to worker thread, otherwise false.
15197 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15198 struct lpfc_rcqe *rcqe)
15200 bool workposted = false;
15201 struct lpfc_queue *hrq;
15202 struct lpfc_queue *drq;
15203 struct rqb_dmabuf *dma_buf;
15204 struct fc_frame_header *fc_hdr;
15205 struct lpfc_nvmet_tgtport *tgtp;
15206 uint32_t status, rq_id;
15207 unsigned long iflags;
15208 uint32_t fctl, idx;
15210 if ((phba->nvmet_support == 0) ||
15211 (phba->sli4_hba.nvmet_cqset == NULL))
15214 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
15215 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
15216 drq = phba->sli4_hba.nvmet_mrq_data[idx];
15218 /* sanity check on queue memory */
15219 if (unlikely(!hrq) || unlikely(!drq))
15222 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
15223 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
15225 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
15227 if ((phba->nvmet_support == 0) ||
15228 (rq_id != hrq->queue_id))
15231 status = bf_get(lpfc_rcqe_status, rcqe);
15233 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
15234 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15235 "6126 Receive Frame Truncated!!\n");
15237 case FC_STATUS_RQ_SUCCESS:
15238 spin_lock_irqsave(&phba->hbalock, iflags);
15239 lpfc_sli4_rq_release(hrq, drq);
15240 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
15242 hrq->RQ_no_buf_found++;
15243 spin_unlock_irqrestore(&phba->hbalock, iflags);
15246 spin_unlock_irqrestore(&phba->hbalock, iflags);
15248 hrq->RQ_buf_posted--;
15249 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
15251 /* Just some basic sanity checks on FCP Command frame */
15252 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
15253 fc_hdr->fh_f_ctl[1] << 8 |
15254 fc_hdr->fh_f_ctl[2]);
15256 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
15257 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
15258 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
15261 if (fc_hdr->fh_type == FC_TYPE_FCP) {
15262 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
15263 lpfc_nvmet_unsol_fcp_event(
15264 phba, idx, dma_buf, cq->isr_timestamp,
15265 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
15269 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
15271 case FC_STATUS_INSUFF_BUF_FRM_DISC:
15272 if (phba->nvmet_support) {
15273 tgtp = phba->targetport->private;
15274 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15275 "6401 RQE Error x%x, posted %d err_cnt "
15277 status, hrq->RQ_buf_posted,
15278 hrq->RQ_no_posted_buf,
15279 atomic_read(&tgtp->rcv_fcp_cmd_in),
15280 atomic_read(&tgtp->rcv_fcp_cmd_out),
15281 atomic_read(&tgtp->xmt_fcp_release));
15285 case FC_STATUS_INSUFF_BUF_NEED_BUF:
15286 hrq->RQ_no_posted_buf++;
15287 /* Post more buffers if possible */
15295 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
15296 * @phba: adapter with cq
15297 * @cq: Pointer to the completion queue.
15298 * @cqe: Pointer to fast-path completion queue entry.
15300 * This routine processes a fast-path work queue completion entry from the
15301 * fast-path event queue for FCP command response completion.
15303 * Return: true if work posted to worker thread, otherwise false.
15306 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15307 struct lpfc_cqe *cqe)
15309 struct lpfc_wcqe_release wcqe;
15310 bool workposted = false;
15312 /* Copy the work queue CQE and convert endian order if needed */
15313 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
15315 /* Check and process for different type of WCQE and dispatch */
15316 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
15317 case CQE_CODE_COMPL_WQE:
15318 case CQE_CODE_NVME_ERSP:
15320 /* Process the WQ complete event */
15321 phba->last_completion_time = jiffies;
15322 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
15323 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
15324 (struct lpfc_wcqe_complete *)&wcqe);
15326 case CQE_CODE_RELEASE_WQE:
15327 cq->CQ_release_wqe++;
15328 /* Process the WQ release event */
15329 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
15330 (struct lpfc_wcqe_release *)&wcqe);
15332 case CQE_CODE_XRI_ABORTED:
15333 cq->CQ_xri_aborted++;
15334 /* Process the WQ XRI abort event */
15335 phba->last_completion_time = jiffies;
15336 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
15337 (struct sli4_wcqe_xri_aborted *)&wcqe);
15339 case CQE_CODE_RECEIVE_V1:
15340 case CQE_CODE_RECEIVE:
15341 phba->last_completion_time = jiffies;
15342 if (cq->subtype == LPFC_NVMET) {
15343 workposted = lpfc_sli4_nvmet_handle_rcqe(
15344 phba, cq, (struct lpfc_rcqe *)&wcqe);
15348 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15349 "0144 Not a valid CQE code: x%x\n",
15350 bf_get(lpfc_wcqe_c_code, &wcqe));
15357 * lpfc_sli4_sched_cq_work - Schedules cq work
15358 * @phba: Pointer to HBA context object.
15359 * @cq: Pointer to CQ
15362 * This routine checks the poll mode of the CQ corresponding to
15363 * cq->chann, then either schedules a softirq or queue_work to complete
15366 * The queue_work path is taken in NVMET mode, or if poll_mode is
15367 * LPFC_QUEUE_WORK. Otherwise, the softirq path is taken.
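*
* The two paths reduce to (sketch):
*
*   irq_poll_sched(&cq->iop);                           (softirq)
*   queue_work_on(cq->chann, phba->wq, &cq->irqwork);   (workqueue)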
15370 static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
15371 struct lpfc_queue *cq, uint16_t cqid)
15375 switch (cq->poll_mode) {
15376 case LPFC_IRQ_POLL:
15377 /* CGN mgmt is mutually exclusive with softirq processing */
15378 if (phba->cmf_active_mode == LPFC_CFG_OFF) {
15379 irq_poll_sched(&cq->iop);
15383 case LPFC_QUEUE_WORK:
15385 if (is_kdump_kernel())
15386 ret = queue_work(phba->wq, &cq->irqwork);
15388 ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
15390 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15391 "0383 Cannot schedule queue work "
15392 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
15393 cqid, cq->queue_id,
15394 raw_smp_processor_id());
15399 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
15400 * @phba: Pointer to HBA context object.
15401 * @eq: Pointer to the queue structure.
15402 * @eqe: Pointer to fast-path event queue entry.
15404 * This routine processes an event queue entry from the fast-path event queue.
15405 * It checks the MajorCode and MinorCode to determine whether this is a
15406 * completion event on a completion queue; if not, an error is logged and the
15407 * routine returns. Otherwise, it finds the corresponding completion
15408 * queue, processes all the entries on the completion queue, rearms the
15409 * completion queue, and then returns.
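*
* The hot path resolves the CQ with an O(1) table lookup before any
* list walking (sketch mirroring the body below):
*
*   if (cqid <= phba->sli4_hba.cq_max)
*           cq = phba->sli4_hba.cq_lookup[cqid];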
15412 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
15413 struct lpfc_eqe *eqe)
15415 struct lpfc_queue *cq = NULL;
15416 uint32_t qidx = eq->hdwq;
15419 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
15420 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15421 "0366 Not a valid completion "
15422 "event: majorcode=x%x, minorcode=x%x\n",
15423 bf_get_le32(lpfc_eqe_major_code, eqe),
15424 bf_get_le32(lpfc_eqe_minor_code, eqe));
15428 /* Get the reference to the corresponding CQ */
15429 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
15431 /* Use the fast lookup method first */
15432 if (cqid <= phba->sli4_hba.cq_max) {
15433 cq = phba->sli4_hba.cq_lookup[cqid];
15438 /* Next check for NVMET completion */
15439 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
15440 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
15441 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
15442 /* Process NVMET unsol rcv */
15443 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
15448 if (phba->sli4_hba.nvmels_cq &&
15449 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
15450 /* Process NVME unsol rcv */
15451 cq = phba->sli4_hba.nvmels_cq;
15454 /* Otherwise this is a Slow path event */
15456 lpfc_sli4_sp_handle_eqe(phba, eqe,
15457 phba->sli4_hba.hdwq[qidx].hba_eq);
15462 if (unlikely(cqid != cq->queue_id)) {
15463 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15464 "0368 Mismatched fast-path completion "
15465 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
15466 cqid, cq->queue_id);
15471 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
15472 if (phba->ktime_on)
15473 cq->isr_timestamp = ktime_get_ns();
15475 cq->isr_timestamp = 0;
15477 lpfc_sli4_sched_cq_work(phba, cq, cqid);
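/*
 * Illustrative sketch, not driver code: cqid resolution above is an O(1)
 * table lookup bounded by cq_max; only on a miss does the code fall back
 * to the NVMET CQ-set range check and the NVME LS CQ match. Hypothetical
 * example_* names.
 */
struct example_cq;

static struct example_cq *example_cqid_lookup(struct example_cq **cq_lookup,
					      uint32_t cq_max, uint32_t cqid)
{
	if (cqid <= cq_max)
		return cq_lookup[cqid];	/* fast path; may still be NULL */
	return NULL;			/* caller tries the slower matches */
}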
15481 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
15482 * @cq: Pointer to CQ to be processed
15483 * @poll_mode: Enum lpfc_poll_mode to determine poll mode
15485 * This routine calls the cq processing routine with the handler for fast-path CQEs.
15488 * The CQ routine returns two values: the first is the calling status,
15489 * which indicates whether work was queued to the background discovery
15490 * thread. If true, the routine should wakeup the discovery thread;
15491 * the second is the delay parameter. If non-zero, rather than rearming
15492 * the CQ and yet another interrupt, the CQ handler should be queued so
15493 * that it is processed in a subsequent polling action. The value of
15494 * the delay indicates when to reschedule it.
15497 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
15498 enum lpfc_poll_mode poll_mode)
15500 struct lpfc_hba *phba = cq->phba;
15501 unsigned long delay;
15502 bool workposted = false;
15505 /* process and rearm the CQ */
15506 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
15507 &delay, poll_mode);
15510 if (is_kdump_kernel())
15511 ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
15514 ret = queue_delayed_work_on(cq->chann, phba->wq,
15515 &cq->sched_irqwork, delay);
15517 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15518 "0367 Cannot schedule queue work "
15519 "for cqid=%d on CPU %d\n",
15520 cq->queue_id, cq->chann);
15523 /* wake up worker thread if there are works to be done */
15525 lpfc_worker_wake_up(phba);
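/*
 * Illustrative sketch, not driver code: one CQ processing pass yields two
 * independent outputs, mirrored below with hypothetical callbacks. A
 * non-zero delay means "poll again later instead of rearming now", and
 * workposted means the slow-path worker thread has pending work.
 */
static void example_finish_cq_pass(unsigned long delay, bool workposted,
				   void (*requeue_delayed)(unsigned long),
				   void (*wake_worker)(void))
{
	if (delay)
		requeue_delayed(delay);	/* defer; do not rearm the CQ yet */
	if (workposted)
		wake_worker();		/* kick the discovery worker thread */
}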
15529 * lpfc_sli4_hba_process_cq - fast-path work handler when started by interrupt
15531 * @work: pointer to work element
15533 * Translates from the work element and calls the fast-path handler.
15536 lpfc_sli4_hba_process_cq(struct work_struct *work)
15538 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
15540 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
15544 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
15545 * @work: pointer to work element
15547 * Translates from the delayed work element and calls the fast-path handler.
15550 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
15552 struct lpfc_queue *cq = container_of(to_delayed_work(work),
15553 struct lpfc_queue, sched_irqwork);
15555 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
15559 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
15560 * @irq: Interrupt number.
15561 * @dev_id: The device context pointer.
15563 * This function is directly called from the PCI layer as an interrupt
15564 * service routine when device with SLI-4 interface spec is enabled with
15565 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
15566 * ring event in the HBA. However, when the device is enabled with either
15567 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
15568 * device-level interrupt handler. When the PCI slot is in error recovery
15569 * or the HBA is undergoing initialization, the interrupt handler will not
15570 * process the interrupt. SCSI FCP fast-path ring events are handled in
15571 * the interrupt context. This function is called without any lock held.
15572 * It gets the hbalock to access and update SLI data structures. Note that
15573 * the FCP EQ to FCP CQ mapping is one-to-one, such that the FCP EQ index
15574 * is equal to the FCP CQ index.
15576 * The link attention and ELS ring attention events are handled
15577 * by the worker thread. The interrupt handler signals the worker thread
15578 * and returns for these events. This function is called without any lock
15579 * held. It gets the hbalock to access and update SLI data structures.
15581 * This function returns IRQ_HANDLED when interrupt is handled else it
15582 * returns IRQ_NONE.
15585 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
15587 struct lpfc_hba *phba;
15588 struct lpfc_hba_eq_hdl *hba_eq_hdl;
15589 struct lpfc_queue *fpeq;
15590 unsigned long iflag;
15593 struct lpfc_eq_intr_info *eqi;
15595 /* Get the driver's phba structure from the dev_id */
15596 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
15597 phba = hba_eq_hdl->phba;
15598 hba_eqidx = hba_eq_hdl->idx;
15600 if (unlikely(!phba))
15602 if (unlikely(!phba->sli4_hba.hdwq))
15605 /* Get to the EQ struct associated with this vector */
15606 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
15607 if (unlikely(!fpeq))
15610 /* Check device state for handling interrupt */
15611 if (unlikely(lpfc_intr_state_check(phba))) {
15612 /* Check again for link_state with lock held */
15613 spin_lock_irqsave(&phba->hbalock, iflag);
15614 if (phba->link_state < LPFC_LINK_DOWN)
15615 /* Flush, clear interrupt, and rearm the EQ */
15616 lpfc_sli4_eqcq_flush(phba, fpeq);
15617 spin_unlock_irqrestore(&phba->hbalock, iflag);
15621 eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
15624 fpeq->last_cpu = raw_smp_processor_id();
15626 if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
15627 fpeq->q_flag & HBA_EQ_DELAY_CHK &&
15628 phba->cfg_auto_imax &&
15629 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
15630 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
15631 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
15633 /* process and rearm the EQ */
15634 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
15636 if (unlikely(ecount == 0)) {
15637 fpeq->EQ_no_entry++;
15638 if (phba->intr_type == MSIX)
15639 /* MSI-X vectors are not shared, so an interrupt with no EQE is unexpected */
15640 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15641 "0358 MSI-X interrupt with no EQE\n");
15643 /* Non MSI-X interrupts may be shared, so an empty EQ is expected */
15647 return IRQ_HANDLED;
15648 } /* lpfc_sli4_hba_intr_handler */
15651 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
15652 * @irq: Interrupt number.
15653 * @dev_id: The device context pointer.
15655 * This function is the device-level interrupt handler to device with SLI-4
15656 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
15657 * interrupt mode is enabled and there is an event in the HBA which requires
15658 * driver attention. This function invokes the slow-path interrupt attention
15659 * handling function and fast-path interrupt attention handling function in
15660 * turn to process the relevant HBA attention events. This function is called
15661 * without any lock held. It gets the hbalock to access and update SLI data
15664 * This function returns IRQ_HANDLED when interrupt is handled, else it
15665 * returns IRQ_NONE.
15668 lpfc_sli4_intr_handler(int irq, void *dev_id)
15670 struct lpfc_hba *phba;
15671 irqreturn_t hba_irq_rc;
15672 bool hba_handled = false;
15675 /* Get the driver's phba structure from the dev_id */
15676 phba = (struct lpfc_hba *)dev_id;
15678 if (unlikely(!phba))
15682 * Invoke fast-path host attention interrupt handling as appropriate.
15684 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
15685 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
15686 &phba->sli4_hba.hba_eq_hdl[qidx]);
15687 if (hba_irq_rc == IRQ_HANDLED)
15688 hba_handled |= true;
15691 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
15692 } /* lpfc_sli4_intr_handler */
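/*
 * Illustrative sketch, not driver code: in MSI/INTx mode the device-level
 * handler above just sweeps every EQ vector and reports IRQ_HANDLED if any
 * vector had work. Hypothetical example_* names; non-zero maps to
 * IRQ_HANDLED.
 */
static int example_sweep_vectors(int (*handle_vector)(int idx), int nvec)
{
	int idx, handled = 0;

	for (idx = 0; idx < nvec; idx++)
		handled |= handle_vector(idx);

	return handled;
}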
15694 void lpfc_sli4_poll_hbtimer(struct timer_list *t)
15696 struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
15697 struct lpfc_queue *eq;
15702 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
15703 i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
15704 if (!list_empty(&phba->poll_list))
15705 mod_timer(&phba->cpuhp_poll_timer,
15706 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
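/*
 * Illustrative sketch, not driver code: the heartbeat above re-arms itself
 * only while the poll list is non-empty, so slow-path polling dies out on
 * its own once every EQ has returned to interrupt mode.
 */
static void example_poll_heartbeat(bool poll_list_empty,
				   void (*rearm)(unsigned int msecs))
{
	if (!poll_list_empty)
		rearm(LPFC_POLL_HB);	/* keep polling until the list drains */
}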
15711 inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
15713 struct lpfc_hba *phba = eq->phba;
15717 * Unlocking an irq is one of the entry points to check
15718 * for re-schedule, but we are good for the io submission
15719 * path as the midlayer does a get_cpu to glue us in. Flush
15720 * out the invalidate queue so we can see the updated value.
15725 if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
15726 /* We will likely not get the completion for the caller
15727 * during this iteration, but that is fine.
15728 * Future io's coming in on this eq should be able to
15729 * pick it up. As for the case of single io's, they
15730 * will be handled through a sched from the polling timer
15731 * function, which is currently triggered every 1 msec.
15733 i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
15738 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
15740 struct lpfc_hba *phba = eq->phba;
15742 /* kickstart slowpath processing if needed */
15743 if (list_empty(&phba->poll_list))
15744 mod_timer(&phba->cpuhp_poll_timer,
15745 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15747 list_add_rcu(&eq->_poll_list, &phba->poll_list);
15751 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
15753 struct lpfc_hba *phba = eq->phba;
15755 /* Disable slowpath processing for this eq. Kick-start the eq
15756 * by re-arming it ASAP
15758 list_del_rcu(&eq->_poll_list);
15761 if (list_empty(&phba->poll_list))
15762 del_timer_sync(&phba->cpuhp_poll_timer);
15765 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
15767 struct lpfc_queue *eq, *next;
15769 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
15770 list_del(&eq->_poll_list);
15772 INIT_LIST_HEAD(&phba->poll_list);
15777 __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
15779 if (mode == eq->mode)
15782 * Currently this function is only called during a hotplug
15783 * event and the cpu on which this function is executing
15784 * is going offline. By now the hotplug has instructed
15785 * the scheduler to remove this cpu from the cpu active mask.
15786 * So we don't need to worry about being put aside by the
15787 * scheduler for a high priority process. Yes, interrupts
15788 * could still come in, but they are known to retire ASAP.
15791 /* Disable polling in the fastpath */
15792 WRITE_ONCE(eq->mode, mode);
15793 /* flush out the store buffer */
15797 * Add this eq to the polling list and start polling. For
15798 * a grace period both the interrupt handler and the poller
15799 * will try to process the eq _but_ that's fine. We have a
15800 * synchronization mechanism in place (queue_claimed) to
15801 * deal with it. This is just a draining phase for the
15802 * interrupt handler (not eq's), as we have guaranteed
15803 * through the barrier that all the CPUs have seen the new
15804 * CQ_POLLED state, which effectively disables the REARMING
15805 * of the EQ. The whole idea is that eq's die off eventually
15806 * as we are not rearming them anymore.
15808 mode ? lpfc_sli4_add_to_poll_list(eq) :
15809 lpfc_sli4_remove_from_poll_list(eq);
15812 void lpfc_sli4_start_polling(struct lpfc_queue *eq)
15814 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
15817 void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
15819 struct lpfc_hba *phba = eq->phba;
15821 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
15823 /* Kick start the pending io's in h/w.
15824 * Once we switch back to interrupt processing on an eq,
15825 * the io completion path will only arm the eq when it
15826 * receives a completion. But since the eq is in the
15827 * disarmed state, it doesn't receive a completion. This
15828 * creates a deadlock scenario.
15830 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
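/*
 * Illustrative sketch, not driver code: the eq mode flip is a WRITE_ONCE
 * store followed by a barrier, so the interrupt handler's READ_ONCE in
 * lpfc_sli4_poll_eq() observes the new mode before the poll-list
 * membership changes. smp_wmb() below stands in for the store-buffer
 * flush described in the comment above; example_* names are hypothetical.
 */
static void example_switch_eqmode(int *mode, int new_mode)
{
	if (READ_ONCE(*mode) == new_mode)
		return;
	WRITE_ONCE(*mode, new_mode);	/* enable/disable fastpath polling */
	smp_wmb();			/* publish before list manipulation */
}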
15834 * lpfc_sli4_queue_free - free a queue structure and associated memory
15835 * @queue: The queue structure to free.
15837 * This function frees a queue structure and the DMAable memory used for
15838 * the host resident queue. This function must be called after destroying the
15839 * queue on the HBA.
15842 lpfc_sli4_queue_free(struct lpfc_queue *queue)
15844 struct lpfc_dmabuf *dmabuf;
15849 if (!list_empty(&queue->wq_list))
15850 list_del(&queue->wq_list);
15852 while (!list_empty(&queue->page_list)) {
15853 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
15855 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
15856 dmabuf->virt, dmabuf->phys);
15860 lpfc_free_rq_buffer(queue->phba, queue);
15861 kfree(queue->rqbp);
15864 if (!list_empty(&queue->cpu_list))
15865 list_del(&queue->cpu_list);
15872 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
15873 * @phba: The HBA that this queue is being created on.
15874 * @page_size: The size of a queue page
15875 * @entry_size: The size of each queue entry for this queue.
15876 * @entry_count: The number of entries that this queue will handle.
15877 * @cpu: The cpu that will primarily utilize this queue.
15879 * This function allocates a queue structure and the DMAable memory used for
15880 * the host resident queue. This function must be called before creating the
15881 * queue on the HBA.
15883 struct lpfc_queue *
15884 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
15885 uint32_t entry_size, uint32_t entry_count, int cpu)
15887 struct lpfc_queue *queue;
15888 struct lpfc_dmabuf *dmabuf;
15889 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15892 if (!phba->sli4_hba.pc_sli4_params.supported)
15893 hw_page_size = page_size;
15895 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
15897 /* If needed, adjust the page count to match the max the adapter supports */
15898 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
15899 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
15901 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
15902 GFP_KERNEL, cpu_to_node(cpu));
15906 INIT_LIST_HEAD(&queue->list);
15907 INIT_LIST_HEAD(&queue->_poll_list);
15908 INIT_LIST_HEAD(&queue->wq_list);
15909 INIT_LIST_HEAD(&queue->wqfull_list);
15910 INIT_LIST_HEAD(&queue->page_list);
15911 INIT_LIST_HEAD(&queue->child_list);
15912 INIT_LIST_HEAD(&queue->cpu_list);
15914 /* Set queue parameters now. If the system cannot provide memory
15915 * resources, the free routine needs to know what was allocated.
15917 queue->page_count = pgcnt;
15918 queue->q_pgs = (void **)&queue[1];
15919 queue->entry_cnt_per_pg = hw_page_size / entry_size;
15920 queue->entry_size = entry_size;
15921 queue->entry_count = entry_count;
15922 queue->page_size = hw_page_size;
15923 queue->phba = phba;
15925 for (x = 0; x < queue->page_count; x++) {
15926 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
15927 dev_to_node(&phba->pcidev->dev));
15930 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
15931 hw_page_size, &dmabuf->phys,
15933 if (!dmabuf->virt) {
15937 dmabuf->buffer_tag = x;
15938 list_add_tail(&dmabuf->list, &queue->page_list);
15939 /* use lpfc_sli4_qe to index a particular entry in this page */
15940 queue->q_pgs[x] = dmabuf->virt;
15942 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
15943 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
15944 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
15945 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
15947 /* notify_interval will be set during q creation */
15951 lpfc_sli4_queue_free(queue);
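/*
 * Illustrative sketch, not driver code: the page budget computed above is
 * the total entry footprint rounded up to whole hardware pages, then
 * clamped to the adapter's advertised wqpcnt limit.
 */
static uint32_t example_queue_pgcnt(uint32_t entry_size, uint32_t entry_count,
				    uint32_t hw_page_size, uint32_t wqpcnt)
{
	uint32_t pgcnt = ALIGN(entry_size * entry_count, hw_page_size) /
			 hw_page_size;

	return (pgcnt > wqpcnt) ? wqpcnt : pgcnt;
}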
15956 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
15957 * @phba: HBA structure that indicates port to create a queue on.
15958 * @pci_barset: PCI BAR set flag.
15960 * This function shall perform iomap of the specified PCI BAR address to host
15961 * memory address if not already done so and return it. The returned host
15962 * memory address can be NULL.
15964 static void __iomem *
15965 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
15970 switch (pci_barset) {
15971 case WQ_PCI_BAR_0_AND_1:
15972 return phba->pci_bar0_memmap_p;
15973 case WQ_PCI_BAR_2_AND_3:
15974 return phba->pci_bar2_memmap_p;
15975 case WQ_PCI_BAR_4_AND_5:
15976 return phba->pci_bar4_memmap_p;
15984 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
15985 * @phba: HBA structure that EQs are on.
15986 * @startq: The starting EQ index to modify
15987 * @numq: The number of EQs (consecutive indexes) to modify
15988 * @usdelay: amount of delay
15990 * This function revises the EQ delay on 1 or more EQs. The EQ delay
15991 * is set either by writing to a register (if supported by the SLI Port)
15992 * or by mailbox command. The mailbox command allows several EQs to be
15995 * The @phba struct is used to send a mailbox command to HBA. The @startq
15996 * is used to get the starting EQ index to change. The @numq value is
15997 * used to specify how many consecutive EQ indexes, starting at EQ index,
15998 * are to be changed. This function is asynchronous and will wait for any
15999 * mailbox commands to finish before returning.
16001 * On success this function will return a zero. If unable to allocate
16002 * enough memory this function will return -ENOMEM. If a mailbox command
16003 * fails this function will return -ENXIO. Note: on ENXIO, some EQs may
16004 * have had their delay multiplier changed.
16007 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
16008 uint32_t numq, uint32_t usdelay)
16010 struct lpfc_mbx_modify_eq_delay *eq_delay;
16011 LPFC_MBOXQ_t *mbox;
16012 struct lpfc_queue *eq;
16013 int cnt = 0, rc, length;
16014 uint32_t shdr_status, shdr_add_status;
16017 union lpfc_sli4_cfg_shdr *shdr;
16019 if (startq >= phba->cfg_irq_chann)
16022 if (usdelay > 0xFFFF) {
16023 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
16024 "6429 usdelay %d too large. Scaled down to "
16025 "0xFFFF.\n", usdelay);
16029 /* set values by EQ_DELAY register if supported */
16030 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
16031 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
16032 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
16036 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
16044 /* Otherwise, set values by mailbox cmd */
16046 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16048 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16049 "6428 Failed allocating mailbox cmd buffer."
16050 " EQ delay was not set.\n");
16053 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
16054 sizeof(struct lpfc_sli4_cfg_mhdr));
16055 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16056 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
16057 length, LPFC_SLI4_MBX_EMBED);
16058 eq_delay = &mbox->u.mqe.un.eq_delay;
16060 /* Calculate the delay multiplier from the maximum interrupts per second */
16061 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
16064 if (dmult > LPFC_DMULT_MAX)
16065 dmult = LPFC_DMULT_MAX;
16067 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
16068 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
16071 eq->q_mode = usdelay;
16072 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
16073 eq_delay->u.request.eq[cnt].phase = 0;
16074 eq_delay->u.request.eq[cnt].delay_multi = dmult;
16079 eq_delay->u.request.num_eq = cnt;
16081 mbox->vport = phba->pport;
16082 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16083 mbox->ctx_buf = NULL;
16084 mbox->ctx_ndlp = NULL;
16085 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16086 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
16087 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16088 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16089 if (shdr_status || shdr_add_status || rc) {
16090 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16091 "2512 MODIFY_EQ_DELAY mailbox failed with "
16092 "status x%x add_status x%x, mbx status x%x\n",
16093 shdr_status, shdr_add_status, rc);
16095 mempool_free(mbox, phba->mbox_mem_pool);
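/*
 * Illustrative sketch, not driver code: the mailbox path above converts
 * the requested microsecond delay into the firmware's delay multiplier,
 * capping the input at 16 bits and the result at LPFC_DMULT_MAX, matching
 * the clamps performed above. The helper name is hypothetical.
 */
static uint32_t example_usdelay_to_dmult(uint32_t usdelay)
{
	uint32_t dmult;

	if (usdelay > 0xFFFF)
		usdelay = 0xFFFF;	/* scaled down, with a log message */

	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
	return (dmult > LPFC_DMULT_MAX) ? LPFC_DMULT_MAX : dmult;
}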
16100 * lpfc_eq_create - Create an Event Queue on the HBA
16101 * @phba: HBA structure that indicates port to create a queue on.
16102 * @eq: The queue structure to use to create the event queue.
16103 * @imax: The maximum interrupt per second limit.
16105 * This function creates an event queue, as detailed in @eq, on a port,
16106 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
16108 * The @phba struct is used to send mailbox command to HBA. The @eq struct
16109 * is used to get the entry count and entry size that are necessary to
16110 * determine the number of pages to allocate and use for this queue. This
16111 * function will send the EQ_CREATE mailbox command to the HBA to setup the
16112 * event queue. This function is asynchronous and will wait for the mailbox
16113 * command to finish before continuing.
16115 * On success this function will return a zero. If unable to allocate enough
16116 * memory this function will return -ENOMEM. If the queue create mailbox command
16117 * fails this function will return -ENXIO.
16120 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
16122 struct lpfc_mbx_eq_create *eq_create;
16123 LPFC_MBOXQ_t *mbox;
16124 int rc, length, status = 0;
16125 struct lpfc_dmabuf *dmabuf;
16126 uint32_t shdr_status, shdr_add_status;
16127 union lpfc_sli4_cfg_shdr *shdr;
16129 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16131 /* sanity check on queue memory */
16134 if (!phba->sli4_hba.pc_sli4_params.supported)
16135 hw_page_size = SLI4_PAGE_SIZE;
16137 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16140 length = (sizeof(struct lpfc_mbx_eq_create) -
16141 sizeof(struct lpfc_sli4_cfg_mhdr));
16142 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16143 LPFC_MBOX_OPCODE_EQ_CREATE,
16144 length, LPFC_SLI4_MBX_EMBED);
16145 eq_create = &mbox->u.mqe.un.eq_create;
16146 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
16147 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
16149 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
16151 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
16153 /* Use version 2 of CREATE_EQ if eqav is set */
16154 if (phba->sli4_hba.pc_sli4_params.eqav) {
16155 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16156 LPFC_Q_CREATE_VERSION_2);
16157 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
16158 phba->sli4_hba.pc_sli4_params.eqav);
16161 /* don't setup delay multiplier using EQ_CREATE */
16163 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
16165 switch (eq->entry_count) {
16167 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16168 "0360 Unsupported EQ count. (%d)\n",
16170 if (eq->entry_count < 256) {
16174 fallthrough; /* otherwise default to smallest count */
16176 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16180 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16184 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16188 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16192 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16196 list_for_each_entry(dmabuf, &eq->page_list, list) {
16197 memset(dmabuf->virt, 0, hw_page_size);
16198 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16199 putPaddrLow(dmabuf->phys);
16200 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16201 putPaddrHigh(dmabuf->phys);
16203 mbox->vport = phba->pport;
16204 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16205 mbox->ctx_buf = NULL;
16206 mbox->ctx_ndlp = NULL;
16207 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16208 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16209 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16210 if (shdr_status || shdr_add_status || rc) {
16211 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16212 "2500 EQ_CREATE mailbox failed with "
16213 "status x%x add_status x%x, mbx status x%x\n",
16214 shdr_status, shdr_add_status, rc);
16217 eq->type = LPFC_EQ;
16218 eq->subtype = LPFC_NONE;
16219 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
16220 if (eq->queue_id == 0xFFFF)
16222 eq->host_index = 0;
16223 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
16224 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
16226 mempool_free(mbox, phba->mbox_mem_pool);
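/*
 * Illustrative sketch, not driver code: every queue-create mailbox above
 * posts its pages to the firmware as {low, high} 32-bit halves of the DMA
 * address, which is all that putPaddrLow()/putPaddrHigh() extract.
 */
static void example_post_page_addr(uint64_t phys, uint32_t *addr_lo,
				   uint32_t *addr_hi)
{
	*addr_lo = (uint32_t)(phys & 0xffffffffULL);	/* putPaddrLow()  */
	*addr_hi = (uint32_t)(phys >> 32);		/* putPaddrHigh() */
}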
16230 static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
16232 struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
16234 __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
16240 * lpfc_cq_create - Create a Completion Queue on the HBA
16241 * @phba: HBA structure that indicates port to create a queue on.
16242 * @cq: The queue structure to use to create the completion queue.
16243 * @eq: The event queue to bind this completion queue to.
16244 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16245 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16247 * This function creates a completion queue, as detailed in @cq, on a port,
16248 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
16250 * The @phba struct is used to send mailbox command to HBA. The @cq struct
16251 * is used to get the entry count and entry size that are necessary to
16252 * determine the number of pages to allocate and use for this queue. The @eq
16253 * is used to indicate which event queue to bind this completion queue to. This
16254 * function will send the CQ_CREATE mailbox command to the HBA to setup the
16255 * completion queue. This function is asynchronous and will wait for the mailbox
16256 * command to finish before continuing.
16258 * On success this function will return a zero. If unable to allocate enough
16259 * memory this function will return -ENOMEM. If the queue create mailbox command
16260 * fails this function will return -ENXIO.
16263 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
16264 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
16266 struct lpfc_mbx_cq_create *cq_create;
16267 struct lpfc_dmabuf *dmabuf;
16268 LPFC_MBOXQ_t *mbox;
16269 int rc, length, status = 0;
16270 uint32_t shdr_status, shdr_add_status;
16271 union lpfc_sli4_cfg_shdr *shdr;
16273 /* sanity check on queue memory */
16277 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16280 length = (sizeof(struct lpfc_mbx_cq_create) -
16281 sizeof(struct lpfc_sli4_cfg_mhdr));
16282 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16283 LPFC_MBOX_OPCODE_CQ_CREATE,
16284 length, LPFC_SLI4_MBX_EMBED);
16285 cq_create = &mbox->u.mqe.un.cq_create;
16286 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
16287 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
16289 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
16290 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
16291 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16292 phba->sli4_hba.pc_sli4_params.cqv);
16293 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
16294 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
16295 (cq->page_size / SLI4_PAGE_SIZE));
16296 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
16298 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
16299 phba->sli4_hba.pc_sli4_params.cqav);
16301 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
16304 switch (cq->entry_count) {
16307 if (phba->sli4_hba.pc_sli4_params.cqv ==
16308 LPFC_Q_CREATE_VERSION_2) {
16309 cq_create->u.request.context.lpfc_cq_context_count =
16311 bf_set(lpfc_cq_context_count,
16312 &cq_create->u.request.context,
16313 LPFC_CQ_CNT_WORD7);
16318 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16319 "0361 Unsupported CQ count: "
16320 "entry cnt %d sz %d pg cnt %d\n",
16321 cq->entry_count, cq->entry_size,
16323 if (cq->entry_count < 256) {
16327 fallthrough; /* otherwise default to smallest count */
16329 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16333 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16337 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16341 list_for_each_entry(dmabuf, &cq->page_list, list) {
16342 memset(dmabuf->virt, 0, cq->page_size);
16343 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16344 putPaddrLow(dmabuf->phys);
16345 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16346 putPaddrHigh(dmabuf->phys);
16348 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16350 /* The IOCTL status is embedded in the mailbox subheader. */
16351 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16352 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16353 if (shdr_status || shdr_add_status || rc) {
16354 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16355 "2501 CQ_CREATE mailbox failed with "
16356 "status x%x add_status x%x, mbx status x%x\n",
16357 shdr_status, shdr_add_status, rc);
16361 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16362 if (cq->queue_id == 0xFFFF) {
16366 /* link the cq onto the parent eq child list */
16367 list_add_tail(&cq->list, &eq->child_list);
16368 /* Set up completion queue's type and subtype */
16370 cq->subtype = subtype;
16371 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16372 cq->assoc_qid = eq->queue_id;
16374 cq->host_index = 0;
16375 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16376 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
16378 if (cq->queue_id > phba->sli4_hba.cq_max)
16379 phba->sli4_hba.cq_max = cq->queue_id;
16381 irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
16383 mempool_free(mbox, phba->mbox_mem_pool);
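/*
 * Illustrative sketch, not driver code: the CQ entry count is encoded as
 * one of a few discrete context values; unsupported counts fall back to
 * the smallest size, as the fallthrough in the switch above does, while
 * the version-2 path can carry larger counts via LPFC_CQ_CNT_WORD7. A
 * simplified rendering, assuming the driver's LPFC_CQ_CNT_* constants:
 */
static int example_cq_cnt_encoding(uint32_t entry_count)
{
	switch (entry_count) {
	case 512:
		return LPFC_CQ_CNT_512;
	case 1024:
		return LPFC_CQ_CNT_1024;
	case 256:
	default:
		return LPFC_CQ_CNT_256;	/* smallest supported count */
	}
}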
16388 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
16389 * @phba: HBA structure that indicates port to create a queue on.
16390 * @cqp: The queue structure array to use to create the completion queues.
16391 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
16392 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16393 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16395 * This function creates a set of completion queues to support MRQ,
16396 * as detailed in @cqp, on a port,
16397 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
16399 * The @phba struct is used to send mailbox command to HBA. The @cqp array
16400 * is used to get the entry count and entry size that are necessary to
16401 * determine the number of pages to allocate and use for these queues. The EQs
16402 * in @hdwq indicate which event queue to bind each completion queue to. This
16403 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
16404 * completion queue. This function is asynchronous and will wait for the mailbox
16405 * command to finish before continuing.
16407 * On success this function will return a zero. If unable to allocate enough
16408 * memory this function will return -ENOMEM. If the queue create mailbox command
16409 * fails this function will return -ENXIO.
16412 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
16413 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
16416 struct lpfc_queue *cq;
16417 struct lpfc_queue *eq;
16418 struct lpfc_mbx_cq_create_set *cq_set;
16419 struct lpfc_dmabuf *dmabuf;
16420 LPFC_MBOXQ_t *mbox;
16421 int rc, length, alloclen, status = 0;
16422 int cnt, idx, numcq, page_idx = 0;
16423 uint32_t shdr_status, shdr_add_status;
16424 union lpfc_sli4_cfg_shdr *shdr;
16425 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16427 /* sanity check on queue memory */
16428 numcq = phba->cfg_nvmet_mrq;
16429 if (!cqp || !hdwq || !numcq)
16432 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16436 length = sizeof(struct lpfc_mbx_cq_create_set);
16437 length += ((numcq * cqp[0]->page_count) *
16438 sizeof(struct dma_address));
16439 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16440 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
16441 LPFC_SLI4_MBX_NEMBED);
16442 if (alloclen < length) {
16443 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16444 "3098 Allocated DMA memory size (%d) is "
16445 "less than the requested DMA memory size "
16446 "(%d)\n", alloclen, length);
16450 cq_set = mbox->sge_array->addr[0];
16451 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
16452 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
16454 for (idx = 0; idx < numcq; idx++) {
16456 eq = hdwq[idx].hba_eq;
16461 if (!phba->sli4_hba.pc_sli4_params.supported)
16462 hw_page_size = cq->page_size;
16466 bf_set(lpfc_mbx_cq_create_set_page_size,
16467 &cq_set->u.request,
16468 (hw_page_size / SLI4_PAGE_SIZE));
16469 bf_set(lpfc_mbx_cq_create_set_num_pages,
16470 &cq_set->u.request, cq->page_count);
16471 bf_set(lpfc_mbx_cq_create_set_evt,
16472 &cq_set->u.request, 1);
16473 bf_set(lpfc_mbx_cq_create_set_valid,
16474 &cq_set->u.request, 1);
16475 bf_set(lpfc_mbx_cq_create_set_cqe_size,
16476 &cq_set->u.request, 0);
16477 bf_set(lpfc_mbx_cq_create_set_num_cq,
16478 &cq_set->u.request, numcq);
16479 bf_set(lpfc_mbx_cq_create_set_autovalid,
16480 &cq_set->u.request,
16481 phba->sli4_hba.pc_sli4_params.cqav);
16482 switch (cq->entry_count) {
16485 if (phba->sli4_hba.pc_sli4_params.cqv ==
16486 LPFC_Q_CREATE_VERSION_2) {
16487 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16488 &cq_set->u.request,
16490 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16491 &cq_set->u.request,
16492 LPFC_CQ_CNT_WORD7);
16497 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16498 "3118 Bad CQ count. (%d)\n",
16500 if (cq->entry_count < 256) {
16504 fallthrough; /* otherwise default to smallest */
16506 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16507 &cq_set->u.request, LPFC_CQ_CNT_256);
16510 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16511 &cq_set->u.request, LPFC_CQ_CNT_512);
16514 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16515 &cq_set->u.request, LPFC_CQ_CNT_1024);
16518 bf_set(lpfc_mbx_cq_create_set_eq_id0,
16519 &cq_set->u.request, eq->queue_id);
16522 bf_set(lpfc_mbx_cq_create_set_eq_id1,
16523 &cq_set->u.request, eq->queue_id);
16526 bf_set(lpfc_mbx_cq_create_set_eq_id2,
16527 &cq_set->u.request, eq->queue_id);
16530 bf_set(lpfc_mbx_cq_create_set_eq_id3,
16531 &cq_set->u.request, eq->queue_id);
16534 bf_set(lpfc_mbx_cq_create_set_eq_id4,
16535 &cq_set->u.request, eq->queue_id);
16538 bf_set(lpfc_mbx_cq_create_set_eq_id5,
16539 &cq_set->u.request, eq->queue_id);
16542 bf_set(lpfc_mbx_cq_create_set_eq_id6,
16543 &cq_set->u.request, eq->queue_id);
16546 bf_set(lpfc_mbx_cq_create_set_eq_id7,
16547 &cq_set->u.request, eq->queue_id);
16550 bf_set(lpfc_mbx_cq_create_set_eq_id8,
16551 &cq_set->u.request, eq->queue_id);
16554 bf_set(lpfc_mbx_cq_create_set_eq_id9,
16555 &cq_set->u.request, eq->queue_id);
16558 bf_set(lpfc_mbx_cq_create_set_eq_id10,
16559 &cq_set->u.request, eq->queue_id);
16562 bf_set(lpfc_mbx_cq_create_set_eq_id11,
16563 &cq_set->u.request, eq->queue_id);
16566 bf_set(lpfc_mbx_cq_create_set_eq_id12,
16567 &cq_set->u.request, eq->queue_id);
16570 bf_set(lpfc_mbx_cq_create_set_eq_id13,
16571 &cq_set->u.request, eq->queue_id);
16574 bf_set(lpfc_mbx_cq_create_set_eq_id14,
16575 &cq_set->u.request, eq->queue_id);
16578 bf_set(lpfc_mbx_cq_create_set_eq_id15,
16579 &cq_set->u.request, eq->queue_id);
16583 /* link the cq onto the parent eq child list */
16584 list_add_tail(&cq->list, &eq->child_list);
16585 /* Set up completion queue's type and subtype */
16587 cq->subtype = subtype;
16588 cq->assoc_qid = eq->queue_id;
16590 cq->host_index = 0;
16591 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16592 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
16597 list_for_each_entry(dmabuf, &cq->page_list, list) {
16598 memset(dmabuf->virt, 0, hw_page_size);
16599 cnt = page_idx + dmabuf->buffer_tag;
16600 cq_set->u.request.page[cnt].addr_lo =
16601 putPaddrLow(dmabuf->phys);
16602 cq_set->u.request.page[cnt].addr_hi =
16603 putPaddrHigh(dmabuf->phys);
16609 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16611 /* The IOCTL status is embedded in the mailbox subheader. */
16612 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16613 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16614 if (shdr_status || shdr_add_status || rc) {
16615 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16616 "3119 CQ_CREATE_SET mailbox failed with "
16617 "status x%x add_status x%x, mbx status x%x\n",
16618 shdr_status, shdr_add_status, rc);
16622 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
16623 if (rc == 0xFFFF) {
16628 for (idx = 0; idx < numcq; idx++) {
16630 cq->queue_id = rc + idx;
16631 if (cq->queue_id > phba->sli4_hba.cq_max)
16632 phba->sli4_hba.cq_max = cq->queue_id;
16636 lpfc_sli4_mbox_cmd_free(phba, mbox);
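/*
 * Illustrative sketch, not driver code: CREATE_CQ_SET is a non-embedded
 * mailbox command, so its SGE allocation must cover the fixed request plus
 * one dma_address per page per CQ -- the same arithmetic as the length
 * computation above. The helper name is hypothetical.
 */
static size_t example_cq_set_cmd_len(uint32_t numcq, uint32_t page_count)
{
	return sizeof(struct lpfc_mbx_cq_create_set) +
	       (size_t)numcq * page_count * sizeof(struct dma_address);
}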
16641 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
16642 * @phba: HBA structure that indicates port to create a queue on.
16643 * @mq: The queue structure to use to create the mailbox queue.
16644 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
16645 * @cq: The completion queue to associate with this mq.
16647 * This function provides failback (fb) functionality when the
16648 * mq_create_ext fails on older FW generations. Its purpose is identical
16649 * to mq_create_ext otherwise.
16651 * This routine cannot fail as all attributes were previously accessed and
16652 * initialized in mq_create_ext.
16655 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
16656 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
16658 struct lpfc_mbx_mq_create *mq_create;
16659 struct lpfc_dmabuf *dmabuf;
16662 length = (sizeof(struct lpfc_mbx_mq_create) -
16663 sizeof(struct lpfc_sli4_cfg_mhdr));
16664 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16665 LPFC_MBOX_OPCODE_MQ_CREATE,
16666 length, LPFC_SLI4_MBX_EMBED);
16667 mq_create = &mbox->u.mqe.un.mq_create;
16668 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
16670 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
16672 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
16673 switch (mq->entry_count) {
16675 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16676 LPFC_MQ_RING_SIZE_16);
16679 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16680 LPFC_MQ_RING_SIZE_32);
16683 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16684 LPFC_MQ_RING_SIZE_64);
16687 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16688 LPFC_MQ_RING_SIZE_128);
16691 list_for_each_entry(dmabuf, &mq->page_list, list) {
16692 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16693 putPaddrLow(dmabuf->phys);
16694 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16695 putPaddrHigh(dmabuf->phys);
16700 * lpfc_mq_create - Create a mailbox Queue on the HBA
16701 * @phba: HBA structure that indicates port to create a queue on.
16702 * @mq: The queue structure to use to create the mailbox queue.
16703 * @cq: The completion queue to associate with this mq.
16704 * @subtype: The queue's subtype.
16706 * This function creates a mailbox queue, as detailed in @mq, on a port,
16707 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
16709 * The @phba struct is used to send mailbox command to HBA. The @cq struct
16710 * is used to get the entry count and entry size that are necessary to
16711 * determine the number of pages to allocate and use for this queue. This
16712 * function will send the MQ_CREATE mailbox command to the HBA to setup the
16713 * mailbox queue. This function is asynchronous and will wait for the mailbox
16714 * command to finish before continuing.
16716 * On success this function will return a zero. If unable to allocate enough
16717 * memory this function will return -ENOMEM. If the queue create mailbox command
16718 * fails this function will return -ENXIO.
16721 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
16722 struct lpfc_queue *cq, uint32_t subtype)
16724 struct lpfc_mbx_mq_create *mq_create;
16725 struct lpfc_mbx_mq_create_ext *mq_create_ext;
16726 struct lpfc_dmabuf *dmabuf;
16727 LPFC_MBOXQ_t *mbox;
16728 int rc, length, status = 0;
16729 uint32_t shdr_status, shdr_add_status;
16730 union lpfc_sli4_cfg_shdr *shdr;
16731 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16733 /* sanity check on queue memory */
16736 if (!phba->sli4_hba.pc_sli4_params.supported)
16737 hw_page_size = SLI4_PAGE_SIZE;
16739 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16742 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
16743 sizeof(struct lpfc_sli4_cfg_mhdr));
16744 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16745 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
16746 length, LPFC_SLI4_MBX_EMBED);
16748 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
16749 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
16750 bf_set(lpfc_mbx_mq_create_ext_num_pages,
16751 &mq_create_ext->u.request, mq->page_count);
16752 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
16753 &mq_create_ext->u.request, 1);
16754 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
16755 &mq_create_ext->u.request, 1);
16756 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
16757 &mq_create_ext->u.request, 1);
16758 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
16759 &mq_create_ext->u.request, 1);
16760 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
16761 &mq_create_ext->u.request, 1);
16762 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
16763 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16764 phba->sli4_hba.pc_sli4_params.mqv);
16765 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
16766 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
16769 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
16771 switch (mq->entry_count) {
16773 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16774 "0362 Unsupported MQ count. (%d)\n",
16776 if (mq->entry_count < 16) {
16780 fallthrough; /* otherwise default to smallest count */
16782 bf_set(lpfc_mq_context_ring_size,
16783 &mq_create_ext->u.request.context,
16784 LPFC_MQ_RING_SIZE_16);
16787 bf_set(lpfc_mq_context_ring_size,
16788 &mq_create_ext->u.request.context,
16789 LPFC_MQ_RING_SIZE_32);
16792 bf_set(lpfc_mq_context_ring_size,
16793 &mq_create_ext->u.request.context,
16794 LPFC_MQ_RING_SIZE_64);
16797 bf_set(lpfc_mq_context_ring_size,
16798 &mq_create_ext->u.request.context,
16799 LPFC_MQ_RING_SIZE_128);
16802 list_for_each_entry(dmabuf, &mq->page_list, list) {
16803 memset(dmabuf->virt, 0, hw_page_size);
16804 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
16805 putPaddrLow(dmabuf->phys);
16806 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
16807 putPaddrHigh(dmabuf->phys);
16809 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16810 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16811 &mq_create_ext->u.response);
16812 if (rc != MBX_SUCCESS) {
16813 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16814 "2795 MQ_CREATE_EXT failed with "
16815 "status x%x. Failback to MQ_CREATE.\n",
16817 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
16818 mq_create = &mbox->u.mqe.un.mq_create;
16819 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16820 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
16821 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16822 &mq_create->u.response);
16825 /* The IOCTL status is embedded in the mailbox subheader. */
16826 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16827 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16828 if (shdr_status || shdr_add_status || rc) {
16829 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16830 "2502 MQ_CREATE mailbox failed with "
16831 "status x%x add_status x%x, mbx status x%x\n",
16832 shdr_status, shdr_add_status, rc);
16836 if (mq->queue_id == 0xFFFF) {
16840 mq->type = LPFC_MQ;
16841 mq->assoc_qid = cq->queue_id;
16842 mq->subtype = subtype;
16843 mq->host_index = 0;
16846 /* link the mq onto the parent cq child list */
16847 list_add_tail(&mq->list, &cq->child_list);
16849 mempool_free(mbox, phba->mbox_mem_pool);
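/*
 * Illustrative sketch, not driver code: MQ creation above is a
 * try-then-fallback sequence -- the extended opcode first, then legacy
 * MQ_CREATE reusing the same mailbox when older firmware rejects the
 * extended form. Hypothetical callback names.
 */
static int example_create_with_fallback(int (*mq_create_ext)(void),
					int (*mq_create_legacy)(void))
{
	int rc = mq_create_ext();

	if (rc != 0)			/* e.g. rc != MBX_SUCCESS */
		rc = mq_create_legacy();
	return rc;
}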
16854 * lpfc_wq_create - Create a Work Queue on the HBA
16855 * @phba: HBA structure that indicates port to create a queue on.
16856 * @wq: The queue structure to use to create the work queue.
16857 * @cq: The completion queue to bind this work queue to.
16858 * @subtype: The subtype of the work queue indicating its functionality.
16860 * This function creates a work queue, as detailed in @wq, on a port, described
16861 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
16863 * The @phba struct is used to send mailbox command to HBA. The @wq struct
16864 * is used to get the entry count and entry size that are necessary to
16865 * determine the number of pages to allocate and use for this queue. The @cq
16866 * is used to indicate which completion queue to bind this work queue to. This
16867 * function will send the WQ_CREATE mailbox command to the HBA to setup the
16868 * work queue. This function is asynchronous and will wait for the mailbox
16869 * command to finish before continuing.
16871 * On success this function will return a zero. If unable to allocate enough
16872 * memory this function will return -ENOMEM. If the queue create mailbox command
16873 * fails this function will return -ENXIO.
16876 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16877 struct lpfc_queue *cq, uint32_t subtype)
16879 struct lpfc_mbx_wq_create *wq_create;
16880 struct lpfc_dmabuf *dmabuf;
16881 LPFC_MBOXQ_t *mbox;
16882 int rc, length, status = 0;
16883 uint32_t shdr_status, shdr_add_status;
16884 union lpfc_sli4_cfg_shdr *shdr;
16885 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16886 struct dma_address *page;
16887 void __iomem *bar_memmap_p;
16888 uint32_t db_offset;
16889 uint16_t pci_barset;
16890 uint8_t dpp_barset;
16891 uint32_t dpp_offset;
16892 uint8_t wq_create_version;
16894 unsigned long pg_addr;
16897 /* sanity check on queue memory */
16900 if (!phba->sli4_hba.pc_sli4_params.supported)
16901 hw_page_size = wq->page_size;
16903 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16906 length = (sizeof(struct lpfc_mbx_wq_create) -
16907 sizeof(struct lpfc_sli4_cfg_mhdr));
16908 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16909 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16910 length, LPFC_SLI4_MBX_EMBED);
16911 wq_create = &mbox->u.mqe.un.wq_create;
16912 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
16913 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16915 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16918 /* wqv is the earliest version supported, NOT the latest */
16919 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16920 phba->sli4_hba.pc_sli4_params.wqv);
16922 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16923 (wq->page_size > SLI4_PAGE_SIZE))
16924 wq_create_version = LPFC_Q_CREATE_VERSION_1;
16926 wq_create_version = LPFC_Q_CREATE_VERSION_0;
16928 switch (wq_create_version) {
16929 case LPFC_Q_CREATE_VERSION_1:
16930 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16932 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16933 LPFC_Q_CREATE_VERSION_1);
16935 switch (wq->entry_size) {
16938 bf_set(lpfc_mbx_wq_create_wqe_size,
16939 &wq_create->u.request_1,
16940 LPFC_WQ_WQE_SIZE_64);
16943 bf_set(lpfc_mbx_wq_create_wqe_size,
16944 &wq_create->u.request_1,
16945 LPFC_WQ_WQE_SIZE_128);
16948 /* Request DPP by default */
16949 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
16950 bf_set(lpfc_mbx_wq_create_page_size,
16951 &wq_create->u.request_1,
16952 (wq->page_size / SLI4_PAGE_SIZE));
16953 page = wq_create->u.request_1.page;
16956 page = wq_create->u.request.page;
16960 list_for_each_entry(dmabuf, &wq->page_list, list) {
16961 memset(dmabuf->virt, 0, hw_page_size);
16962 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
16963 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
16966 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16967 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
16969 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16970 /* The IOCTL status is embedded in the mailbox subheader. */
16971 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16972 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16973 if (shdr_status || shdr_add_status || rc) {
16974 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16975 "2503 WQ_CREATE mailbox failed with "
16976 "status x%x add_status x%x, mbx status x%x\n",
16977 shdr_status, shdr_add_status, rc);
16982 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
16983 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
16984 &wq_create->u.response);
16986 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
16987 &wq_create->u.response_1);
16989 if (wq->queue_id == 0xFFFF) {
16994 wq->db_format = LPFC_DB_LIST_FORMAT;
16995 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
16996 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16997 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
16998 &wq_create->u.response);
16999 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
17000 (wq->db_format != LPFC_DB_RING_FORMAT)) {
17001 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17002 "3265 WQ[%d] doorbell format "
17003 "not supported: x%x\n",
17004 wq->queue_id, wq->db_format);
17008 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
17009 &wq_create->u.response);
17010 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
17012 if (!bar_memmap_p) {
17013 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17014 "3263 WQ[%d] failed to memmap "
17015 "pci barset:x%x\n",
17016 wq->queue_id, pci_barset);
17020 db_offset = wq_create->u.response.doorbell_offset;
17021 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
17022 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
17023 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17024 "3252 WQ[%d] doorbell offset "
17025 "not supported: x%x\n",
17026 wq->queue_id, db_offset);
17030 wq->db_regaddr = bar_memmap_p + db_offset;
17031 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17032 "3264 WQ[%d]: barset:x%x, offset:x%x, "
17033 "format:x%x\n", wq->queue_id,
17034 pci_barset, db_offset, wq->db_format);
17036 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
17038 /* Check if DPP was honored by the firmware */
17039 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
17040 &wq_create->u.response_1);
17041 if (wq->dpp_enable) {
17042 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
17043 &wq_create->u.response_1);
17044 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
17046 if (!bar_memmap_p) {
17047 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17048 "3267 WQ[%d] failed to memmap "
17049 "pci barset:x%x\n",
17050 wq->queue_id, pci_barset);
17054 db_offset = wq_create->u.response_1.doorbell_offset;
17055 wq->db_regaddr = bar_memmap_p + db_offset;
17056 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
17057 &wq_create->u.response_1);
17058 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
17059 &wq_create->u.response_1);
17060 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
17062 if (!bar_memmap_p) {
17063 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17064 "3268 WQ[%d] failed to memmap "
17065 "pci barset:x%x\n",
17066 wq->queue_id, dpp_barset);
17070 dpp_offset = wq_create->u.response_1.dpp_offset;
17071 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
17072 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17073 "3271 WQ[%d]: barset:x%x, offset:x%x, "
17074 "dpp_id:x%x dpp_barset:x%x "
17075 "dpp_offset:x%x\n",
17076 wq->queue_id, pci_barset, db_offset,
17077 wq->dpp_id, dpp_barset, dpp_offset);
17080 /* Enable combined writes for DPP aperture */
17081 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
17082 rc = set_memory_wc(pg_addr, 1);
17084 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17085 "3272 Cannot setup Combined "
17086 "Write on WQ[%d] - disable DPP\n",
17088 phba->cfg_enable_dpp = 0;
17091 phba->cfg_enable_dpp = 0;
17094 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
17096 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
17097 if (wq->pring == NULL) {
17101 wq->type = LPFC_WQ;
17102 wq->assoc_qid = cq->queue_id;
17103 wq->subtype = subtype;
17104 wq->host_index = 0;
17106 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
17108 /* link the wq onto the parent cq child list */
17109 list_add_tail(&wq->list, &cq->child_list);
17111 mempool_free(mbox, phba->mbox_mem_pool);
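/*
 * Illustrative sketch, not driver code: a WQ doorbell register address is
 * simply the iomapped BAR base plus the offset the firmware reports, and
 * the DPP aperture is derived the same way from its own BAR and offset.
 */
static void __iomem *example_db_regaddr(void __iomem *bar_base,
					uint32_t db_offset)
{
	return bar_base ? bar_base + db_offset : NULL;
}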
17116 * lpfc_rq_create - Create a Receive Queue on the HBA
17117 * @phba: HBA structure that indicates port to create a queue on.
17118 * @hrq: The queue structure to use to create the header receive queue.
17119 * @drq: The queue structure to use to create the data receive queue.
17120 * @cq: The completion queue to bind this work queue to.
17121 * @subtype: The subtype of the work queue indicating its functionality.
17123 * This function creates a receive buffer queue pair, as detailed in @hrq and
17124 * @drq, on a port, described by @phba by sending an RQ_CREATE mailbox command
17127 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
17128 * structs are used to get the entry count that is necessary to determine the
17129 * number of pages to use for this queue. The @cq is used to indicate which
17130 * completion queue to bind received buffers that are posted to these queues to.
17131 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
17132 * receive queue pair. This function is asynchronous and will wait for the
17133 * mailbox command to finish before continuing.
17135 * On success this function will return a zero. If unable to allocate enough
17136 * memory this function will return -ENOMEM. If the queue create mailbox command
17137 * fails this function will return -ENXIO.
17140 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17141 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
17143 struct lpfc_mbx_rq_create *rq_create;
17144 struct lpfc_dmabuf *dmabuf;
17145 LPFC_MBOXQ_t *mbox;
17146 int rc, length, status = 0;
17147 uint32_t shdr_status, shdr_add_status;
17148 union lpfc_sli4_cfg_shdr *shdr;
17149 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17150 void __iomem *bar_memmap_p;
17151 uint32_t db_offset;
17152 uint16_t pci_barset;
17154 /* sanity check on queue memory */
17155 if (!hrq || !drq || !cq)
17157 if (!phba->sli4_hba.pc_sli4_params.supported)
17158 hw_page_size = SLI4_PAGE_SIZE;
17160 if (hrq->entry_count != drq->entry_count)
17162 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17165 length = (sizeof(struct lpfc_mbx_rq_create) -
17166 sizeof(struct lpfc_sli4_cfg_mhdr));
17167 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17168 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17169 length, LPFC_SLI4_MBX_EMBED);
17170 rq_create = &mbox->u.mqe.un.rq_create;
17171 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17172 bf_set(lpfc_mbox_hdr_version, &shdr->request,
17173 phba->sli4_hba.pc_sli4_params.rqv);
17174 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17175 bf_set(lpfc_rq_context_rqe_count_1,
17176 &rq_create->u.request.context,
17178 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
17179 bf_set(lpfc_rq_context_rqe_size,
17180 &rq_create->u.request.context,
17182 bf_set(lpfc_rq_context_page_size,
17183 &rq_create->u.request.context,
17184 LPFC_RQ_PAGE_SIZE_4096);
17186 switch (hrq->entry_count) {
17188 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17189 "2535 Unsupported RQ count. (%d)\n",
17191 if (hrq->entry_count < 512) {
17195 fallthrough; /* otherwise default to smallest count */
17197 bf_set(lpfc_rq_context_rqe_count,
17198 &rq_create->u.request.context,
17199 LPFC_RQ_RING_SIZE_512);
17202 bf_set(lpfc_rq_context_rqe_count,
17203 &rq_create->u.request.context,
17204 LPFC_RQ_RING_SIZE_1024);
17207 bf_set(lpfc_rq_context_rqe_count,
17208 &rq_create->u.request.context,
17209 LPFC_RQ_RING_SIZE_2048);
17212 bf_set(lpfc_rq_context_rqe_count,
17213 &rq_create->u.request.context,
17214 LPFC_RQ_RING_SIZE_4096);
17217 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
17218 LPFC_HDR_BUF_SIZE);
17220 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17222 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17224 list_for_each_entry(dmabuf, &hrq->page_list, list) {
17225 memset(dmabuf->virt, 0, hw_page_size);
17226 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17227 putPaddrLow(dmabuf->phys);
17228 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17229 putPaddrHigh(dmabuf->phys);
17231 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17232 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17234 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17235 /* The IOCTL status is embedded in the mailbox subheader. */
17236 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17237 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17238 if (shdr_status || shdr_add_status || rc) {
17239 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17240 "2504 RQ_CREATE mailbox failed with "
17241 "status x%x add_status x%x, mbx status x%x\n",
17242 shdr_status, shdr_add_status, rc);
17246 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17247 if (hrq->queue_id == 0xFFFF) {
17252 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
17253 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
17254 &rq_create->u.response);
17255 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
17256 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
17257 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17258 "3262 RQ [%d] doorbell format not "
17259 "supported: x%x\n", hrq->queue_id,
17265 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
17266 &rq_create->u.response);
17267 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
17268 if (!bar_memmap_p) {
17269 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17270 "3269 RQ[%d] failed to memmap pci "
17271 "barset:x%x\n", hrq->queue_id,
17277 db_offset = rq_create->u.response.doorbell_offset;
17278 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
17279 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
17280 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17281 "3270 RQ[%d] doorbell offset not "
17282 "supported: x%x\n", hrq->queue_id,
17287 hrq->db_regaddr = bar_memmap_p + db_offset;
17288 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17289 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
17290 "format:x%x\n", hrq->queue_id, pci_barset,
17291 db_offset, hrq->db_format);
17293 hrq->db_format = LPFC_DB_RING_FORMAT;
17294 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17296 hrq->type = LPFC_HRQ;
17297 hrq->assoc_qid = cq->queue_id;
17298 hrq->subtype = subtype;
17299 hrq->host_index = 0;
17300 hrq->hba_index = 0;
17301 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17303 /* now create the data queue */
17304 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17305 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17306 length, LPFC_SLI4_MBX_EMBED);
17307 bf_set(lpfc_mbox_hdr_version, &shdr->request,
17308 phba->sli4_hba.pc_sli4_params.rqv);
17309 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17310 bf_set(lpfc_rq_context_rqe_count_1,
17311 &rq_create->u.request.context, hrq->entry_count);
17312 if (subtype == LPFC_NVMET)
17313 rq_create->u.request.context.buffer_size =
17314 LPFC_NVMET_DATA_BUF_SIZE;
17316 rq_create->u.request.context.buffer_size =
17317 LPFC_DATA_BUF_SIZE;
17318 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
17320 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
17321 (PAGE_SIZE/SLI4_PAGE_SIZE));
17323 switch (drq->entry_count) {
17325 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17326 "2536 Unsupported RQ count. (%d)\n",
17328 if (drq->entry_count < 512) {
17332 fallthrough; /* otherwise default to smallest count */
17334 bf_set(lpfc_rq_context_rqe_count,
17335 &rq_create->u.request.context,
17336 LPFC_RQ_RING_SIZE_512);
17339 bf_set(lpfc_rq_context_rqe_count,
17340 &rq_create->u.request.context,
17341 LPFC_RQ_RING_SIZE_1024);
17344 bf_set(lpfc_rq_context_rqe_count,
17345 &rq_create->u.request.context,
17346 LPFC_RQ_RING_SIZE_2048);
17349 bf_set(lpfc_rq_context_rqe_count,
17350 &rq_create->u.request.context,
17351 LPFC_RQ_RING_SIZE_4096);
17354 if (subtype == LPFC_NVMET)
17355 bf_set(lpfc_rq_context_buf_size,
17356 &rq_create->u.request.context,
17357 LPFC_NVMET_DATA_BUF_SIZE);
17359 bf_set(lpfc_rq_context_buf_size,
17360 &rq_create->u.request.context,
17361 LPFC_DATA_BUF_SIZE);
17363 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17365 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17367 list_for_each_entry(dmabuf, &drq->page_list, list) {
17368 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17369 putPaddrLow(dmabuf->phys);
17370 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17371 putPaddrHigh(dmabuf->phys);
17373 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17374 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17375 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17376 /* The IOCTL status is embedded in the mailbox subheader. */
17377 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17378 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17379 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17380 if (shdr_status || shdr_add_status || rc) {
17384 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17385 if (drq->queue_id == 0xFFFF) {
17389 drq->type = LPFC_DRQ;
17390 drq->assoc_qid = cq->queue_id;
17391 drq->subtype = subtype;
17392 drq->host_index = 0;
17393 drq->hba_index = 0;
17394 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17396 /* link the header and data RQs onto the parent cq child list */
17397 list_add_tail(&hrq->list, &cq->child_list);
17398 list_add_tail(&drq->list, &cq->child_list);
17401 mempool_free(mbox, phba->mbox_mem_pool);
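
/*
 * Editor's note -- illustrative usage sketch, not part of the driver. A
 * caller typically allocates the header and data queues with matching
 * entry counts and creates them as a pair bound to one CQ; the queue
 * variable names below are hypothetical:
 *
 *	struct lpfc_queue *hrq, *drq;	// from lpfc_sli4_queue_alloc()
 *	int rc;
 *
 *	rc = lpfc_rq_create(phba, hrq, drq, els_cq, LPFC_USOL);
 *	if (rc)
 *		return rc;	// -ENOMEM or -ENXIO per the header comment
 */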
17406 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
17407 * @phba: HBA structure that indicates port to create a queue on.
17408 * @hrqp: The queue structure array to use to create the header receive queues.
17409 * @drqp: The queue structure array to use to create the data receive queues.
17410 * @cqp: The completion queue array to bind these receive queues to.
17411 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
17413 * This function creates receive buffer queue pairs, as detailed in @hrqp and
17414 * @drqp, on a port, described by @phba, by sending a RQ_CREATE mailbox command
17417 * The @phba struct is used to send the mailbox command to the HBA. The @drqp
17418 * and @hrqp arrays are used to get the entry counts that are necessary to
17419 * determine the number of pages to use for each queue. The @cqp array
17420 * indicates which completion queue each receive queue pair will be bound to.
17421 * This function will send the RQ_CREATE mailbox command to the HBA to set up
17422 * the receive queue pairs. This function is synchronous; it waits for the
17423 * mailbox command to finish before continuing.
17425 * On success this function returns zero. If unable to allocate enough
17426 * memory it returns -ENOMEM. If the queue create mailbox command fails
17427 * it returns -ENXIO.
17430 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
17431 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
17434 struct lpfc_queue *hrq, *drq, *cq;
17435 struct lpfc_mbx_rq_create_v2 *rq_create;
17436 struct lpfc_dmabuf *dmabuf;
17437 LPFC_MBOXQ_t *mbox;
17438 int rc, length, alloclen, status = 0;
17439 int cnt, idx, numrq, page_idx = 0;
17440 uint32_t shdr_status, shdr_add_status;
17441 union lpfc_sli4_cfg_shdr *shdr;
17442 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17444 numrq = phba->cfg_nvmet_mrq;
17445 /* sanity check on array memory */
17446 if (!hrqp || !drqp || !cqp || !numrq)
17448 if (!phba->sli4_hba.pc_sli4_params.supported)
17449 hw_page_size = SLI4_PAGE_SIZE;
17451 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17455 length = sizeof(struct lpfc_mbx_rq_create_v2);
17456 length += ((2 * numrq * hrqp[0]->page_count) *
17457 sizeof(struct dma_address));
17459 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17460 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
17461 LPFC_SLI4_MBX_NEMBED);
17462 if (alloclen < length) {
17463 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17464 "3099 Allocated DMA memory size (%d) is "
17465 "less than the requested DMA memory size "
17466 "(%d)\n", alloclen, length);
17473 rq_create = mbox->sge_array->addr[0];
17474 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
17476 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
17479 for (idx = 0; idx < numrq; idx++) {
17484 /* sanity check on queue memory */
17485 if (!hrq || !drq || !cq) {
17490 if (hrq->entry_count != drq->entry_count) {
17496 bf_set(lpfc_mbx_rq_create_num_pages,
17497 &rq_create->u.request,
17499 bf_set(lpfc_mbx_rq_create_rq_cnt,
17500 &rq_create->u.request, (numrq * 2));
17501 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
17503 bf_set(lpfc_rq_context_base_cq,
17504 &rq_create->u.request.context,
17506 bf_set(lpfc_rq_context_data_size,
17507 &rq_create->u.request.context,
17508 LPFC_NVMET_DATA_BUF_SIZE);
17509 bf_set(lpfc_rq_context_hdr_size,
17510 &rq_create->u.request.context,
17511 LPFC_HDR_BUF_SIZE);
17512 bf_set(lpfc_rq_context_rqe_count_1,
17513 &rq_create->u.request.context,
17515 bf_set(lpfc_rq_context_rqe_size,
17516 &rq_create->u.request.context,
17518 bf_set(lpfc_rq_context_page_size,
17519 &rq_create->u.request.context,
17520 (PAGE_SIZE/SLI4_PAGE_SIZE));
17523 list_for_each_entry(dmabuf, &hrq->page_list, list) {
17524 memset(dmabuf->virt, 0, hw_page_size);
17525 cnt = page_idx + dmabuf->buffer_tag;
17526 rq_create->u.request.page[cnt].addr_lo =
17527 putPaddrLow(dmabuf->phys);
17528 rq_create->u.request.page[cnt].addr_hi =
17529 putPaddrHigh(dmabuf->phys);
17535 list_for_each_entry(dmabuf, &drq->page_list, list) {
17536 memset(dmabuf->virt, 0, hw_page_size);
17537 cnt = page_idx + dmabuf->buffer_tag;
17538 rq_create->u.request.page[cnt].addr_lo =
17539 putPaddrLow(dmabuf->phys);
17540 rq_create->u.request.page[cnt].addr_hi =
17541 putPaddrHigh(dmabuf->phys);
17546 hrq->db_format = LPFC_DB_RING_FORMAT;
17547 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17548 hrq->type = LPFC_HRQ;
17549 hrq->assoc_qid = cq->queue_id;
17550 hrq->subtype = subtype;
17551 hrq->host_index = 0;
17552 hrq->hba_index = 0;
17553 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17555 drq->db_format = LPFC_DB_RING_FORMAT;
17556 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17557 drq->type = LPFC_DRQ;
17558 drq->assoc_qid = cq->queue_id;
17559 drq->subtype = subtype;
17560 drq->host_index = 0;
17561 drq->hba_index = 0;
17562 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17564 list_add_tail(&hrq->list, &cq->child_list);
17565 list_add_tail(&drq->list, &cq->child_list);
17568 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17569 /* The IOCTL status is embedded in the mailbox subheader. */
17570 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17571 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17572 if (shdr_status || shdr_add_status || rc) {
17573 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17574 "3120 RQ_CREATE mailbox failed with "
17575 "status x%x add_status x%x, mbx status x%x\n",
17576 shdr_status, shdr_add_status, rc);
17580 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17581 if (rc == 0xFFFF) {
17586 /* Initialize all RQs with associated queue id */
17587 for (idx = 0; idx < numrq; idx++) {
17589 hrq->queue_id = rc + (2 * idx);
17591 drq->queue_id = rc + (2 * idx) + 1;
17595 lpfc_sli4_mbox_cmd_free(phba, mbox);
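
/*
 * Editor's note -- illustrative example. The port returns a single base
 * queue id for the whole set, and the loop above interleaves header and
 * data queue ids. With numrq = 2 and a hypothetical base id rc = 100:
 *
 *	hrqp[0]->queue_id = 100;	drqp[0]->queue_id = 101;
 *	hrqp[1]->queue_id = 102;	drqp[1]->queue_id = 103;
 */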
17600 * lpfc_eq_destroy - Destroy an event Queue on the HBA
17601 * @phba: HBA structure that indicates port to destroy a queue on.
17602 * @eq: The queue structure associated with the queue to destroy.
17604 * This function destroys a queue, as detailed in @eq, by sending a mailbox
17605 * command, specific to the type of queue, to the HBA.
17607 * The @eq struct is used to get the queue ID of the queue to destroy.
17609 * On success this function returns zero. If the queue destroy mailbox
17610 * command fails it returns -ENXIO.
17613 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
17615 LPFC_MBOXQ_t *mbox;
17616 int rc, length, status = 0;
17617 uint32_t shdr_status, shdr_add_status;
17618 union lpfc_sli4_cfg_shdr *shdr;
17620 /* sanity check on queue memory */
17624 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
17627 length = (sizeof(struct lpfc_mbx_eq_destroy) -
17628 sizeof(struct lpfc_sli4_cfg_mhdr));
17629 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17630 LPFC_MBOX_OPCODE_EQ_DESTROY,
17631 length, LPFC_SLI4_MBX_EMBED);
17632 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
17634 mbox->vport = eq->phba->pport;
17635 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17637 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
17638 /* The IOCTL status is embedded in the mailbox subheader. */
17639 shdr = (union lpfc_sli4_cfg_shdr *)
17640 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
17641 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17642 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17643 if (shdr_status || shdr_add_status || rc) {
17644 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17645 "2505 EQ_DESTROY mailbox failed with "
17646 "status x%x add_status x%x, mbx status x%x\n",
17647 shdr_status, shdr_add_status, rc);
17651 /* Remove eq from any list */
17652 list_del_init(&eq->list);
17653 mempool_free(mbox, eq->phba->mbox_mem_pool);
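
/*
 * Editor's note -- illustrative teardown sketch (an assumption about
 * caller ordering, not driver code): since child queues hang off their
 * parent's child_list, callers typically destroy leaf queues first:
 *
 *	lpfc_wq_destroy(phba, wq);	// work queue first
 *	lpfc_cq_destroy(phba, cq);	// then its completion queue
 *	lpfc_eq_destroy(phba, eq);	// finally the parent event queue
 */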
17658 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
17659 * @phba: HBA structure that indicates port to destroy a queue on.
17660 * @cq: The queue structure associated with the queue to destroy.
17662 * This function destroys a queue, as detailed in @cq, by sending a mailbox
17663 * command, specific to the type of queue, to the HBA.
17665 * The @cq struct is used to get the queue ID of the queue to destroy.
17667 * On success this function returns zero. If the queue destroy mailbox
17668 * command fails it returns -ENXIO.
17671 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
17673 LPFC_MBOXQ_t *mbox;
17674 int rc, length, status = 0;
17675 uint32_t shdr_status, shdr_add_status;
17676 union lpfc_sli4_cfg_shdr *shdr;
17678 /* sanity check on queue memory */
17681 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
17684 length = (sizeof(struct lpfc_mbx_cq_destroy) -
17685 sizeof(struct lpfc_sli4_cfg_mhdr));
17686 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17687 LPFC_MBOX_OPCODE_CQ_DESTROY,
17688 length, LPFC_SLI4_MBX_EMBED);
17689 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
17691 mbox->vport = cq->phba->pport;
17692 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17693 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
17694 /* The IOCTL status is embedded in the mailbox subheader. */
17695 shdr = (union lpfc_sli4_cfg_shdr *)
17696 &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
17697 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17698 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17699 if (shdr_status || shdr_add_status || rc) {
17700 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17701 "2506 CQ_DESTROY mailbox failed with "
17702 "status x%x add_status x%x, mbx status x%x\n",
17703 shdr_status, shdr_add_status, rc);
17706 /* Remove cq from any list */
17707 list_del_init(&cq->list);
17708 mempool_free(mbox, cq->phba->mbox_mem_pool);
17713 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
17714 * @phba: HBA structure that indicates port to destroy a queue on.
17715 * @mq: The queue structure associated with the queue to destroy.
17717 * This function destroys a queue, as detailed in @mq, by sending a mailbox
17718 * command, specific to the type of queue, to the HBA.
17720 * The @mq struct is used to get the queue ID of the queue to destroy.
17722 * On success this function returns zero. If the queue destroy mailbox
17723 * command fails it returns -ENXIO.
17726 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
17728 LPFC_MBOXQ_t *mbox;
17729 int rc, length, status = 0;
17730 uint32_t shdr_status, shdr_add_status;
17731 union lpfc_sli4_cfg_shdr *shdr;
17733 /* sanity check on queue memory */
17736 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
17739 length = (sizeof(struct lpfc_mbx_mq_destroy) -
17740 sizeof(struct lpfc_sli4_cfg_mhdr));
17741 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17742 LPFC_MBOX_OPCODE_MQ_DESTROY,
17743 length, LPFC_SLI4_MBX_EMBED);
17744 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
17746 mbox->vport = mq->phba->pport;
17747 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17748 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
17749 /* The IOCTL status is embedded in the mailbox subheader. */
17750 shdr = (union lpfc_sli4_cfg_shdr *)
17751 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
17752 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17753 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17754 if (shdr_status || shdr_add_status || rc) {
17755 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17756 "2507 MQ_DESTROY mailbox failed with "
17757 "status x%x add_status x%x, mbx status x%x\n",
17758 shdr_status, shdr_add_status, rc);
17761 /* Remove mq from any list */
17762 list_del_init(&mq->list);
17763 mempool_free(mbox, mq->phba->mbox_mem_pool);
17768 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
17769 * @phba: HBA structure that indicates port to destroy a queue on.
17770 * @wq: The queue structure associated with the queue to destroy.
17772 * This function destroys a queue, as detailed in @wq, by sending a mailbox
17773 * command, specific to the type of queue, to the HBA.
17775 * The @wq struct is used to get the queue ID of the queue to destroy.
17777 * On success this function returns zero. If the queue destroy mailbox
17778 * command fails it returns -ENXIO.
17781 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
17783 LPFC_MBOXQ_t *mbox;
17784 int rc, length, status = 0;
17785 uint32_t shdr_status, shdr_add_status;
17786 union lpfc_sli4_cfg_shdr *shdr;
17788 /* sanity check on queue memory */
17791 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
17794 length = (sizeof(struct lpfc_mbx_wq_destroy) -
17795 sizeof(struct lpfc_sli4_cfg_mhdr));
17796 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17797 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17798 length, LPFC_SLI4_MBX_EMBED);
17799 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17801 mbox->vport = wq->phba->pport;
17802 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17803 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17804 shdr = (union lpfc_sli4_cfg_shdr *)
17805 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17806 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17807 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17808 if (shdr_status || shdr_add_status || rc) {
17809 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17810 "2508 WQ_DESTROY mailbox failed with "
17811 "status x%x add_status x%x, mbx status x%x\n",
17812 shdr_status, shdr_add_status, rc);
17815 /* Remove wq from any list */
17816 list_del_init(&wq->list);
17819 mempool_free(mbox, wq->phba->mbox_mem_pool);
17824 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
17825 * @phba: HBA structure that indicates port to destroy a queue on.
17826 * @hrq: The queue structure associated with the queue to destroy.
17827 * @drq: The queue structure associated with the queue to destroy.
17829 * This function destroys a receive queue pair, as detailed in @hrq and @drq,
17830 * by sending a mailbox command, specific to the type of queue, to the HBA.
17832 * The @hrq and @drq structs are used to get the queue IDs of the queues to destroy.
17834 * On success this function returns zero. If the queue destroy mailbox
17835 * command fails it returns -ENXIO.
17838 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17839 struct lpfc_queue *drq)
17841 LPFC_MBOXQ_t *mbox;
17842 int rc, length, status = 0;
17843 uint32_t shdr_status, shdr_add_status;
17844 union lpfc_sli4_cfg_shdr *shdr;
17846 /* sanity check on queue memory */
17849 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
17852 length = (sizeof(struct lpfc_mbx_rq_destroy) -
17853 sizeof(struct lpfc_sli4_cfg_mhdr));
17854 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17855 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
17856 length, LPFC_SLI4_MBX_EMBED);
17857 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17859 mbox->vport = hrq->phba->pport;
17860 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17861 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
17862 /* The IOCTL status is embedded in the mailbox subheader. */
17863 shdr = (union lpfc_sli4_cfg_shdr *)
17864 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17865 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17866 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17867 if (shdr_status || shdr_add_status || rc) {
17868 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17869 "2509 RQ_DESTROY mailbox failed with "
17870 "status x%x add_status x%x, mbx status x%x\n",
17871 shdr_status, shdr_add_status, rc);
17872 mempool_free(mbox, hrq->phba->mbox_mem_pool);
17875 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17877 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
17878 shdr = (union lpfc_sli4_cfg_shdr *)
17879 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17880 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17881 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17882 if (shdr_status || shdr_add_status || rc) {
17883 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17884 "2510 RQ_DESTROY mailbox failed with "
17885 "status x%x add_status x%x, mbx status x%x\n",
17886 shdr_status, shdr_add_status, rc);
17889 list_del_init(&hrq->list);
17890 list_del_init(&drq->list);
17891 mempool_free(mbox, hrq->phba->mbox_mem_pool);
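
/*
 * Editor's note -- illustration. A single mailbox is reused for both
 * halves of the pair: the q_id field is rewritten with drq->queue_id and
 * the command reissued, so one call tears down both queues:
 *
 *	rc = lpfc_rq_destroy(phba, hrq, drq);	// header and data RQs
 */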
17896 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
17897 * @phba: pointer to lpfc hba data structure.
17898 * @pdma_phys_addr0: Physical address of the 1st SGL page.
17899 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
17900 * @xritag: the xritag that ties this io to the SGL pages.
17902 * This routine will post the sgl pages for the IO that has the xritag
17903 * that is in the iocbq structure. The xritag is assigned during iocbq
17904 * creation and persists for as long as the driver is loaded.
17905 * If the caller has fewer than 256 scatter gather segments to map, then
17906 * pdma_phys_addr1 should be 0.
17907 * If the caller needs to map more than 256 scatter gather segments, then
17908 * pdma_phys_addr1 should be a valid physical address.
17909 * Physical addresses for SGLs must be 64-byte aligned.
17910 * If two SGL pages are mapped, the first must have 256 entries;
17911 * the second can have between 1 and 256 entries.
17915 * -ENXIO, -ENOMEM - Failure
17918 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
17919 dma_addr_t pdma_phys_addr0,
17920 dma_addr_t pdma_phys_addr1,
17923 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
17924 LPFC_MBOXQ_t *mbox;
17926 uint32_t shdr_status, shdr_add_status;
17928 union lpfc_sli4_cfg_shdr *shdr;
17930 if (xritag == NO_XRI) {
17931 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17932 "0364 Invalid param:\n");
17936 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17940 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17941 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17942 sizeof(struct lpfc_mbx_post_sgl_pages) -
17943 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
17945 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
17946 &mbox->u.mqe.un.post_sgl_pages;
17947 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
17948 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
17950 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
17951 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
17952 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
17953 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
17955 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
17956 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
17957 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
17958 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
17959 if (!phba->sli4_hba.intr_enable)
17960 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17962 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17963 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17965 /* The IOCTL status is embedded in the mailbox subheader. */
17966 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
17967 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17968 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17969 if (!phba->sli4_hba.intr_enable)
17970 mempool_free(mbox, phba->mbox_mem_pool);
17971 else if (rc != MBX_TIMEOUT)
17972 mempool_free(mbox, phba->mbox_mem_pool);
17973 if (shdr_status || shdr_add_status || rc) {
17974 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17975 "2511 POST_SGL mailbox failed with "
17976 "status x%x add_status x%x, mbx status x%x\n",
17977 shdr_status, shdr_add_status, rc);
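
/*
 * Editor's note -- illustrative sketch (names hypothetical). Posting a
 * single-page SGL, i.e. fewer than 256 entries, so the second page
 * address is 0 as required by the function's header comment:
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 */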
17983 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
17984 * @phba: pointer to lpfc hba data structure.
17986 * This routine is invoked to allocate the next available logical xri
17987 * from the driver's xri bitmask, consistent with the SLI-4 interface
17988 * spec. The allocation is recorded in the bitmask and in the xri_used
17989 * count under the hba lock.
17992 * A valid xri in the range 0 <= xri < max_xri if successful, or
17993 * NO_XRI if no xris are available.
17996 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
18001 * Fetch the next logical xri. Because this index is logical,
18002 * the driver starts at 0 each time.
18004 spin_lock_irq(&phba->hbalock);
18005 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
18006 phba->sli4_hba.max_cfg_param.max_xri, 0);
18007 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
18008 spin_unlock_irq(&phba->hbalock);
18011 set_bit(xri, phba->sli4_hba.xri_bmask);
18012 phba->sli4_hba.max_cfg_param.xri_used++;
18014 spin_unlock_irq(&phba->hbalock);
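
/*
 * Editor's note -- illustrative pairing of the allocator with its
 * release helper; the caller policy shown here is hypothetical:
 *
 *	uint16_t xri = lpfc_sli4_alloc_xri(phba);
 *
 *	if (xri == NO_XRI)
 *		return -ENOMEM;		// no logical xri left
 *	...
 *	lpfc_sli4_free_xri(phba, xri);	// clears the bitmask bit
 */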
18019 * __lpfc_sli4_free_xri - Release an xri for reuse.
18020 * @phba: pointer to lpfc hba data structure.
18021 * @xri: xri to release.
18023 * This routine is invoked to release an xri to the pool of
18024 * available xris maintained by the driver.
18027 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
18029 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
18030 phba->sli4_hba.max_cfg_param.xri_used--;
18035 * lpfc_sli4_free_xri - Release an xri for reuse.
18036 * @phba: pointer to lpfc hba data structure.
18037 * @xri: xri to release.
18039 * This routine is invoked to release an xri to the pool of
18040 * available xris maintained by the driver.
18043 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
18045 spin_lock_irq(&phba->hbalock);
18046 __lpfc_sli4_free_xri(phba, xri);
18047 spin_unlock_irq(&phba->hbalock);
18051 * lpfc_sli4_next_xritag - Get an xritag for the io
18052 * @phba: Pointer to HBA context object.
18054 * This function gets an xritag for the iocb. If there is no unused xritag
18055 * it will return NO_XRI (0xffff).
18056 * The function returns the allocated xritag if successful, else returns
18057 * NO_XRI; NO_XRI is not a valid xritag.
18058 * The caller is not required to hold any lock.
18061 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
18063 uint16_t xri_index;
18065 xri_index = lpfc_sli4_alloc_xri(phba);
18066 if (xri_index == NO_XRI)
18067 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
18068 "2004 Failed to allocate XRI.last XRITAG is %d"
18069 " Max XRI is %d, Used XRI is %d\n",
18071 phba->sli4_hba.max_cfg_param.max_xri,
18072 phba->sli4_hba.max_cfg_param.xri_used);
18077 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
18078 * @phba: pointer to lpfc hba data structure.
18079 * @post_sgl_list: pointer to els sgl entry list.
18080 * @post_cnt: number of els sgl entries on the list.
18082 * This routine is invoked to post a block of driver's sgl pages to the
18083 * HBA using non-embedded mailbox command. No Lock is held. This routine
18084 * is only called when the driver is loading and after all IO has been
18088 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
18089 struct list_head *post_sgl_list,
18092 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
18093 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
18094 struct sgl_page_pairs *sgl_pg_pairs;
18096 LPFC_MBOXQ_t *mbox;
18097 uint32_t reqlen, alloclen, pg_pairs;
18099 uint16_t xritag_start = 0;
18101 uint32_t shdr_status, shdr_add_status;
18102 union lpfc_sli4_cfg_shdr *shdr;
18104 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
18105 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
18106 if (reqlen > SLI4_PAGE_SIZE) {
18107 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18108 "2559 Block sgl registration required DMA "
18109 "size (%d) great than a page\n", reqlen);
18113 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18117 /* Allocate DMA memory and set up the non-embedded mailbox command */
18118 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18119 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
18120 LPFC_SLI4_MBX_NEMBED);
18122 if (alloclen < reqlen) {
18123 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18124 "0285 Allocated DMA memory size (%d) is "
18125 "less than the requested DMA memory "
18126 "size (%d)\n", alloclen, reqlen);
18127 lpfc_sli4_mbox_cmd_free(phba, mbox);
18130 /* Set up the SGL pages in the non-embedded DMA pages */
18131 viraddr = mbox->sge_array->addr[0];
18132 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
18133 sgl_pg_pairs = &sgl->sgl_pg_pairs;
18136 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
18137 /* Set up the sge entry */
18138 sgl_pg_pairs->sgl_pg0_addr_lo =
18139 cpu_to_le32(putPaddrLow(sglq_entry->phys));
18140 sgl_pg_pairs->sgl_pg0_addr_hi =
18141 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
18142 sgl_pg_pairs->sgl_pg1_addr_lo =
18143 cpu_to_le32(putPaddrLow(0));
18144 sgl_pg_pairs->sgl_pg1_addr_hi =
18145 cpu_to_le32(putPaddrHigh(0));
18147 /* Keep the first xritag on the list */
18149 xritag_start = sglq_entry->sli4_xritag;
18154 /* Complete initialization and perform endian conversion. */
18155 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
18156 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
18157 sgl->word0 = cpu_to_le32(sgl->word0);
18159 if (!phba->sli4_hba.intr_enable)
18160 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18162 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18163 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18165 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
18166 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18167 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18168 if (!phba->sli4_hba.intr_enable)
18169 lpfc_sli4_mbox_cmd_free(phba, mbox);
18170 else if (rc != MBX_TIMEOUT)
18171 lpfc_sli4_mbox_cmd_free(phba, mbox);
18172 if (shdr_status || shdr_add_status || rc) {
18173 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18174 "2513 POST_SGL_BLOCK mailbox command failed "
18175 "status x%x add_status x%x mbx status x%x\n",
18176 shdr_status, shdr_add_status, rc);
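
/*
 * Editor's arithmetic -- an illustration of the SLI4_PAGE_SIZE bound
 * checked above, assuming a 4 KB page and a 16-byte sgl_page_pairs
 * entry (two low/high address pairs): roughly
 * (4096 - sizeof(union lpfc_sli4_cfg_shdr) - 4) / 16, i.e. on the
 * order of 250 sgl page pairs fit in one non-embedded mailbox command.
 */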
18183 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
18184 * @phba: pointer to lpfc hba data structure.
18185 * @nblist: pointer to nvme buffer list.
18186 * @count: number of nvme buffers on the list.
18188 * This routine is invoked to post a block of @count nvme sgl pages from an
18189 * nvme buffer list @nblist to the HBA using a non-embedded mailbox command.
18194 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
18197 struct lpfc_io_buf *lpfc_ncmd;
18198 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
18199 struct sgl_page_pairs *sgl_pg_pairs;
18201 LPFC_MBOXQ_t *mbox;
18202 uint32_t reqlen, alloclen, pg_pairs;
18204 uint16_t xritag_start = 0;
18206 uint32_t shdr_status, shdr_add_status;
18207 dma_addr_t pdma_phys_bpl1;
18208 union lpfc_sli4_cfg_shdr *shdr;
18210 /* Calculate the requested length of the dma memory */
18211 reqlen = count * sizeof(struct sgl_page_pairs) +
18212 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
18213 if (reqlen > SLI4_PAGE_SIZE) {
18214 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
18215 "6118 Block sgl registration required DMA "
18216 "size (%d) great than a page\n", reqlen);
18219 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18221 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18222 "6119 Failed to allocate mbox cmd memory\n");
18226 /* Allocate DMA memory and set up the non-embedded mailbox command */
18227 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18228 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
18229 reqlen, LPFC_SLI4_MBX_NEMBED);
18231 if (alloclen < reqlen) {
18232 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18233 "6120 Allocated DMA memory size (%d) is "
18234 "less than the requested DMA memory "
18235 "size (%d)\n", alloclen, reqlen);
18236 lpfc_sli4_mbox_cmd_free(phba, mbox);
18240 /* Get the first SGE entry from the non-embedded DMA memory */
18241 viraddr = mbox->sge_array->addr[0];
18243 /* Set up the SGL pages in the non-embedded DMA pages */
18244 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
18245 sgl_pg_pairs = &sgl->sgl_pg_pairs;
18248 list_for_each_entry(lpfc_ncmd, nblist, list) {
18249 /* Set up the sge entry */
18250 sgl_pg_pairs->sgl_pg0_addr_lo =
18251 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
18252 sgl_pg_pairs->sgl_pg0_addr_hi =
18253 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
18254 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
18255 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
18258 pdma_phys_bpl1 = 0;
18259 sgl_pg_pairs->sgl_pg1_addr_lo =
18260 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
18261 sgl_pg_pairs->sgl_pg1_addr_hi =
18262 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
18263 /* Keep the first xritag on the list */
18265 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
18269 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
18270 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
18271 /* Perform endian conversion if necessary */
18272 sgl->word0 = cpu_to_le32(sgl->word0);
18274 if (!phba->sli4_hba.intr_enable) {
18275 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18277 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18278 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18280 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
18281 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18282 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18283 if (!phba->sli4_hba.intr_enable)
18284 lpfc_sli4_mbox_cmd_free(phba, mbox);
18285 else if (rc != MBX_TIMEOUT)
18286 lpfc_sli4_mbox_cmd_free(phba, mbox);
18287 if (shdr_status || shdr_add_status || rc) {
18288 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18289 "6125 POST_SGL_BLOCK mailbox command failed "
18290 "status x%x add_status x%x mbx status x%x\n",
18291 shdr_status, shdr_add_status, rc);
18298 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
18299 * @phba: pointer to lpfc hba data structure.
18300 * @post_nblist: pointer to the nvme buffer list.
18301 * @sb_count: number of nvme buffers.
18303 * This routine walks a list of nvme buffers that was passed in. It attempts
18304 * to construct blocks of nvme buffer sgls which contain contiguous xris and
18305 * uses the non-embedded SGL block post mailbox commands to post to the port.
18306 * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses
18307 * the embedded SGL post mailbox command for posting. The @post_nblist passed
18308 * in must be a local list, thus no lock is needed when manipulating the list.
18310 * Returns: 0 = failure, otherwise the number of successfully posted buffers.
18313 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
18314 struct list_head *post_nblist, int sb_count)
18316 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
18317 int status, sgl_size;
18318 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
18319 dma_addr_t pdma_phys_sgl1;
18320 int last_xritag = NO_XRI;
18322 LIST_HEAD(prep_nblist);
18323 LIST_HEAD(blck_nblist);
18324 LIST_HEAD(nvme_nblist);
18330 sgl_size = phba->cfg_sg_dma_buf_size;
18331 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
18332 list_del_init(&lpfc_ncmd->list);
18334 if ((last_xritag != NO_XRI) &&
18335 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
18336 /* a hole in xri block, form a sgl posting block */
18337 list_splice_init(&prep_nblist, &blck_nblist);
18338 post_cnt = block_cnt - 1;
18339 /* prepare list for next posting block */
18340 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18343 /* prepare list for next posting block */
18344 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18345 /* enough sgls for non-embed sgl mbox command */
18346 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
18347 list_splice_init(&prep_nblist, &blck_nblist);
18348 post_cnt = block_cnt;
18353 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18355 /* end of repost sgl list condition for NVME buffers */
18356 if (num_posting == sb_count) {
18357 if (post_cnt == 0) {
18358 /* last sgl posting block */
18359 list_splice_init(&prep_nblist, &blck_nblist);
18360 post_cnt = block_cnt;
18361 } else if (block_cnt == 1) {
18362 /* last single sgl with non-contiguous xri */
18363 if (sgl_size > SGL_PAGE_SIZE)
18365 lpfc_ncmd->dma_phys_sgl +
18368 pdma_phys_sgl1 = 0;
18369 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18370 status = lpfc_sli4_post_sgl(
18371 phba, lpfc_ncmd->dma_phys_sgl,
18372 pdma_phys_sgl1, cur_xritag);
18374 /* Post error. Buffer unavailable. */
18375 lpfc_ncmd->flags |=
18376 LPFC_SBUF_NOT_POSTED;
18378 /* Post success. Buffer available. */
18379 lpfc_ncmd->flags &=
18380 ~LPFC_SBUF_NOT_POSTED;
18381 lpfc_ncmd->status = IOSTAT_SUCCESS;
18384 /* success, put on NVME buffer sgl list */
18385 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18389 /* continue until a nembed page worth of sgls */
18393 /* post block of NVME buffer list sgls */
18394 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
18397 /* don't reset xritag due to hole in xri block */
18398 if (block_cnt == 0)
18399 last_xritag = NO_XRI;
18401 /* reset NVME buffer post count for next round of posting */
18404 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */
18405 while (!list_empty(&blck_nblist)) {
18406 list_remove_head(&blck_nblist, lpfc_ncmd,
18407 struct lpfc_io_buf, list);
18409 /* Post error. Mark buffer unavailable. */
18410 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
18412 /* Post success, Mark buffer available. */
18413 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
18414 lpfc_ncmd->status = IOSTAT_SUCCESS;
18417 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18420 /* Push NVME buffers with sgl posted to the available list */
18421 lpfc_io_buf_replenish(phba, &nvme_nblist);
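
/*
 * Editor's note -- illustrative walk-through of the blocking logic
 * above. Given buffers with xritags 10, 11, 12, 20, 21: the hole between
 * 12 and 20 closes the first block, so 10..12 go out in one
 * lpfc_sli4_post_io_sgl_block() call; 20 and 21 start the next block.
 * A final single buffer with a non-contiguous xri falls back to the
 * embedded lpfc_sli4_post_sgl() path instead.
 */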
18427 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
18428 * @phba: pointer to lpfc_hba struct that the frame was received on
18429 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18431 * This function checks the fields in the @fc_hdr to see if the FC frame is a
18432 * valid type of frame that the LPFC driver will handle. This function will
18433 * return zero if the frame is a valid frame, or a non-zero value when the
18434 * frame does not pass the check.
18437 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
18439 /* make rctl_names static to save stack space */
18440 struct fc_vft_header *fc_vft_hdr;
18441 uint32_t *header = (uint32_t *) fc_hdr;
18443 #define FC_RCTL_MDS_DIAGS 0xF4
18445 switch (fc_hdr->fh_r_ctl) {
18446 case FC_RCTL_DD_UNCAT: /* uncategorized information */
18447 case FC_RCTL_DD_SOL_DATA: /* solicited data */
18448 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
18449 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
18450 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
18451 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
18452 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
18453 case FC_RCTL_DD_CMD_STATUS: /* command status */
18454 case FC_RCTL_ELS_REQ: /* extended link services request */
18455 case FC_RCTL_ELS_REP: /* extended link services reply */
18456 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
18457 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
18458 case FC_RCTL_BA_ABTS: /* basic link service abort */
18459 case FC_RCTL_BA_RMC: /* remove connection */
18460 case FC_RCTL_BA_ACC: /* basic accept */
18461 case FC_RCTL_BA_RJT: /* basic reject */
18462 case FC_RCTL_BA_PRMT:
18463 case FC_RCTL_ACK_1: /* acknowledge_1 */
18464 case FC_RCTL_ACK_0: /* acknowledge_0 */
18465 case FC_RCTL_P_RJT: /* port reject */
18466 case FC_RCTL_F_RJT: /* fabric reject */
18467 case FC_RCTL_P_BSY: /* port busy */
18468 case FC_RCTL_F_BSY: /* fabric busy to data frame */
18469 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
18470 case FC_RCTL_LCR: /* link credit reset */
18471 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
18472 case FC_RCTL_END: /* end */
18474 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
18475 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18476 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
18477 return lpfc_fc_frame_check(phba, fc_hdr);
18478 case FC_RCTL_BA_NOP: /* basic link service NOP */
18483 switch (fc_hdr->fh_type) {
18496 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
18497 "2538 Received frame rctl:x%x, type:x%x, "
18498 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
18499 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
18500 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
18501 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
18502 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
18503 be32_to_cpu(header[6]));
18506 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
18507 "2539 Dropped frame rctl:x%x type:x%x\n",
18508 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18513 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
18514 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18516 * This function processes the FC header to retrieve the VFI from the VF
18517 * header, if one exists. This function will return the VFI if one exists
18518 * or 0 if no VFT header exists.
18521 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
18523 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18525 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
18527 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
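
/*
 * Editor's note -- illustrative use. Only frames tagged with a Virtual
 * Fabric Tagging header carry a VFI; everything else yields 0:
 *
 *	vfi = lpfc_fc_hdr_get_vfi(fc_hdr);  // 0 when fh_r_ctl != FC_RCTL_VFTH
 */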
18531 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
18532 * @phba: Pointer to the HBA structure to search for the vport on
18533 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18534 * @fcfi: The FC Fabric ID that the frame came from
18535 * @did: Destination ID to match against
18537 * This function searches the @phba for a vport that matches the content of the
18538 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
18539 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
18540 * returns the matching vport pointer or NULL if unable to match frame to a
18543 static struct lpfc_vport *
18544 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
18545 uint16_t fcfi, uint32_t did)
18547 struct lpfc_vport **vports;
18548 struct lpfc_vport *vport = NULL;
18551 if (did == Fabric_DID)
18552 return phba->pport;
18553 if ((phba->pport->fc_flag & FC_PT2PT) &&
18554 !(phba->link_state == LPFC_HBA_READY))
18555 return phba->pport;
18557 vports = lpfc_create_vport_work_array(phba);
18558 if (vports != NULL) {
18559 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
18560 if (phba->fcf.fcfi == fcfi &&
18561 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
18562 vports[i]->fc_myDID == did) {
18568 lpfc_destroy_vport_work_array(phba, vports);
18573 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
18574 * @vport: The vport to work on.
18576 * This function updates the receive sequence time stamp for this vport. The
18577 * receive sequence time stamp indicates the time that the last frame of
18578 * the sequence that has been idle for the longest amount of time was received.
18579 * The driver uses this time stamp to determine if any received sequences have
18583 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
18585 struct lpfc_dmabuf *h_buf;
18586 struct hbq_dmabuf *dmabuf = NULL;
18588 /* get the oldest sequence on the rcv list */
18589 h_buf = list_get_first(&vport->rcv_buffer_list,
18590 struct lpfc_dmabuf, list);
18593 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18594 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
18598 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
18599 * @vport: The vport that the received sequences were sent to.
18601 * This function cleans up all outstanding received sequences. This is called
18602 * by the driver when a link event or user action invalidates all the received
18606 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
18608 struct lpfc_dmabuf *h_buf, *hnext;
18609 struct lpfc_dmabuf *d_buf, *dnext;
18610 struct hbq_dmabuf *dmabuf = NULL;
18612 /* start with the oldest sequence on the rcv list */
18613 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18614 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18615 list_del_init(&dmabuf->hbuf.list);
18616 list_for_each_entry_safe(d_buf, dnext,
18617 &dmabuf->dbuf.list, list) {
18618 list_del_init(&d_buf->list);
18619 lpfc_in_buf_free(vport->phba, d_buf);
18621 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18626 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
18627 * @vport: The vport that the received sequences were sent to.
18629 * This function determines whether any received sequences have timed out by
18630 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
18631 * indicates that there is at least one timed out sequence this routine will
18632 * go through the received sequences one at a time from most inactive to most
18633 * active to determine which ones need to be cleaned up. Once it has determined
18634 * that a sequence needs to be cleaned up it will simply free up the resources
18635 * without sending an abort.
18638 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
18640 struct lpfc_dmabuf *h_buf, *hnext;
18641 struct lpfc_dmabuf *d_buf, *dnext;
18642 struct hbq_dmabuf *dmabuf = NULL;
18643 unsigned long timeout;
18644 int abort_count = 0;
18646 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18647 vport->rcv_buffer_time_stamp);
18648 if (list_empty(&vport->rcv_buffer_list) ||
18649 time_before(jiffies, timeout))
18651 /* start with the oldest sequence on the rcv list */
18652 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18653 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18654 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18655 dmabuf->time_stamp);
18656 if (time_before(jiffies, timeout))
18659 list_del_init(&dmabuf->hbuf.list);
18660 list_for_each_entry_safe(d_buf, dnext,
18661 &dmabuf->dbuf.list, list) {
18662 list_del_init(&d_buf->list);
18663 lpfc_in_buf_free(vport->phba, d_buf);
18665 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18668 lpfc_update_rcv_time_stamp(vport);
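
/*
 * Editor's note -- illustrative timing example. With fc_edtov = 2000 ms,
 * a sequence whose newest frame arrived more than two seconds ago (per
 * the time_before() check against dmabuf->time_stamp plus the edtov
 * converted to jiffies) is freed here without sending an abort.
 */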
18672 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
18673 * @vport: pointer to a virtual port
18674 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
18676 * This function searches through the existing incomplete sequences that have
18677 * been sent to this @vport. If the frame matches one of the incomplete
18678 * sequences then the dbuf in the @dmabuf is added to the list of frames that
18679 * make up that sequence. If no sequence is found that matches this frame then
18680 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
18681 * This function returns a pointer to the first dmabuf in the sequence list that
18682 * the frame was linked to.
18684 static struct hbq_dmabuf *
18685 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18687 struct fc_frame_header *new_hdr;
18688 struct fc_frame_header *temp_hdr;
18689 struct lpfc_dmabuf *d_buf;
18690 struct lpfc_dmabuf *h_buf;
18691 struct hbq_dmabuf *seq_dmabuf = NULL;
18692 struct hbq_dmabuf *temp_dmabuf = NULL;
18695 INIT_LIST_HEAD(&dmabuf->dbuf.list);
18696 dmabuf->time_stamp = jiffies;
18697 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18699 /* Use the hdr_buf to find the sequence that this frame belongs to */
18700 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18701 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18702 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18703 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18704 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18706 /* found a pending sequence that matches this frame */
18707 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18712 * This indicates first frame received for this sequence.
18713 * Queue the buffer on the vport's rcv_buffer_list.
18715 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18716 lpfc_update_rcv_time_stamp(vport);
18719 temp_hdr = seq_dmabuf->hbuf.virt;
18720 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
18721 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18722 list_del_init(&seq_dmabuf->hbuf.list);
18723 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18724 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18725 lpfc_update_rcv_time_stamp(vport);
18728 /* move this sequence to the tail to indicate a young sequence */
18729 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
18730 seq_dmabuf->time_stamp = jiffies;
18731 lpfc_update_rcv_time_stamp(vport);
18732 if (list_empty(&seq_dmabuf->dbuf.list)) {
18733 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18736 /* find the correct place in the sequence to insert this frame */
18737 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
18739 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18740 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
18742 * If the frame's sequence count is greater than the frame on
18743 * the list then insert the frame right after this frame
18745 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
18746 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18747 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
18752 if (&d_buf->list == &seq_dmabuf->dbuf.list)
18754 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
18763 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
18764 * @vport: pointer to a virtual port
18765 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18767 * This function tries to abort the partially assembled sequence described
18768 * by the information from the basic abort @dmabuf. It checks whether such a
18769 * partially assembled sequence is held by the driver. If so, it frees all
18770 * the frames from the partially assembled sequence.
18773 * true -- if a matching partially assembled sequence is present and all
18774 * its frames were freed with the sequence;
18775 * false -- if no matching partially assembled sequence is present, so
18776 * nothing was aborted in the lower layer driver
18779 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
18780 struct hbq_dmabuf *dmabuf)
18782 struct fc_frame_header *new_hdr;
18783 struct fc_frame_header *temp_hdr;
18784 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
18785 struct hbq_dmabuf *seq_dmabuf = NULL;
18787 /* Use the hdr_buf to find the sequence that matches this frame */
18788 INIT_LIST_HEAD(&dmabuf->dbuf.list);
18789 INIT_LIST_HEAD(&dmabuf->hbuf.list);
18790 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18791 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18792 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18793 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18794 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18795 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18797 /* found a pending sequence that matches this frame */
18798 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18802 /* Free up all the frames from the partially assembled sequence */
18804 list_for_each_entry_safe(d_buf, n_buf,
18805 &seq_dmabuf->dbuf.list, list) {
18806 list_del_init(&d_buf->list);
18807 lpfc_in_buf_free(vport->phba, d_buf);
18815 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
18816 * @vport: pointer to a virtual port
18817 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18819 * This function tries to abort the assembled sequence at the upper level
18820 * protocol, described by the information from the basic abort @dmabuf. It
18821 * checks whether such a pending context exists at the upper level protocol.
18822 * If so, it shall clean up the pending context.
18825 * true -- if a matching pending context of the sequence was cleaned
18827 * false -- if no matching pending context of the sequence is present
18831 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18833 struct lpfc_hba *phba = vport->phba;
18836 /* Accepting abort at ulp with SLI4 only */
18837 if (phba->sli_rev < LPFC_SLI_REV4)
18840 /* Give all interested upper level protocols a chance to handle the abort */
18841 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
18849 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
18850 * @phba: Pointer to HBA context object.
18851 * @cmd_iocbq: pointer to the command iocbq structure.
18852 * @rsp_iocbq: pointer to the response iocbq structure.
18854 * This function handles the sequence abort response iocb command complete
18855 * event. It properly releases the memory allocated to the sequence abort
18859 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
18860 struct lpfc_iocbq *cmd_iocbq,
18861 struct lpfc_iocbq *rsp_iocbq)
18863 struct lpfc_nodelist *ndlp;
18866 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
18867 lpfc_nlp_put(ndlp);
18868 lpfc_sli_release_iocbq(phba, cmd_iocbq);
18871 /* Failure means BLS ABORT RSP did not get delivered to remote node */
18872 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
18873 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18874 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
18875 rsp_iocbq->iocb.ulpStatus,
18876 rsp_iocbq->iocb.un.ulpWord[4]);
18880 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
18881 * @phba: Pointer to HBA context object.
18882 * @xri: xri id in transaction.
18884 * This function validates that the xri maps to the known range of XRIs
18885 * allocated and used by the driver.
18888 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
18893 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
18894 if (xri == phba->sli4_hba.xri_ids[i])
18901 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
18902 * @vport: pointer to a virtual port.
18903 * @fc_hdr: pointer to a FC frame header.
18904 * @aborted: was the partially assembled receive sequence successfully aborted
18906 * This function sends a basic response to a previous unsol sequence abort
18907 * event after aborting the sequence handling.
18910 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
18911 struct fc_frame_header *fc_hdr, bool aborted)
18913 struct lpfc_hba *phba = vport->phba;
18914 struct lpfc_iocbq *ctiocb = NULL;
18915 struct lpfc_nodelist *ndlp;
18916 uint16_t oxid, rxid, xri, lxri;
18917 uint32_t sid, fctl;
18921 if (!lpfc_is_link_up(phba))
18924 sid = sli4_sid_from_fc_hdr(fc_hdr);
18925 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
18926 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
18928 ndlp = lpfc_findnode_did(vport, sid);
18930 ndlp = lpfc_nlp_init(vport, sid);
18932 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
18933 "1268 Failed to allocate ndlp for "
18934 "oxid:x%x SID:x%x\n", oxid, sid);
18937 /* Put ndlp onto pport node list */
18938 lpfc_enqueue_node(vport, ndlp);
18941 /* Allocate buffer for rsp iocb */
18942 ctiocb = lpfc_sli_get_iocbq(phba);
18946 /* Extract the F_CTL field from FC_HDR */
18947 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
18949 icmd = &ctiocb->iocb;
18950 icmd->un.xseq64.bdl.bdeSize = 0;
18951 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
18952 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
18953 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
18954 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
18956 /* Fill in the rest of iocb fields */
18957 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
18958 icmd->ulpBdeCount = 0;
18960 icmd->ulpClass = CLASS3;
18961 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
18962 ctiocb->context1 = lpfc_nlp_get(ndlp);
18963 if (!ctiocb->context1) {
18964 lpfc_sli_release_iocbq(phba, ctiocb);
18968 ctiocb->vport = phba->pport;
18969 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
18970 ctiocb->sli4_lxritag = NO_XRI;
18971 ctiocb->sli4_xritag = NO_XRI;
18973 if (fctl & FC_FC_EX_CTX)
18974 /* Exchange responder sent the abort so we
18980 lxri = lpfc_sli4_xri_inrange(phba, xri);
18981 if (lxri != NO_XRI)
18982 lpfc_set_rrq_active(phba, ndlp, lxri,
18983 (xri == oxid) ? rxid : oxid, 0);
18984 /* For BA_ABTS from exchange responder, if the logical xri with
18985 * the oxid maps to the FCP XRI range, the port no longer has
18986 * that exchange context, send a BLS_RJT. Override the IOCB for
18989 if ((fctl & FC_FC_EX_CTX) &&
18990 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
18991 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
18992 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
18993 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
18994 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
18997 /* If BA_ABTS failed to abort a partially assembled receive sequence,
18998 * the driver no longer has that exchange, send a BLS_RJT. Override
18999 * the IOCB for a BA_RJT.
19001 if (aborted == false) {
19002 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
19003 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
19004 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
19005 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
19008 if (fctl & FC_FC_EX_CTX) {
19009 /* ABTS sent by responder to CT exchange, construction
19010 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
19011 * field and RX_ID from ABTS for RX_ID field.
19013 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
19015 /* ABTS sent by initiator to CT exchange, construction
19016 * of BA_ACC will need to allocate a new XRI as for the
19019 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
19021 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
19022 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
19024 /* Xmit CT abts response on exchange <xid> */
19025 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
19026 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
19027 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
19029 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
19030 if (rc == IOCB_ERROR) {
19031 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19032 "2925 Failed to issue CT ABTS RSP x%x on "
19033 "xri x%x, Data x%x\n",
19034 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
19036 lpfc_nlp_put(ndlp);
19037 ctiocb->context1 = NULL;
19038 lpfc_sli_release_iocbq(phba, ctiocb);
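/*
 * Response selection sketch (summarizing the logic above, not new code):
 * the IOCB is prepared as a BA_ACC and downgraded to a BA_RJT when the
 * exchange is no longer owned or the partial sequence could not be aborted:
 *
 *	rctl = FC_RCTL_BA_ACC;
 *	if ((fctl & FC_FC_EX_CTX) && lxri > lpfc_sli4_get_iocb_cnt(phba))
 *		rctl = FC_RCTL_BA_RJT;	// exchange context is gone
 *	if (!aborted)
 *		rctl = FC_RCTL_BA_RJT;	// abort of partial sequence failed
 */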
19043 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
19044 * @vport: Pointer to the vport on which this sequence was received
19045 * @dmabuf: pointer to a dmabuf that describes the FC sequence
19047 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
19048 * receive sequence is only partially assembled by the driver, it shall abort
19049 * the partially assembled frames for the sequence. Otherwise, if the
19050 * unsolicited receive sequence has been completely assembled and passed to
19051 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
19052 * indicate that the unsolicited sequence has been aborted. After that, it
19053 * issues a basic accept (BA_ACC) for the abort.
19056 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
19057 struct hbq_dmabuf *dmabuf)
19059 struct lpfc_hba *phba = vport->phba;
19060 struct fc_frame_header fc_hdr;
19064 /* Make a copy of fc_hdr before the dmabuf being released */
19065 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
19066 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
19068 if (fctl & FC_FC_EX_CTX) {
19069 /* ABTS by responder to exchange, no cleanup needed */
19072 /* ABTS by initiator to exchange, need to do cleanup */
19073 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
19074 if (aborted == false)
19075 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
19077 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19079 if (phba->nvmet_support) {
19080 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
19084 /* Respond with BA_ACC or BA_RJT accordingly */
19085 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
19089 * lpfc_seq_complete - Indicates if a sequence is complete
19090 * @dmabuf: pointer to a dmabuf that describes the FC sequence
19092 * This function checks the sequence, starting with the frame described by
19093 * @dmabuf, to see if all the frames associated with this sequence are present.
19094 * The frames associated with this sequence are linked to @dmabuf using the
19095 * dbuf list. This function checks three things: 1) the first frame has a
19096 * sequence count of zero; 2) some frame has the last-frame-of-sequence bit
19097 * set; 3) there are no holes in the sequence count. The function will
19098 * return 1 when the sequence is complete, otherwise it will return 0.
19101 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
19103 struct fc_frame_header *hdr;
19104 struct lpfc_dmabuf *d_buf;
19105 struct hbq_dmabuf *seq_dmabuf;
19109 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19110 /* make sure first frame of sequence has a sequence count of zero */
19111 if (hdr->fh_seq_cnt != seq_count)
19113 fctl = (hdr->fh_f_ctl[0] << 16 |
19114 hdr->fh_f_ctl[1] << 8 |
19116 /* If last frame of sequence we can return success. */
19117 if (fctl & FC_FC_END_SEQ)
19119 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
19120 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19121 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19122 /* If there is a hole in the sequence count then fail. */
19123 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
19125 fctl = (hdr->fh_f_ctl[0] << 16 |
19126 hdr->fh_f_ctl[1] << 8 |
19128 /* If last frame of sequence we can return success. */
19129 if (fctl & FC_FC_END_SEQ)
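/*
 * Completeness example (illustrative): a three-frame sequence with header
 * sequence counts 0, 1, 2 and FC_FC_END_SEQ set in the last frame's F_CTL
 * is complete; counts 0, 2 leave a hole and the sequence is incomplete.
 */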
19136 * lpfc_prep_seq - Prep sequence for ULP processing
19137 * @vport: Pointer to the vport on which this sequence was received
19138 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
19140 * This function takes a sequence, described by a list of frames, and creates
19141 * a list of iocbq structures to describe the sequence. This iocbq list will be
19142 * used to issue to the generic unsolicited sequence handler. This routine
19143 * returns a pointer to the first iocbq in the list. If the function is unable
19144 * to allocate an iocbq, it throws out the received frames that could not
19145 * be described and returns a pointer to the first iocbq. If unable to
19146 * allocate any iocbqs (including the first) this function will return NULL.
19148 static struct lpfc_iocbq *
19149 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
19151 struct hbq_dmabuf *hbq_buf;
19152 struct lpfc_dmabuf *d_buf, *n_buf;
19153 struct lpfc_iocbq *first_iocbq, *iocbq;
19154 struct fc_frame_header *fc_hdr;
19156 uint32_t len, tot_len;
19157 struct ulp_bde64 *pbde;
19159 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19160 /* remove from receive buffer list */
19161 list_del_init(&seq_dmabuf->hbuf.list);
19162 lpfc_update_rcv_time_stamp(vport);
19163 /* get the Remote Port's SID */
19164 sid = sli4_sid_from_fc_hdr(fc_hdr);
19166 /* Get an iocbq struct to fill in. */
19167 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
19169 /* Initialize the first IOCB. */
19170 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
19171 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
19172 first_iocbq->vport = vport;
19174 /* Check FC Header to see what TYPE of frame we are rcv'ing */
19175 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
19176 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
19177 first_iocbq->iocb.un.rcvels.parmRo =
19178 sli4_did_from_fc_hdr(fc_hdr);
19179 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
19181 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
19182 first_iocbq->iocb.ulpContext = NO_XRI;
19183 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
19184 be16_to_cpu(fc_hdr->fh_ox_id);
19185 /* iocbq is prepped for internal consumption. Physical vpi. */
19186 first_iocbq->iocb.unsli3.rcvsli3.vpi =
19187 vport->phba->vpi_ids[vport->vpi];
19188 /* put the first buffer into the first IOCBq */
19189 tot_len = bf_get(lpfc_rcqe_length,
19190 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
19192 first_iocbq->context2 = &seq_dmabuf->dbuf;
19193 first_iocbq->context3 = NULL;
19194 first_iocbq->iocb.ulpBdeCount = 1;
19195 if (tot_len > LPFC_DATA_BUF_SIZE)
19196 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
19197 LPFC_DATA_BUF_SIZE;
19199 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
19201 first_iocbq->iocb.un.rcvels.remoteID = sid;
19203 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
19205 iocbq = first_iocbq;
19207 * Each IOCBq can have two Buffers assigned, so go through the list
19208 * of buffers for this sequence and save two buffers in each IOCBq
19210 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
19212 lpfc_in_buf_free(vport->phba, d_buf);
19215 if (!iocbq->context3) {
19216 iocbq->context3 = d_buf;
19217 iocbq->iocb.ulpBdeCount++;
19218 /* We need to get the size out of the right CQE */
19219 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19220 len = bf_get(lpfc_rcqe_length,
19221 &hbq_buf->cq_event.cqe.rcqe_cmpl);
19222 pbde = (struct ulp_bde64 *)
19223 &iocbq->iocb.unsli3.sli3Words[4];
19224 if (len > LPFC_DATA_BUF_SIZE)
19225 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
19227 pbde->tus.f.bdeSize = len;
19229 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
19232 iocbq = lpfc_sli_get_iocbq(vport->phba);
19235 first_iocbq->iocb.ulpStatus =
19236 IOSTAT_FCP_RSP_ERROR;
19237 first_iocbq->iocb.un.ulpWord[4] =
19238 IOERR_NO_RESOURCES;
19240 lpfc_in_buf_free(vport->phba, d_buf);
19243 /* We need to get the size out of the right CQE */
19244 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19245 len = bf_get(lpfc_rcqe_length,
19246 &hbq_buf->cq_event.cqe.rcqe_cmpl);
19247 iocbq->context2 = d_buf;
19248 iocbq->context3 = NULL;
19249 iocbq->iocb.ulpBdeCount = 1;
19250 if (len > LPFC_DATA_BUF_SIZE)
19251 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
19252 LPFC_DATA_BUF_SIZE;
19254 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
19257 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
19259 iocbq->iocb.un.rcvels.remoteID = sid;
19260 list_add_tail(&iocbq->list, &first_iocbq->list);
19263 /* Free the sequence's header buffer */
19265 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
19267 return first_iocbq;
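/*
 * Packing sketch (summarizing the loop above): each iocbq describes at most
 * two buffers of the sequence,
 *
 *	iocbq->context2 = buf;		// first buffer of this iocbq
 *	iocbq->context3 = next_buf;	// second buffer, BDE in sli3Words[4]
 *
 * and a third buffer forces allocation of a new iocbq that is chained onto
 * first_iocbq->list.
 */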
19271 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
19272 struct hbq_dmabuf *seq_dmabuf)
19274 struct fc_frame_header *fc_hdr;
19275 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
19276 struct lpfc_hba *phba = vport->phba;
19278 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19279 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
19281 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19282 "2707 Ring %d handler: Failed to allocate "
19283 "iocb Rctl x%x Type x%x received\n",
19285 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
19288 if (!lpfc_complete_unsol_iocb(phba,
19289 phba->sli4_hba.els_wq->pring,
19290 iocbq, fc_hdr->fh_r_ctl,
19291 fc_hdr->fh_type)) {
19292 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19293 "2540 Ring %d handler: unexpected Rctl "
19294 "x%x Type x%x received\n",
19296 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
19297 lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
19300 /* Free iocb created in lpfc_prep_seq */
19301 list_for_each_entry_safe(curr_iocb, next_iocb,
19302 &iocbq->list, list) {
19303 list_del_init(&curr_iocb->list);
19304 lpfc_sli_release_iocbq(phba, curr_iocb);
19306 lpfc_sli_release_iocbq(phba, iocbq);
19310 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
19311 struct lpfc_iocbq *rspiocb)
19313 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
19315 if (pcmd && pcmd->virt)
19316 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19318 lpfc_sli_release_iocbq(phba, cmdiocb);
19319 lpfc_drain_txq(phba);
19323 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
19324 struct hbq_dmabuf *dmabuf)
19326 struct fc_frame_header *fc_hdr;
19327 struct lpfc_hba *phba = vport->phba;
19328 struct lpfc_iocbq *iocbq = NULL;
19329 union lpfc_wqe *wqe;
19330 struct lpfc_dmabuf *pcmd = NULL;
19331 uint32_t frame_len;
19333 unsigned long iflags;
19335 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19336 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
19338 /* Send the received frame back */
19339 iocbq = lpfc_sli_get_iocbq(phba);
19341 /* Queue cq event and wakeup worker thread to process it */
19342 spin_lock_irqsave(&phba->hbalock, iflags);
19343 list_add_tail(&dmabuf->cq_event.list,
19344 &phba->sli4_hba.sp_queue_event);
19345 phba->hba_flag |= HBA_SP_QUEUE_EVT;
19346 spin_unlock_irqrestore(&phba->hbalock, iflags);
19347 lpfc_worker_wake_up(phba);
19351 /* Allocate buffer for command payload */
19352 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
19354 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
19356 if (!pcmd || !pcmd->virt)
19359 INIT_LIST_HEAD(&pcmd->list);
19361 /* copyin the payload */
19362 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
19364 /* fill in BDE's for command */
19365 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
19366 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
19367 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
19368 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
19370 iocbq->context2 = pcmd;
19371 iocbq->vport = vport;
19372 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
19373 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
19376 * Setup rest of the iocb as though it were a WQE
19377 * Build the SEND_FRAME WQE
19379 wqe = (union lpfc_wqe *)&iocbq->iocb;
19381 wqe->send_frame.frame_len = frame_len;
19382 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
19383 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
19384 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
19385 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
19386 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
19387 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
19389 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
19390 iocbq->iocb.ulpLe = 1;
19391 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
19392 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
19393 if (rc == IOCB_ERROR)
19396 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19400 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
19401 "2023 Unable to process MDS loopback frame\n");
19402 if (pcmd && pcmd->virt)
19403 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19406 lpfc_sli_release_iocbq(phba, iocbq);
19407 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19411 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
19412 * @phba: Pointer to HBA context object.
19413 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
19415 * This function is called with no lock held. It processes all
19416 * the received buffers and gives them to the upper layers when a received
19417 * buffer indicates that it is the final frame in the sequence. The interrupt
19418 * service routine processes received buffers in interrupt context. The
19419 * worker thread calls lpfc_sli4_handle_received_buffer, which will call the
19420 * appropriate receive function when the final frame in a sequence is received.
19423 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
19424 struct hbq_dmabuf *dmabuf)
19426 struct hbq_dmabuf *seq_dmabuf;
19427 struct fc_frame_header *fc_hdr;
19428 struct lpfc_vport *vport;
19432 /* Process each received buffer */
19433 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19435 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
19436 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
19437 vport = phba->pport;
19438 /* Handle MDS Loopback frames */
19439 if (!(phba->pport->load_flag & FC_UNLOADING))
19440 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19442 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19446 /* check to see if this a valid type of frame */
19447 if (lpfc_fc_frame_check(phba, fc_hdr)) {
19448 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19452 if ((bf_get(lpfc_cqe_code,
19453 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
19454 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
19455 &dmabuf->cq_event.cqe.rcqe_cmpl);
19457 fcfi = bf_get(lpfc_rcqe_fcf_id,
19458 &dmabuf->cq_event.cqe.rcqe_cmpl);
19460 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
19461 vport = phba->pport;
19462 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
19463 "2023 MDS Loopback %d bytes\n",
19464 bf_get(lpfc_rcqe_length,
19465 &dmabuf->cq_event.cqe.rcqe_cmpl));
19466 /* Handle MDS Loopback frames */
19467 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19471 /* d_id this frame is directed to */
19472 did = sli4_did_from_fc_hdr(fc_hdr);
19474 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
19476 /* throw out the frame */
19477 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19481 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
19482 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
19483 (did != Fabric_DID)) {
19485 * Throw out the frame if we are not pt2pt.
19486 * The pt2pt protocol allows for discovery frames
19487 * to be received without a registered VPI.
19489 if (!(vport->fc_flag & FC_PT2PT) ||
19490 (phba->link_state == LPFC_HBA_READY)) {
19491 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19496 /* Handle the basic abort sequence (BA_ABTS) event */
19497 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
19498 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
19502 /* Link this frame */
19503 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
19505 /* unable to add frame to vport - throw it out */
19506 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19509 /* If not last frame in sequence continue processing frames. */
19510 if (!lpfc_seq_complete(seq_dmabuf))
19513 /* Send the complete sequence to the upper layer protocol */
19514 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
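/*
 * Receive-path sketch (no new code, just the flow implemented above):
 *
 *	lpfc_fc_frame_check()       -> drop invalid frames
 *	lpfc_fc_frame_to_vport()    -> resolve the destination vport
 *	lpfc_fc_frame_add()         -> link the frame into its sequence
 *	lpfc_seq_complete()         -> wait for the final frame
 *	lpfc_sli4_send_seq_to_ulp() -> hand the sequence to the ULP
 */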
19518 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
19519 * @phba: pointer to lpfc hba data structure.
19521 * This routine is invoked to post rpi header templates to the
19522 * HBA consistent with the SLI-4 interface spec. This routine
19523 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
19524 * SLI4_PAGE_SIZE / 64 rpi context headers.
19526 * This routine does not require any locks. Its usage is expected
19527 * to be driver load or reset recovery when the driver is
19532 * -EIO - The mailbox failed to complete successfully.
19533 * When this error occurs, the driver is not guaranteed
19534 * to have any rpi regions posted to the device and
19535 * must either attempt to repost the regions or take a
19539 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
19541 struct lpfc_rpi_hdr *rpi_page;
19545 /* SLI4 ports that support extents do not require RPI headers. */
19546 if (!phba->sli4_hba.rpi_hdrs_in_use)
19548 if (phba->sli4_hba.extents_in_use)
19551 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
19553 * Assign the rpi headers a physical rpi only if the driver
19554 * has not initialized those resources. A port reset only
19555 * needs the headers posted.
19557 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
19559 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19561 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
19562 if (rc != MBX_SUCCESS) {
19563 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19564 "2008 Error %d posting all rpi "
19572 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
19573 LPFC_RPI_RSRC_RDY);
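/*
 * Caller sketch (illustrative): this is expected to run at driver load or
 * reset recovery, with a nonzero return treated as fatal for rpi posting:
 *
 *	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
 *	if (rc)
 *		goto out_fatal;	// hypothetical error label
 */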
19578 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
19579 * @phba: pointer to lpfc hba data structure.
19580 * @rpi_page: pointer to the rpi memory region.
19582 * This routine is invoked to post a single rpi header to the
19583 * HBA consistent with the SLI-4 interface spec. This memory region
19584 * maps up to 64 rpi context regions.
19588 * -ENOMEM - No available memory
19589 * -EIO - The mailbox failed to complete successfully.
19592 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
19594 LPFC_MBOXQ_t *mboxq;
19595 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
19597 uint32_t shdr_status, shdr_add_status;
19598 union lpfc_sli4_cfg_shdr *shdr;
19600 /* SLI4 ports that support extents do not require RPI headers. */
19601 if (!phba->sli4_hba.rpi_hdrs_in_use)
19603 if (phba->sli4_hba.extents_in_use)
19606 /* The port is notified of the header region via a mailbox command. */
19607 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19609 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19610 "2001 Unable to allocate memory for issuing "
19611 "SLI_CONFIG_SPECIAL mailbox command\n");
19615 /* Post all rpi memory regions to the port. */
19616 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
19617 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19618 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
19619 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
19620 sizeof(struct lpfc_sli4_cfg_mhdr),
19621 LPFC_SLI4_MBX_EMBED);
19624 /* Post the physical rpi to the port for this rpi header. */
19625 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
19626 rpi_page->start_rpi);
19627 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
19628 hdr_tmpl, rpi_page->page_count);
19630 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
19631 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
19632 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19633 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
19634 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19635 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19636 mempool_free(mboxq, phba->mbox_mem_pool);
19637 if (shdr_status || shdr_add_status || rc) {
19638 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19639 "2514 POST_RPI_HDR mailbox failed with "
19640 "status x%x add_status x%x, mbx status x%x\n",
19641 shdr_status, shdr_add_status, rc);
19645 * The next_rpi stores the next logical module-64 rpi value used
19646 * to post physical rpis in subsequent rpi postings.
19648 spin_lock_irq(&phba->hbalock);
19649 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
19650 spin_unlock_irq(&phba->hbalock);
19656 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
19657 * @phba: pointer to lpfc hba data structure.
19659 * This routine is invoked to allocate an rpi from the driver's available
19660 * rpi pool, consistent with the SLI-4 interface spec. If the pool of rpis
19661 * backed by posted rpi headers is running low, it also attempts to create
19662 * and post another rpi header region to the port.
19665 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
19666 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
19669 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
19672 uint16_t max_rpi, rpi_limit;
19673 uint16_t rpi_remaining, lrpi = 0;
19674 struct lpfc_rpi_hdr *rpi_hdr;
19675 unsigned long iflag;
19678 * Fetch the next logical rpi. Because this index is logical,
19679 * the driver starts at 0 each time.
19681 spin_lock_irqsave(&phba->hbalock, iflag);
19682 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
19683 rpi_limit = phba->sli4_hba.next_rpi;
19685 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
19686 if (rpi >= rpi_limit)
19687 rpi = LPFC_RPI_ALLOC_ERROR;
19689 set_bit(rpi, phba->sli4_hba.rpi_bmask);
19690 phba->sli4_hba.max_cfg_param.rpi_used++;
19691 phba->sli4_hba.rpi_count++;
19693 lpfc_printf_log(phba, KERN_INFO,
19694 LOG_NODE | LOG_DISCOVERY,
19695 "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
19696 (int) rpi, max_rpi, rpi_limit);
19699 * Don't try to allocate more rpi header regions if the device limit
19700 * has been exhausted.
19702 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
19703 (phba->sli4_hba.rpi_count >= max_rpi)) {
19704 spin_unlock_irqrestore(&phba->hbalock, iflag);
19709 * RPI header postings are not required for SLI4 ports capable of
19712 if (!phba->sli4_hba.rpi_hdrs_in_use) {
19713 spin_unlock_irqrestore(&phba->hbalock, iflag);
19718 * If the driver is running low on rpi resources, allocate another
19719 * page now. Note that the next_rpi value is used because
19720 * it represents how many are actually in use whereas max_rpi notes
19721 * how many are supported max by the device.
19723 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
19724 spin_unlock_irqrestore(&phba->hbalock, iflag);
19725 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
19726 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
19728 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19729 "2002 Error Could not grow rpi "
19732 lrpi = rpi_hdr->start_rpi;
19733 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19734 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
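/*
 * Allocation sketch (illustrative caller): pair every successful
 * lpfc_sli4_alloc_rpi() with lpfc_sli4_free_rpi() on teardown:
 *
 *	rpi = lpfc_sli4_alloc_rpi(phba);
 *	if (rpi == LPFC_RPI_ALLOC_ERROR)
 *		return -ENOSPC;	// hypothetical caller policy
 *	...
 *	lpfc_sli4_free_rpi(phba, rpi);
 */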
19742 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
19743 * @phba: pointer to lpfc hba data structure.
19744 * @rpi: rpi to free
19746 * This routine is invoked to release an rpi to the pool of
19747 * available rpis maintained by the driver.
19750 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19753 * if the rpi value indicates a prior unreg has already
19754 * been done, skip the unreg.
19756 if (rpi == LPFC_RPI_ALLOC_ERROR)
19759 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
19760 phba->sli4_hba.rpi_count--;
19761 phba->sli4_hba.max_cfg_param.rpi_used--;
19763 lpfc_printf_log(phba, KERN_INFO,
19764 LOG_NODE | LOG_DISCOVERY,
19765 "2016 rpi %x not inuse\n",
19771 * lpfc_sli4_free_rpi - Release an rpi for reuse.
19772 * @phba: pointer to lpfc hba data structure.
19773 * @rpi: rpi to free
19775 * This routine is invoked to release an rpi to the pool of
19776 * available rpis maintained by the driver.
19779 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19781 spin_lock_irq(&phba->hbalock);
19782 __lpfc_sli4_free_rpi(phba, rpi);
19783 spin_unlock_irq(&phba->hbalock);
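/*
 * Locking sketch: __lpfc_sli4_free_rpi() expects phba->hbalock to be held,
 * so callers already inside the lock use it directly; all other callers go
 * through the wrapper above, which is equivalent to:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	__lpfc_sli4_free_rpi(phba, rpi);
 *	spin_unlock_irq(&phba->hbalock);
 */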
19787 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
19788 * @phba: pointer to lpfc hba data structure.
19790 * This routine is invoked to free the memory regions used to track rpi
19791 * resources via a bitmask (the rpi bitmask and rpi id array).
19794 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
19796 kfree(phba->sli4_hba.rpi_bmask);
19797 kfree(phba->sli4_hba.rpi_ids);
19798 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
19802 * lpfc_sli4_resume_rpi - Resume an rpi with the port
19803 * @ndlp: pointer to lpfc nodelist data structure.
19804 * @cmpl: completion call-back.
19805 * @arg: data to load as MBox 'caller buffer information'
19807 * This routine is invoked to issue a RESUME_RPI mailbox command to the
19808 * port to resume a paused rpi for the given node.
19811 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19812 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
19814 LPFC_MBOXQ_t *mboxq;
19815 struct lpfc_hba *phba = ndlp->phba;
19818 /* The port is notified of the header region via a mailbox command. */
19819 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19823 /* If cmpl assigned, then this nlp_get pairs with
19824 * lpfc_mbx_cmpl_resume_rpi.
19826 * Else cmpl is NULL, then this nlp_get pairs with
19827 * lpfc_sli_def_mbox_cmpl.
19829 if (!lpfc_nlp_get(ndlp)) {
19830 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19831 "2122 %s: Failed to get nlp ref\n",
19833 mempool_free(mboxq, phba->mbox_mem_pool);
19837 /* Post all rpi memory regions to the port. */
19838 lpfc_resume_rpi(mboxq, ndlp);
19840 mboxq->mbox_cmpl = cmpl;
19841 mboxq->ctx_buf = arg;
19843 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19844 mboxq->ctx_ndlp = ndlp;
19845 mboxq->vport = ndlp->vport;
19846 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19847 if (rc == MBX_NOT_FINISHED) {
19848 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19849 "2010 Resume RPI Mailbox failed "
19850 "status %d, mbxStatus x%x\n", rc,
19851 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19852 lpfc_nlp_put(ndlp);
19853 mempool_free(mboxq, phba->mbox_mem_pool);
19860 * lpfc_sli4_init_vpi - Initialize a vpi with the port
19861 * @vport: Pointer to the vport for which the vpi is being initialized
19863 * This routine is invoked to activate a vpi with the port.
19867 * negative error value otherwise
19870 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
19872 LPFC_MBOXQ_t *mboxq;
19874 int retval = MBX_SUCCESS;
19876 struct lpfc_hba *phba = vport->phba;
19877 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19880 lpfc_init_vpi(phba, mboxq, vport->vpi);
19881 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
19882 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
19883 if (rc != MBX_SUCCESS) {
19884 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19885 "2022 INIT VPI Mailbox failed "
19886 "status %d, mbxStatus x%x\n", rc,
19887 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19890 if (rc != MBX_TIMEOUT)
19891 mempool_free(mboxq, vport->phba->mbox_mem_pool);
19897 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
19898 * @phba: pointer to lpfc hba data structure.
19899 * @mboxq: Pointer to mailbox object.
19901 * This routine is invoked to manually add a single FCF record. The caller
19902 * must pass a completely initialized FCF_Record. This routine takes
19903 * care of the nonembedded mailbox operations.
19906 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
19909 union lpfc_sli4_cfg_shdr *shdr;
19910 uint32_t shdr_status, shdr_add_status;
19912 virt_addr = mboxq->sge_array->addr[0];
19913 /* The IOCTL status is embedded in the mailbox subheader. */
19914 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
19915 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19916 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19918 if ((shdr_status || shdr_add_status) &&
19919 (shdr_status != STATUS_FCF_IN_USE))
19920 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19921 "2558 ADD_FCF_RECORD mailbox failed with "
19922 "status x%x add_status x%x\n",
19923 shdr_status, shdr_add_status);
19925 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19929 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
19930 * @phba: pointer to lpfc hba data structure.
19931 * @fcf_record: pointer to the initialized fcf record to add.
19933 * This routine is invoked to manually add a single FCF record. The caller
19934 * must pass a completely initialized FCF_Record. This routine takes
19935 * care of the nonembedded mailbox operations.
19938 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
19941 LPFC_MBOXQ_t *mboxq;
19944 struct lpfc_mbx_sge sge;
19945 uint32_t alloc_len, req_len;
19948 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19950 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19951 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
19955 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
19958 /* Allocate DMA memory and set up the non-embedded mailbox command */
19959 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19960 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
19961 req_len, LPFC_SLI4_MBX_NEMBED);
19962 if (alloc_len < req_len) {
19963 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19964 "2523 Allocated DMA memory size (x%x) is "
19965 "less than the requested DMA memory "
19966 "size (x%x)\n", alloc_len, req_len);
19967 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19972 * Get the first SGE entry from the non-embedded DMA memory. This
19973 * routine only uses a single SGE.
19975 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
19976 virt_addr = mboxq->sge_array->addr[0];
19978 * Configure the FCF record for FCFI 0. This is the driver's
19979 * hardcoded default and gets used in nonFIP mode.
19981 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
19982 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
19983 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
19986 * Copy the fcf_index and the FCF Record Data. The data starts after
19987 * the FCoE header plus word10. The data copy needs to be endian
19990 bytep += sizeof(uint32_t);
19991 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
19992 mboxq->vport = phba->pport;
19993 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
19994 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19995 if (rc == MBX_NOT_FINISHED) {
19996 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19997 "2515 ADD_FCF_RECORD mailbox failed with "
19998 "status 0x%x\n", rc);
19999 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20008 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
20009 * @phba: pointer to lpfc hba data structure.
20010 * @fcf_record: pointer to the fcf record to write the default data.
20011 * @fcf_index: FCF table entry index.
20013 * This routine is invoked to build the driver's default FCF record. The
20014 * values used are hardcoded. This routine handles memory initialization.
20018 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
20019 struct fcf_record *fcf_record,
20020 uint16_t fcf_index)
20022 memset(fcf_record, 0, sizeof(struct fcf_record));
20023 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
20024 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
20025 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
20026 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
20027 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
20028 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
20029 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
20030 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
20031 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
20032 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
20033 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
20034 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
20035 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
20036 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
20037 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
20038 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
20039 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
20040 /* Set the VLAN bit map */
20041 if (phba->valid_vlan) {
20042 fcf_record->vlan_bitmap[phba->vlan_id / 8]
20043 = 1 << (phba->vlan_id % 8);
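/*
 * Typical pairing (sketch): build the driver default record, then add it to
 * the port's FCF table:
 *
 *	struct fcf_record fcf_record;
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, fcf_index);
 *	rc = lpfc_sli4_add_fcf_record(phba, &fcf_record);
 */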
20048 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
20049 * @phba: pointer to lpfc hba data structure.
20050 * @fcf_index: FCF table entry offset.
20052 * This routine is invoked to scan the entire FCF table by reading FCF
20053 * record and processing it one at a time starting from the @fcf_index
20054 * for initial FCF discovery or fast FCF failover rediscovery.
20056 * Return 0 if the mailbox command is submitted successfully, nonzero
20060 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20063 LPFC_MBOXQ_t *mboxq;
20065 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
20066 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
20067 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20069 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20070 "2000 Failed to allocate mbox for "
20073 goto fail_fcf_scan;
20075 /* Construct the read FCF record mailbox command */
20076 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20079 goto fail_fcf_scan;
20081 /* Issue the mailbox command asynchronously */
20082 mboxq->vport = phba->pport;
20083 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
20085 spin_lock_irq(&phba->hbalock);
20086 phba->hba_flag |= FCF_TS_INPROG;
20087 spin_unlock_irq(&phba->hbalock);
20089 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20090 if (rc == MBX_NOT_FINISHED)
20093 /* Reset eligible FCF count for new scan */
20094 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
20095 phba->fcf.eligible_fcf_cnt = 0;
20101 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20102 /* FCF scan failed, clear FCF_TS_INPROG flag */
20103 spin_lock_irq(&phba->hbalock);
20104 phba->hba_flag &= ~FCF_TS_INPROG;
20105 spin_unlock_irq(&phba->hbalock);
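/*
 * Scan start sketch (illustrative): an initial discovery scan begins at the
 * first table entry and walks the rest of the table from the completion
 * handler:
 *
 *	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 */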
20111 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
20112 * @phba: pointer to lpfc hba data structure.
20113 * @fcf_index: FCF table entry offset.
20115 * This routine is invoked to read an FCF record indicated by @fcf_index
20116 * and to use it for FLOGI roundrobin FCF failover.
20118 * Return 0 if the mailbox command is submitted successfully, nonzero
20122 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20125 LPFC_MBOXQ_t *mboxq;
20127 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20129 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20130 "2763 Failed to allocate mbox for "
20133 goto fail_fcf_read;
20135 /* Construct the read FCF record mailbox command */
20136 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20139 goto fail_fcf_read;
20141 /* Issue the mailbox command asynchronously */
20142 mboxq->vport = phba->pport;
20143 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
20144 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20145 if (rc == MBX_NOT_FINISHED)
20151 if (error && mboxq)
20152 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20157 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
20158 * @phba: pointer to lpfc hba data structure.
20159 * @fcf_index: FCF table entry offset.
20161 * This routine is invoked to read an FCF record indicated by @fcf_index to
20162 * determine whether it's eligible for FLOGI roundrobin failover list.
20164 * Return 0 if the mailbox command is submitted successfully, nonzero
20168 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20171 LPFC_MBOXQ_t *mboxq;
20173 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20175 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20176 "2758 Failed to allocate mbox for "
20179 goto fail_fcf_read;
20181 /* Construct the read FCF record mailbox command */
20182 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20185 goto fail_fcf_read;
20187 /* Issue the mailbox command asynchronously */
20188 mboxq->vport = phba->pport;
20189 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
20190 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20191 if (rc == MBX_NOT_FINISHED)
20197 if (error && mboxq)
20198 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20203 * lpfc_check_next_fcf_pri_level
20204 * @phba: pointer to the lpfc_hba struct for this port.
20205 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
20206 * routine when the rr_bmask is empty. The FCF indices are put into the
20207 * rr_bmask based on their priority level, starting from the highest priority
20208 * down to the lowest. The most likely FCF candidate will be in the highest
20209 * priority group. When this routine is called it searches the fcf_pri list for
20210 * the next lowest priority group and repopulates the rr_bmask with only those
20213 * 1=success 0=failure
20216 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
20218 uint16_t next_fcf_pri;
20219 uint16_t last_index;
20220 struct lpfc_fcf_pri *fcf_pri;
20224 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
20225 LPFC_SLI4_FCF_TBL_INDX_MAX);
20226 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20227 "3060 Last IDX %d\n", last_index);
20229 /* Verify the priority list has 2 or more entries */
20230 spin_lock_irq(&phba->hbalock);
20231 if (list_empty(&phba->fcf.fcf_pri_list) ||
20232 list_is_singular(&phba->fcf.fcf_pri_list)) {
20233 spin_unlock_irq(&phba->hbalock);
20234 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20235 "3061 Last IDX %d\n", last_index);
20236 return 0; /* Empty rr list */
20238 spin_unlock_irq(&phba->hbalock);
20242 * Clear the rr_bmask and set all of the bits that are at this
20245 memset(phba->fcf.fcf_rr_bmask, 0,
20246 sizeof(*phba->fcf.fcf_rr_bmask));
20247 spin_lock_irq(&phba->hbalock);
20248 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20249 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
20252 * the first priority that has not failed FLOGI
20253 * will be the highest.
20256 next_fcf_pri = fcf_pri->fcf_rec.priority;
20257 spin_unlock_irq(&phba->hbalock);
20258 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20259 rc = lpfc_sli4_fcf_rr_index_set(phba,
20260 fcf_pri->fcf_rec.fcf_index);
20264 spin_lock_irq(&phba->hbalock);
20267 * if next_fcf_pri was not set above and the list is not empty then
20268 * we have failed flogis on all of them. So reset flogi failed
20269 * and start at the beginning.
20271 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
20272 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20273 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
20275 * the first priority that has not failed FLOGI
20276 * will be the highest.
20279 next_fcf_pri = fcf_pri->fcf_rec.priority;
20280 spin_unlock_irq(&phba->hbalock);
20281 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20282 rc = lpfc_sli4_fcf_rr_index_set(phba,
20283 fcf_pri->fcf_rec.fcf_index);
20287 spin_lock_irq(&phba->hbalock);
20291 spin_unlock_irq(&phba->hbalock);
20296 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
20297 * @phba: pointer to lpfc hba data structure.
20299 * This routine is to get the next eligible FCF record index in a round
20300 * robin fashion. If the next eligible FCF record index equals to the
20301 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
20302 * shall be returned, otherwise, the next eligible FCF record's index
20303 * shall be returned.
20306 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
20308 uint16_t next_fcf_index;
20311 /* Search start from next bit of currently registered FCF index */
20312 next_fcf_index = phba->fcf.current_rec.fcf_indx;
20315 /* Determine the next fcf index to check */
20316 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
20317 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
20318 LPFC_SLI4_FCF_TBL_INDX_MAX,
20321 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
20322 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20324 * If we have wrapped then we need to clear the bits that
20325 * have been tested so that we can detect when we should
20326 * change the priority level.
20328 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
20329 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
20333 /* Check roundrobin failover list empty condition */
20334 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
20335 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
20337 * If next fcf index is not found check if there are lower
20338 * Priority level fcf's in the fcf_priority list.
20339 * Set up the rr_bmask with all of the available fcf bits
20340 * at that level and continue the selection process.
20342 if (lpfc_check_next_fcf_pri_level(phba))
20343 goto initial_priority;
20344 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
20345 "2844 No roundrobin failover FCF available\n");
20347 return LPFC_FCOE_FCF_NEXT_NONE;
20350 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
20351 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
20352 LPFC_FCF_FLOGI_FAILED) {
20353 if (list_is_singular(&phba->fcf.fcf_pri_list))
20354 return LPFC_FCOE_FCF_NEXT_NONE;
20356 goto next_priority;
20359 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20360 "2845 Get next roundrobin failover FCF (x%x)\n",
20363 return next_fcf_index;
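/*
 * Failover sketch (illustrative caller): fetch the next eligible index and
 * re-read its record for the FLOGI retry:
 *
 *	fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
 *		return;	// no eligible FCF remains
 *	rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
 */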
20367 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
20368 * @phba: pointer to lpfc hba data structure.
20369 * @fcf_index: index into the FCF table to 'set'
20371 * This routine sets the FCF record index in to the eligible bmask for
20372 * roundrobin failover search. It checks to make sure that the index
20373 * does not go beyond the range of the driver allocated bmask dimension
20374 * before setting the bit.
20376 * Returns 0 if the index bit successfully set, otherwise, it returns
20380 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
20382 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20383 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20384 "2610 FCF (x%x) reached driver's book "
20385 "keeping dimension:x%x\n",
20386 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20389 /* Set the eligible FCF record index bmask */
20390 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20392 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20393 "2790 Set FCF (x%x) to roundrobin FCF failover "
20394 "bmask\n", fcf_index);
20400 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
20401 * @phba: pointer to lpfc hba data structure.
20402 * @fcf_index: index into the FCF table to 'clear'
20404 * This routine clears the FCF record index from the eligible bmask for
20405 * roundrobin failover search. It checks to make sure that the index
20406 * does not go beyond the range of the driver allocated bmask dimension
20407 * before clearing the bit.
20410 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
20412 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
20413 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20414 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20415 "2762 FCF (x%x) reached driver's book "
20416 "keeping dimension:x%x\n",
20417 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20420 /* Clear the eligible FCF record index bmask */
20421 spin_lock_irq(&phba->hbalock);
20422 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
20424 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
20425 list_del_init(&fcf_pri->list);
20429 spin_unlock_irq(&phba->hbalock);
20430 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20432 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20433 "2791 Clear FCF (x%x) from roundrobin failover "
20434 "bmask\n", fcf_index);
20438 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
20439 * @phba: pointer to lpfc hba data structure.
20440 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
20442 * This routine is the completion routine for the rediscover FCF table mailbox
20443 * command. If the mailbox command returned failure, it will try to stop the
20444 * FCF rediscover wait timer.
20447 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
20449 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20450 uint32_t shdr_status, shdr_add_status;
20452 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20454 shdr_status = bf_get(lpfc_mbox_hdr_status,
20455 &redisc_fcf->header.cfg_shdr.response);
20456 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20457 &redisc_fcf->header.cfg_shdr.response);
20458 if (shdr_status || shdr_add_status) {
20459 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20460 "2746 Requesting for FCF rediscovery failed "
20461 "status x%x add_status x%x\n",
20462 shdr_status, shdr_add_status);
20463 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
20464 spin_lock_irq(&phba->hbalock);
20465 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
20466 spin_unlock_irq(&phba->hbalock);
20468 * CVL event triggered FCF rediscover request failed,
20469 * last resort to re-try current registered FCF entry.
20471 lpfc_retry_pport_discovery(phba);
20473 spin_lock_irq(&phba->hbalock);
20474 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
20475 spin_unlock_irq(&phba->hbalock);
20477 * DEAD FCF event triggered FCF rediscover request
20478 * failed, last resort to fail over as a link down
20479 * to FCF registration.
20481 lpfc_sli4_fcf_dead_failthrough(phba);
20484 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20485 "2775 Start FCF rediscover quiescent timer\n");
20487 * Start FCF rediscovery wait timer for pending FCF
20488 * before rescan FCF record table.
20490 lpfc_fcf_redisc_wait_start_timer(phba);
20493 mempool_free(mbox, phba->mbox_mem_pool);
20497 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
20498 * @phba: pointer to lpfc hba data structure.
20500 * This routine is invoked to request for rediscovery of the entire FCF table
20504 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
20506 LPFC_MBOXQ_t *mbox;
20507 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20510 /* Cancel retry delay timers to all vports before FCF rediscover */
20511 lpfc_cancel_all_vport_retry_delay_timer(phba);
20513 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20515 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20516 "2745 Failed to allocate mbox for "
20517 "requesting FCF rediscover.\n");
20521 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
20522 sizeof(struct lpfc_sli4_cfg_mhdr));
20523 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
20524 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
20525 length, LPFC_SLI4_MBX_EMBED);
20527 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20528 /* Set count to 0 for invalidating the entire FCF database */
20529 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
20531 /* Issue the mailbox command asynchronously */
20532 mbox->vport = phba->pport;
20533 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
20534 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
20536 if (rc == MBX_NOT_FINISHED) {
20537 mempool_free(mbox, phba->mbox_mem_pool);
20544 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
20545 * @phba: pointer to lpfc hba data structure.
20547 * This function is the failover routine as a last resort to the FCF DEAD
20548 * event when driver failed to perform fast FCF failover.
20551 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
20553 uint32_t link_state;
20556 * Last resort as FCF DEAD event failover will treat this as
20557 * a link down, but save the link state because we don't want
20558 * it to be changed to Link Down unless it is already down.
20560 link_state = phba->link_state;
20561 lpfc_linkdown(phba);
20562 phba->link_state = link_state;
20564 /* Unregister FCF if no devices connected to it */
20565 lpfc_unregister_unused_fcf(phba);
20569 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
20570 * @phba: pointer to lpfc hba data structure.
20571 * @rgn23_data: pointer to configure region 23 data.
20573 * This function gets SLI3 port configure region 23 data through memory dump
20574 * mailbox command. When it successfully retrieves data, the size of the data
20575 * will be returned, otherwise, 0 will be returned.
20578 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20580 LPFC_MBOXQ_t *pmb = NULL;
20582 uint32_t offset = 0;
20588 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20590 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20591 "2600 failed to allocate mailbox memory\n");
20597 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
20598 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
20600 if (rc != MBX_SUCCESS) {
20601 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20602 "2601 failed to read config "
20603 "region 23, rc 0x%x Status 0x%x\n",
20604 rc, mb->mbxStatus);
20605 mb->un.varDmp.word_cnt = 0;
20608 * dump mem may return a zero when finished or we got a
20609 * mailbox error, either way we are done.
20611 if (mb->un.varDmp.word_cnt == 0)
20614 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
20615 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
20617 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
20618 rgn23_data + offset,
20619 mb->un.varDmp.word_cnt);
20620 offset += mb->un.varDmp.word_cnt;
20621 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
20623 mempool_free(pmb, phba->mbox_mem_pool);
20628 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
20629 * @phba: pointer to lpfc hba data structure.
20630 * @rgn23_data: pointer to configure region 23 data.
20632 * This function gets SLI4 port configure region 23 data through memory dump
20633 * mailbox command. When it successfully retrieves data, the size of the data
20634 * will be returned, otherwise, 0 will be returned.
20637 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20639 LPFC_MBOXQ_t *mboxq = NULL;
20640 struct lpfc_dmabuf *mp = NULL;
20641 struct lpfc_mqe *mqe;
20642 uint32_t data_length = 0;
20648 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20650 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20651 "3105 failed to allocate mailbox memory\n");
20655 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
20657 mqe = &mboxq->u.mqe;
20658 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
20659 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
20662 data_length = mqe->un.mb_words[5];
20663 if (data_length == 0)
20665 if (data_length > DMP_RGN23_SIZE) {
20669 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
20671 mempool_free(mboxq, phba->mbox_mem_pool);
20673 lpfc_mbuf_free(phba, mp->virt, mp->phys);
20676 return data_length;
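/*
 * Selection sketch: callers choose the SLI3 or SLI4 variant by sli_rev,
 * roughly as lpfc_sli_read_link_ste() does below:
 *
 *	if (phba->sli_rev < LPFC_SLI_REV4)
 *		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
 *	else
 *		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
 */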
20680 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
20681 * @phba: pointer to lpfc hba data structure.
20683 * This function reads region 23 and parses the TLV for port status to
20684 * decide if the user disabled the port. If the TLV indicates the
20685 * port is disabled, the hba_flag is set accordingly.
20688 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
20690 uint8_t *rgn23_data = NULL;
20691 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
20692 uint32_t offset = 0;
20694 /* Get adapter Region 23 data */
20695 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
20699 if (phba->sli_rev < LPFC_SLI_REV4)
20700 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
20702 if_type = bf_get(lpfc_sli_intf_if_type,
20703 &phba->sli4_hba.sli_intf);
20704 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
20706 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
20712 /* Check the region signature first */
20713 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
20714 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20715 "2619 Config region 23 has bad signature\n");
20720 /* Check the data structure version */
20721 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
20722 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20723 "2620 Config region 23 has bad version\n");
20728 /* Parse TLV entries in the region */
20729 while (offset < data_size) {
20730 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
20733 * If the TLV is not driver specific TLV or driver id is
20734 * not linux driver id, skip the record.
20736 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
20737 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
20738 (rgn23_data[offset + 3] != 0)) {
20739 offset += rgn23_data[offset + 1] * 4 + 4;
20743 /* Driver found a driver specific TLV in the config region */
20744 sub_tlv_len = rgn23_data[offset + 1] * 4;
20749 * Search for configured port state sub-TLV.
20751 while ((offset < data_size) &&
20752 (tlv_offset < sub_tlv_len)) {
20753 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
20758 if (rgn23_data[offset] != PORT_STE_TYPE) {
20759 offset += rgn23_data[offset + 1] * 4 + 4;
20760 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
20764 /* This HBA contains PORT_STE configured */
20765 if (!rgn23_data[offset + 2])
20766 phba->hba_flag |= LINK_DISABLED;
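/*
 * Region 23 TLV walk sketch (as implemented above): each record is a type
 * byte, a length byte counting 32-bit words, then the payload, so the next
 * record starts at:
 *
 *	offset += rgn23_data[offset + 1] * 4 + 4;
 */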
20778 * lpfc_log_fw_write_cmpl - logs firmware write completion status
20779 * @phba: pointer to lpfc hba data structure
20780 * @shdr_status: wr_object rsp's status field
20781 * @shdr_add_status: wr_object rsp's add_status field
20782 * @shdr_add_status_2: wr_object rsp's add_status_2 field
20783 * @shdr_change_status: wr_object rsp's change_status field
20784 * @shdr_csf: wr_object rsp's csf bit
20786 * This routine is intended to be called after a firmware write completes.
20787 * It will log next action items to be performed by the user to instantiate
20788 * the newly downloaded firmware or reason for incompatibility.
20791 lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
20792 u32 shdr_add_status, u32 shdr_add_status_2,
20793 u32 shdr_change_status, u32 shdr_csf)
20795 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20796 "4198 %s: flash_id x%02x, asic_rev x%02x, "
20797 "status x%02x, add_status x%02x, add_status_2 x%02x, "
20798 "change_status x%02x, csf %01x\n", __func__,
20799 phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
20800 shdr_status, shdr_add_status, shdr_add_status_2,
20801 shdr_change_status, shdr_csf);
20803 if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
20804 switch (shdr_add_status_2) {
20805 case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
20806 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20807 "4199 Firmware write failed: "
20808 "image incompatible with flash x%02x\n",
20809 phba->sli4_hba.flash_id);
20810 break;
20811 case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
20812 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20813 "4200 Firmware write failed: "
20814 "image incompatible with ASIC "
20815 "architecture x%02x\n",
20816 phba->sli4_hba.asic_rev);
20817 break;
20818 default:
20819 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20820 "4210 Firmware write failed: "
20821 "add_status_2 x%02x\n",
20822 shdr_add_status_2);
20823 break;
20824 }
20825 } else if (!shdr_status && !shdr_add_status) {
20826 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20827 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
20828 if (shdr_csf)
20829 shdr_change_status =
20830 LPFC_CHANGE_STATUS_PCI_RESET;
20831 }
20833 switch (shdr_change_status) {
20834 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
20835 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20836 "3198 Firmware write complete: System "
20837 "reboot required to instantiate\n");
20839 case (LPFC_CHANGE_STATUS_FW_RESET):
20840 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20841 "3199 Firmware write complete: "
20842 "Firmware reset required to "
20845 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
20846 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20847 "3200 Firmware write complete: Port "
20848 "Migration or PCI Reset required to "
20851 case (LPFC_CHANGE_STATUS_PCI_RESET):
20852 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20853 "3201 Firmware write complete: PCI "
20854 "Reset required to instantiate\n");
20863 * lpfc_wr_object - write an object to the firmware
20864 * @phba: HBA structure that indicates port to create a queue on.
20865 * @dmabuf_list: list of dmabufs to write to the port.
20866 * @size: the total byte value of the objects to write to the port.
20867 * @offset: the current offset to be used to start the transfer.
20869 * This routine will create a wr_object mailbox command to send to the port.
20870 * The mailbox command will be constructed using the dma buffers described in
20871 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
20872 * BDEs as the embedded mailbox can support. The @offset variable will be
20873 * used to indicate the starting offset of the transfer and will also return
20874 * the offset after the write object mailbox has completed. @size is used to
20875 * determine the end of the object and whether the eof bit should be set.
20877 * Return 0 if successful, and @offset will contain the new offset to use
20878 * for the next write.
20879 * Return a negative value for error cases.
20880 */
20881 int
20882 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
20883 uint32_t size, uint32_t *offset)
20884 {
20885 struct lpfc_mbx_wr_object *wr_object;
20886 LPFC_MBOXQ_t *mbox;
20887 int rc = 0, i = 0;
20888 uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
20889 uint32_t shdr_change_status = 0, shdr_csf = 0;
20890 uint32_t mbox_tmo;
20891 struct lpfc_dmabuf *dmabuf;
20892 uint32_t written = 0;
20893 bool check_change_status = false;
20895 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20896 if (!mbox)
20897 return -ENOMEM;
20899 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
20900 LPFC_MBOX_OPCODE_WRITE_OBJECT,
20901 sizeof(struct lpfc_mbx_wr_object) -
20902 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
20904 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
20905 wr_object->u.request.write_offset = *offset;
20906 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
20907 wr_object->u.request.object_name[0] =
20908 cpu_to_le32(wr_object->u.request.object_name[0]);
20909 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
20910 list_for_each_entry(dmabuf, dmabuf_list, list) {
20911 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
20912 break;
20913 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
20914 wr_object->u.request.bde[i].addrHigh =
20915 putPaddrHigh(dmabuf->phys);
20916 if (written + SLI4_PAGE_SIZE >= size) {
20917 wr_object->u.request.bde[i].tus.f.bdeSize =
20918 (size - written);
20919 written += (size - written);
20920 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
20921 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
20922 check_change_status = true;
20923 } else {
20924 wr_object->u.request.bde[i].tus.f.bdeSize =
20925 SLI4_PAGE_SIZE;
20926 written += SLI4_PAGE_SIZE;
20927 }
20928 i++;
20929 }
20930 wr_object->u.request.bde_count = i;
20931 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
20932 if (!phba->sli4_hba.intr_enable)
20933 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
20934 else {
20935 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
20936 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
20937 }
20938 /* The IOCTL status is embedded in the mailbox subheader. */
20939 shdr_status = bf_get(lpfc_mbox_hdr_status,
20940 &wr_object->header.cfg_shdr.response);
20941 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20942 &wr_object->header.cfg_shdr.response);
20943 shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
20944 &wr_object->header.cfg_shdr.response);
20945 if (check_change_status) {
20946 shdr_change_status = bf_get(lpfc_wr_object_change_status,
20947 &wr_object->u.response);
20948 shdr_csf = bf_get(lpfc_wr_object_csf,
20949 &wr_object->u.response);
20950 }
20952 if (!phba->sli4_hba.intr_enable)
20953 mempool_free(mbox, phba->mbox_mem_pool);
20954 else if (rc != MBX_TIMEOUT)
20955 mempool_free(mbox, phba->mbox_mem_pool);
20956 if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
20957 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20958 "3025 Write Object mailbox failed with "
20959 "status x%x add_status x%x, add_status_2 x%x, "
20960 "mbx status x%x\n",
20961 shdr_status, shdr_add_status, shdr_add_status_2,
20962 rc);
20963 rc = -ENXIO;
20964 *offset = shdr_add_status;
20965 } else
20966 *offset += wr_object->u.response.actual_write_length;
20969 if (rc || check_change_status)
20970 lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
20971 shdr_add_status_2, shdr_change_status,
20972 shdr_csf);
20973 return rc;
20974 }
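/* Editor's sketch (not part of the driver): one plausible way a caller could
 * drive lpfc_wr_object() until a whole image is written. The helper name and
 * the fw_dmabuf_list/fw_size parameters are hypothetical; only
 * lpfc_wr_object() itself is from the source above.
 */
static int lpfc_wr_object_usage_sketch(struct lpfc_hba *phba,
				       struct list_head *fw_dmabuf_list,
				       uint32_t fw_size)
{
	uint32_t offset = 0;
	int rc = 0;

	/* Each call posts up to LPFC_MBX_WR_CONFIG_MAX_BDE BDEs and, on
	 * success, advances offset past the bytes just written.
	 */
	while (offset < fw_size) {
		rc = lpfc_wr_object(phba, fw_dmabuf_list, fw_size, &offset);
		if (rc)
			break;
	}
	return rc;
}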
20976 /**
20977 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
20978 * @vport: pointer to vport data structure.
20980 * This function iterates through the mailboxq and cleans up all REG_LOGIN
20981 * and REG_VPI mailbox commands associated with the vport. This function
20982 * is called when the driver wants to restart discovery of the vport due to
20983 * a Clear Virtual Link event.
20984 */
20985 void
20986 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
20987 {
20988 struct lpfc_hba *phba = vport->phba;
20989 LPFC_MBOXQ_t *mb, *nextmb;
20990 struct lpfc_dmabuf *mp;
20991 struct lpfc_nodelist *ndlp;
20992 struct lpfc_nodelist *act_mbx_ndlp = NULL;
20993 LIST_HEAD(mbox_cmd_list);
20994 uint8_t restart_loop;
20996 /* Clean up internally queued mailbox commands with the vport */
20997 spin_lock_irq(&phba->hbalock);
20998 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
20999 if (mb->vport != vport)
21000 continue;
21002 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
21003 (mb->u.mb.mbxCommand != MBX_REG_VPI))
21004 continue;
21006 list_move_tail(&mb->list, &mbox_cmd_list);
21007 }
21008 /* Clean up active mailbox command with the vport */
21009 mb = phba->sli.mbox_active;
21010 if (mb && (mb->vport == vport)) {
21011 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
21012 (mb->u.mb.mbxCommand == MBX_REG_VPI))
21013 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
21014 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
21015 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
21016 /* Put reference count for delayed processing */
21017 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
21018 /* Unregister the RPI when mailbox complete */
21019 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
21020 }
21021 }
21022 /* Cleanup any mailbox completions which are not yet processed */
21023 do {
21024 restart_loop = 0;
21025 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
21026 /*
21027 * If this mailbox is already processed or it is
21028 * for another vport, ignore it.
21029 */
21030 if ((mb->vport != vport) ||
21031 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
21032 continue;
21034 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
21035 (mb->u.mb.mbxCommand != MBX_REG_VPI))
21036 continue;
21038 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
21039 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
21040 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
21041 /* Unregister the RPI when mailbox complete */
21042 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
21043 restart_loop = 1;
21044 spin_unlock_irq(&phba->hbalock);
21045 spin_lock(&ndlp->lock);
21046 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
21047 spin_unlock(&ndlp->lock);
21048 spin_lock_irq(&phba->hbalock);
21049 break;
21050 }
21051 }
21052 } while (restart_loop);
21054 spin_unlock_irq(&phba->hbalock);
21056 /* Release the cleaned-up mailbox commands */
21057 while (!list_empty(&mbox_cmd_list)) {
21058 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
21059 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
21060 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
21061 if (mp) {
21062 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
21063 kfree(mp);
21064 }
21065 mb->ctx_buf = NULL;
21066 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
21067 mb->ctx_ndlp = NULL;
21068 if (ndlp) {
21069 spin_lock(&ndlp->lock);
21070 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
21071 spin_unlock(&ndlp->lock);
21072 lpfc_nlp_put(ndlp);
21073 }
21074 }
21075 mempool_free(mb, phba->mbox_mem_pool);
21076 }
21078 /* Release the ndlp with the cleaned-up active mailbox command */
21079 if (act_mbx_ndlp) {
21080 spin_lock(&act_mbx_ndlp->lock);
21081 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
21082 spin_unlock(&act_mbx_ndlp->lock);
21083 lpfc_nlp_put(act_mbx_ndlp);
21084 }
21085 }
21087 /**
21088 * lpfc_drain_txq - Drain the txq
21089 * @phba: Pointer to HBA context object.
21091 * This function attempts to submit IOCBs on the txq
21092 * to the adapter. For SLI4 adapters, the txq contains
21093 * ELS IOCBs that have been deferred because there
21094 * are no SGLs. This congestion can occur with large
21095 * vport counts during node discovery.
21097 */
21098 uint32_t
21099 lpfc_drain_txq(struct lpfc_hba *phba)
21100 {
21101 LIST_HEAD(completions);
21102 struct lpfc_sli_ring *pring;
21103 struct lpfc_iocbq *piocbq = NULL;
21104 unsigned long iflags = 0;
21105 char *fail_msg = NULL;
21106 struct lpfc_sglq *sglq;
21107 union lpfc_wqe128 wqe;
21108 uint32_t txq_cnt = 0;
21109 struct lpfc_queue *wq;
21111 if (phba->link_flag & LS_MDS_LOOPBACK) {
21112 /* MDS WQEs are posted only to the first WQ */
21113 wq = phba->sli4_hba.hdwq[0].io_wq;
21114 if (unlikely(!wq))
21115 return 0;
21116 pring = wq->pring;
21117 } else {
21118 wq = phba->sli4_hba.els_wq;
21119 if (unlikely(!wq))
21120 return 0;
21121 pring = lpfc_phba_elsring(phba);
21122 }
21124 if (unlikely(!pring) || list_empty(&pring->txq))
21125 return 0;
21127 spin_lock_irqsave(&pring->ring_lock, iflags);
21128 list_for_each_entry(piocbq, &pring->txq, list) {
21129 txq_cnt++;
21130 }
21132 if (txq_cnt > pring->txq_max)
21133 pring->txq_max = txq_cnt;
21135 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21137 while (!list_empty(&pring->txq)) {
21138 spin_lock_irqsave(&pring->ring_lock, iflags);
21140 piocbq = lpfc_sli_ringtx_get(phba, pring);
21141 if (!piocbq) {
21142 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21143 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21144 "2823 txq empty and txq_cnt is %d\n ",
21145 txq_cnt);
21146 break;
21147 }
21148 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
21149 if (!sglq) {
21150 __lpfc_sli_ringtx_put(phba, pring, piocbq);
21151 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21152 break;
21153 }
21154 txq_cnt--;
21156 /* The xri and iocb resources secured,
21157 * attempt to issue request
21159 piocbq->sli4_lxritag = sglq->sli4_lxritag;
21160 piocbq->sli4_xritag = sglq->sli4_xritag;
21161 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
21162 fail_msg = "to convert bpl to sgl";
21163 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
21164 fail_msg = "to convert iocb to wqe";
21165 else if (lpfc_sli4_wq_put(wq, &wqe))
21166 fail_msg = " - Wq is full";
21168 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
21171 /* Failed means we can't issue and need to cancel */
21172 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21173 "2822 IOCB failed %s iotag 0x%x "
21176 piocbq->iotag, piocbq->sli4_xritag);
21177 list_add_tail(&piocbq->list, &completions);
21180 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21183 /* Cancel all the IOCBs that cannot be issued */
21184 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
21185 IOERR_SLI_ABORTED);
21187 return txq_cnt;
21188 }
21190 /**
21191 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
21192 * @phba: Pointer to HBA context object.
21193 * @pwqeq: Pointer to command WQE.
21194 * @sglq: Pointer to the scatter gather queue object.
21196 * This routine converts the bpl or bde that is in the WQE
21197 * to a sgl list for the sli4 hardware. The physical address
21198 * of the bpl/bde is converted back to a virtual address.
21199 * If the WQE contains a BPL then the list of BDE's is
21200 * converted to sli4_sge's. If the WQE contains a single
21201 * BDE then it is converted to a single sli_sge.
21202 * The WQE is still in cpu endianness so the contents of
21203 * the bpl can be used without byte swapping.
21205 * Returns valid XRI = Success, NO_XRI = Failure.
21206 */
21207 static uint16_t
21208 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
21209 struct lpfc_sglq *sglq)
21210 {
21211 uint16_t xritag = NO_XRI;
21212 struct ulp_bde64 *bpl = NULL;
21213 struct ulp_bde64 bde;
21214 struct sli4_sge *sgl = NULL;
21215 struct lpfc_dmabuf *dmabuf;
21216 union lpfc_wqe128 *wqe;
21217 int numBdes = 0;
21218 int i = 0;
21219 uint32_t offset = 0; /* accumulated offset in the sg request list */
21220 int inbound = 0; /* number of sg reply entries inbound from firmware */
21221 uint32_t cmd;
21223 if (!pwqeq || !sglq)
21224 return xritag;
21226 sgl = (struct sli4_sge *)sglq->sgl;
21227 wqe = &pwqeq->wqe;
21228 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
21230 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
21231 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
21232 return sglq->sli4_xritag;
21233 numBdes = pwqeq->rsvd2;
21234 if (numBdes) {
21235 /* The addrHigh and addrLow fields within the WQE
21236 * have not been byteswapped yet so there is no
21237 * need to swap them back.
21239 if (pwqeq->context3)
21240 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
21241 else
21242 return xritag;
21244 bpl = (struct ulp_bde64 *)dmabuf->virt;
21245 if (!bpl)
21246 return xritag;
21248 for (i = 0; i < numBdes; i++) {
21249 /* Should already be byte swapped. */
21250 sgl->addr_hi = bpl->addrHigh;
21251 sgl->addr_lo = bpl->addrLow;
21253 sgl->word2 = le32_to_cpu(sgl->word2);
21254 if ((i+1) == numBdes)
21255 bf_set(lpfc_sli4_sge_last, sgl, 1);
21257 bf_set(lpfc_sli4_sge_last, sgl, 0);
21258 /* swap the size field back to the cpu so we
21259 * can assign it to the sgl.
21261 bde.tus.w = le32_to_cpu(bpl->tus.w);
21262 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
21263 /* The offsets in the sgl need to be accumulated
21264 * separately for the request and reply lists.
21265 * The request is always first, the reply follows.
21266 */
21267 switch (cmd) {
21268 case CMD_GEN_REQUEST64_WQE:
21269 /* add up the reply sg entries */
21270 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
21271 inbound++;
21272 /* first inbound? reset the offset */
21273 if (inbound == 1)
21274 offset = 0;
21275 bf_set(lpfc_sli4_sge_offset, sgl, offset);
21276 bf_set(lpfc_sli4_sge_type, sgl,
21277 LPFC_SGE_TYPE_DATA);
21278 offset += bde.tus.f.bdeSize;
21279 break;
21280 case CMD_FCP_TRSP64_WQE:
21281 bf_set(lpfc_sli4_sge_offset, sgl, 0);
21282 bf_set(lpfc_sli4_sge_type, sgl,
21283 LPFC_SGE_TYPE_DATA);
21284 break;
21285 case CMD_FCP_TSEND64_WQE:
21286 case CMD_FCP_TRECEIVE64_WQE:
21287 bf_set(lpfc_sli4_sge_type, sgl,
21288 bpl->tus.f.bdeFlags);
21292 offset += bde.tus.f.bdeSize;
21293 bf_set(lpfc_sli4_sge_offset, sgl, offset);
21294 break;
21295 }
21296 sgl->word2 = cpu_to_le32(sgl->word2);
21297 bpl++;
21298 sgl++;
21299 }
21300 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
21301 /* The addrHigh and addrLow fields of the BDE have not
21302 * been byteswapped yet so they need to be swapped
21303 * before putting them in the sgl.
21305 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
21306 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
21307 sgl->word2 = le32_to_cpu(sgl->word2);
21308 bf_set(lpfc_sli4_sge_last, sgl, 1);
21309 sgl->word2 = cpu_to_le32(sgl->word2);
21310 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
21311 }
21312 return sglq->sli4_xritag;
21313 }
21315 /**
21316 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
21317 * @phba: Pointer to HBA context object.
21318 * @qp: Pointer to HDW queue.
21319 * @pwqe: Pointer to command WQE.
21320 */
21321 int
21322 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21323 struct lpfc_iocbq *pwqe)
21324 {
21325 union lpfc_wqe128 *wqe = &pwqe->wqe;
21326 struct lpfc_async_xchg_ctx *ctxp;
21327 struct lpfc_queue *wq;
21328 struct lpfc_sglq *sglq;
21329 struct lpfc_sli_ring *pring;
21330 unsigned long iflags;
21331 uint32_t ret = 0;
21333 /* NVME_LS and NVME_LS ABTS requests. */
21334 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
21335 pring = phba->sli4_hba.nvmels_wq->pring;
21336 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21338 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
21339 if (!sglq) {
21340 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21341 return WQE_BUSY;
21342 }
21343 pwqe->sli4_lxritag = sglq->sli4_lxritag;
21344 pwqe->sli4_xritag = sglq->sli4_xritag;
21345 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
21346 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21347 return WQE_ERROR;
21348 }
21349 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21350 pwqe->sli4_xritag);
21351 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
21352 if (ret) {
21353 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21354 return ret;
21355 }
21357 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21358 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21360 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21361 return 0;
21362 }
21364 /* NVME_FCREQ and NVME_ABTS requests */
21365 if (pwqe->iocb_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
21366 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21367 wq = qp->io_wq;
21368 pring = wq->pring;
21370 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21372 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21374 ret = lpfc_sli4_wq_put(wq, wqe);
21375 if (ret) {
21376 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21377 return ret;
21378 }
21379 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21380 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21382 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21383 return 0;
21384 }
21386 /* NVMET requests */
21387 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
21388 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21389 wq = qp->io_wq;
21390 pring = wq->pring;
21392 ctxp = pwqe->context2;
21393 sglq = ctxp->ctxbuf->sglq;
21394 if (pwqe->sli4_xritag == NO_XRI) {
21395 pwqe->sli4_lxritag = sglq->sli4_lxritag;
21396 pwqe->sli4_xritag = sglq->sli4_xritag;
21397 }
21398 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21399 pwqe->sli4_xritag);
21400 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21402 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21404 ret = lpfc_sli4_wq_put(wq, wqe);
21405 if (ret) {
21406 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21407 return ret;
21408 }
21409 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21410 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21412 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21413 return 0;
21414 }
21416 return WQE_ERROR;
21417 }
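/* Editor's note: lpfc_sli4_issue_wqe() dispatches on pwqe->iocb_flag. NVME LS
 * WQEs go to the dedicated nvmels_wq, FCP/NVME fast-path IO to the hardware
 * queue's io_wq, and NVMET responses reuse the sglq already attached to the
 * exchange context; a WQE carrying none of these flags falls through to
 * WQE_ERROR.
 */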
21418 /**
21419 * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
21420 * @phba: Pointer to HBA context object.
21421 * @cmdiocb: Pointer to driver command iocb object.
21422 * @cmpl: completion function.
21424 * Fill the appropriate fields for the abort WQE and call
21425 * internal routine lpfc_sli4_issue_wqe to send the WQE.
21426 * This function is called with hbalock held and no ring_lock held.
21428 * RETURNS 0 - SUCCESS
21429 * non-zero - FAILURE
21430 */
21431 int
21432 lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
21433 void *cmpl)
21434 {
21435 struct lpfc_vport *vport = cmdiocb->vport;
21436 struct lpfc_iocbq *abtsiocb = NULL;
21437 union lpfc_wqe128 *abtswqe;
21438 struct lpfc_io_buf *lpfc_cmd;
21439 int retval = IOCB_ERROR;
21440 u16 xritag = cmdiocb->sli4_xritag;
21442 /*
21443 * The scsi command can not be in the txq; it is in flight because
21444 * pCmd is still pointing at the SCSI command we have to abort. There
21445 * is no need to search the txcmplq. Just send an abort to the FW.
21446 */
21448 abtsiocb = __lpfc_sli_get_iocbq(phba);
21449 if (!abtsiocb)
21450 return WQE_NORESOURCE;
21452 /* Indicate the IO is being aborted by the driver. */
21453 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
21455 abtswqe = &abtsiocb->wqe;
21456 memset(abtswqe, 0, sizeof(*abtswqe));
21458 if (!lpfc_is_link_up(phba))
21459 bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
21460 bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
21461 abtswqe->abort_cmd.rsrvd5 = 0;
21462 abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
21463 bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
21464 bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
21465 bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
21466 bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
21467 bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
21468 bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
21470 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
21471 abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
21472 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
21473 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
21474 abtsiocb->iocb_flag |= LPFC_IO_FCP;
21475 if (cmdiocb->iocb_flag & LPFC_IO_NVME)
21476 abtsiocb->iocb_flag |= LPFC_IO_NVME;
21477 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
21478 abtsiocb->iocb_flag |= LPFC_IO_FOF;
21479 abtsiocb->vport = vport;
21480 abtsiocb->wqe_cmpl = cmpl;
21482 lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
21483 retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
21485 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21486 "0359 Abort xri x%x, original iotag x%x, "
21487 "abort cmd iotag x%x retval x%x\n",
21488 xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
21490 if (retval) {
21491 cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
21492 __lpfc_sli_release_iocbq(phba, abtsiocb);
21493 }
21495 return retval;
21496 }
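/* Editor's note: the abort WQE above is deliberately issued on the same
 * hardware queue index (hba_wqidx) as the command being aborted, so the
 * abort and the original WQE complete on the same CQ and cannot race
 * across queues.
 */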
21498 #ifdef LPFC_MXP_STAT
21499 /**
21500 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
21501 * @phba: pointer to lpfc hba data structure.
21502 * @hwqid: which HWQ the snapshot is taken on.
21504 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
21505 * 15 seconds after a test case is running.
21507 * The user should call lpfc_debugfs_multixripools_write before running a test
21508 * case to clear stat_snapshot_taken. Then the user starts a test case. While
21509 * the test case is running, stat_snapshot_taken is incremented by 1 each time
21510 * this routine is called from the heartbeat timer. When stat_snapshot_taken is
21511 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
21512 */
21513 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
21514 {
21515 struct lpfc_sli4_hdw_queue *qp;
21516 struct lpfc_multixri_pool *multixri_pool;
21517 struct lpfc_pvt_pool *pvt_pool;
21518 struct lpfc_pbl_pool *pbl_pool;
21519 u32 txcmplq_cnt;
21521 qp = &phba->sli4_hba.hdwq[hwqid];
21522 multixri_pool = qp->p_multixri_pool;
21523 if (!multixri_pool)
21524 return;
21526 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
21527 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21528 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21529 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21531 multixri_pool->stat_pbl_count = pbl_pool->count;
21532 multixri_pool->stat_pvt_count = pvt_pool->count;
21533 multixri_pool->stat_busy_count = txcmplq_cnt;
21534 }
21536 multixri_pool->stat_snapshot_taken++;
21537 }
21538 #endif
21540 /**
21541 * lpfc_adjust_pvt_pool_count - Adjust private pool count
21542 * @phba: pointer to lpfc hba data structure.
21543 * @hwqid: which HWQ the private pool belongs to.
21545 * This routine moves some XRIs from the private to the public pool when the
21546 * private pool is not busy.
21547 */
21548 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
21549 {
21550 struct lpfc_multixri_pool *multixri_pool;
21551 u32 io_req_count;
21552 u32 prev_io_req_count;
21554 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21555 if (!multixri_pool)
21556 return;
21557 io_req_count = multixri_pool->io_req_count;
21558 prev_io_req_count = multixri_pool->prev_io_req_count;
21560 if (prev_io_req_count != io_req_count) {
21561 /* Private pool is busy */
21562 multixri_pool->prev_io_req_count = io_req_count;
21563 } else {
21564 /* Private pool is not busy.
21565 * Move XRIs from private to public pool.
21567 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
21568 }
21569 }
21571 /**
21572 * lpfc_adjust_high_watermark - Adjust high watermark
21573 * @phba: pointer to lpfc hba data structure.
21574 * @hwqid: which HWQ the watermark belongs to.
21576 * This routine sets the high watermark to the number of outstanding XRIs,
21577 * but makes sure the new value is between xri_limit/2 and xri_limit.
21578 */
21579 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
21580 {
21581 u32 new_watermark;
21582 u32 watermark_max;
21583 u32 watermark_min;
21584 u32 xri_limit;
21585 u32 txcmplq_cnt;
21586 u32 abts_io_bufs;
21587 struct lpfc_multixri_pool *multixri_pool;
21588 struct lpfc_sli4_hdw_queue *qp;
21590 qp = &phba->sli4_hba.hdwq[hwqid];
21591 multixri_pool = qp->p_multixri_pool;
21592 if (!multixri_pool)
21593 return;
21594 xri_limit = multixri_pool->xri_limit;
21596 watermark_max = xri_limit;
21597 watermark_min = xri_limit / 2;
21599 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21600 abts_io_bufs = qp->abts_scsi_io_bufs;
21601 abts_io_bufs += qp->abts_nvme_io_bufs;
21603 new_watermark = txcmplq_cnt + abts_io_bufs;
21604 new_watermark = min(watermark_max, new_watermark);
21605 new_watermark = max(watermark_min, new_watermark);
21606 multixri_pool->pvt_pool.high_watermark = new_watermark;
21608 #ifdef LPFC_MXP_STAT
21609 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
21610 new_watermark);
21611 #endif
21612 }
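/* Editor's worked example (assumed numbers, not from the source): with
 * xri_limit = 512 the new watermark is clamp(txcmplq_cnt + abts_io_bufs,
 * 256, 512); 100 outstanding XRIs yields 256 (raised to the floor), while
 * 700 yields 512 (capped at xri_limit).
 */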
21614 /**
21615 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
21616 * @phba: pointer to lpfc hba data structure.
21617 * @hwqid: which HWQ the pools belong to.
21619 * This routine is called from the heartbeat timer when pvt_pool is idle.
21620 * All free XRIs are moved from the private to the public pool on hwqid in
21621 * two steps. The first step moves (all - low_watermark) XRIs;
21622 * the second step moves the rest of the XRIs.
21623 */
21624 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
21625 {
21626 struct lpfc_pbl_pool *pbl_pool;
21627 struct lpfc_pvt_pool *pvt_pool;
21628 struct lpfc_sli4_hdw_queue *qp;
21629 struct lpfc_io_buf *lpfc_ncmd;
21630 struct lpfc_io_buf *lpfc_ncmd_next;
21631 unsigned long iflag;
21632 struct list_head tmp_list;
21633 u32 tmp_count;
21635 qp = &phba->sli4_hba.hdwq[hwqid];
21636 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21637 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21638 tmp_count = 0;
21640 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
21641 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
21643 if (pvt_pool->count > pvt_pool->low_watermark) {
21644 /* Step 1: move (all - low_watermark) from pvt_pool
21645 * to pbl_pool
21646 */
21648 /* Move low watermark of bufs from pvt_pool to tmp_list */
21649 INIT_LIST_HEAD(&tmp_list);
21650 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21651 &pvt_pool->list, list) {
21652 list_move_tail(&lpfc_ncmd->list, &tmp_list);
21653 tmp_count++;
21654 if (tmp_count >= pvt_pool->low_watermark)
21655 break;
21656 }
21658 /* Move all bufs from pvt_pool to pbl_pool */
21659 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21661 /* Move all bufs from tmp_list to pvt_pool */
21662 list_splice(&tmp_list, &pvt_pool->list);
21664 pbl_pool->count += (pvt_pool->count - tmp_count);
21665 pvt_pool->count = tmp_count;
21667 /* Step 2: move the rest from pvt_pool to pbl_pool */
21668 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21669 pbl_pool->count += pvt_pool->count;
21670 pvt_pool->count = 0;
21671 }
21673 spin_unlock(&pvt_pool->lock);
21674 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21675 }
21677 /**
21678 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21679 * @phba: pointer to lpfc hba data structure
21680 * @qp: pointer to HDW queue
21681 * @pbl_pool: specified public free XRI pool
21682 * @pvt_pool: specified private free XRI pool
21683 * @count: number of XRIs to move
21685 * This routine tries to move some free common bufs from the specified pbl_pool
21686 * to the specified pvt_pool. It might move less than count XRIs if there's not
21687 * enough in public pool.
21690 * true - if XRIs are successfully moved from the specified pbl_pool to the
21691 * specified pvt_pool
21692 * false - if the specified pbl_pool is empty or locked by someone else
21693 */
21694 static bool
21695 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21696 struct lpfc_pbl_pool *pbl_pool,
21697 struct lpfc_pvt_pool *pvt_pool, u32 count)
21698 {
21699 struct lpfc_io_buf *lpfc_ncmd;
21700 struct lpfc_io_buf *lpfc_ncmd_next;
21701 unsigned long iflag;
21702 int ret;
21704 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
21705 if (ret) {
21706 if (pbl_pool->count) {
21707 /* Move a batch of XRIs from public to private pool */
21708 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
21709 list_for_each_entry_safe(lpfc_ncmd,
21710 lpfc_ncmd_next,
21711 &pbl_pool->list,
21712 list) {
21713 list_move_tail(&lpfc_ncmd->list,
21714 &pvt_pool->list);
21715 pvt_pool->count++;
21716 pbl_pool->count--;
21717 count--;
21718 if (count == 0)
21719 break;
21720 }
21722 spin_unlock(&pvt_pool->lock);
21723 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21724 return true;
21725 }
21726 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21727 }
21729 return false;
21730 }
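/* Editor's note: spin_trylock_irqsave() is used on the donor pbl_pool so a
 * CPU that finds the pool contended simply reports false and tries another
 * HWQ's public pool, rather than spinning on the lock.
 */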
21732 /**
21733 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21734 * @phba: pointer to lpfc hba data structure.
21735 * @hwqid: which HWQ the private pool belongs to.
21736 * @count: number of XRIs to move
21738 * This routine tries to find some free common bufs in one of public pools with
21739 * Round Robin method. The search always starts from local hwqid, then the next
21740 * HWQ which was found last time (rrb_next_hwqid). Once a public pool is found,
21741 * a batch of free common bufs are moved to private pool on hwqid.
21742 * It might move less than count XRIs if there's not enough in the public pool.
21743 */
21744 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
21745 {
21746 struct lpfc_multixri_pool *multixri_pool;
21747 struct lpfc_multixri_pool *next_multixri_pool;
21748 struct lpfc_pvt_pool *pvt_pool;
21749 struct lpfc_pbl_pool *pbl_pool;
21750 struct lpfc_sli4_hdw_queue *qp;
21751 u32 next_hwqid;
21752 u32 hwq_count;
21753 int ret;
21755 qp = &phba->sli4_hba.hdwq[hwqid];
21756 multixri_pool = qp->p_multixri_pool;
21757 pvt_pool = &multixri_pool->pvt_pool;
21758 pbl_pool = &multixri_pool->pbl_pool;
21760 /* Check if local pbl_pool is available */
21761 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
21762 if (ret) {
21763 #ifdef LPFC_MXP_STAT
21764 multixri_pool->local_pbl_hit_count++;
21765 #endif
21766 return;
21767 }
21769 hwq_count = phba->cfg_hdw_queue;
21771 /* Get the next hwqid which was found last time */
21772 next_hwqid = multixri_pool->rrb_next_hwqid;
21774 do {
21775 /* Go to next hwq */
21776 next_hwqid = (next_hwqid + 1) % hwq_count;
21778 next_multixri_pool =
21779 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
21780 pbl_pool = &next_multixri_pool->pbl_pool;
21782 /* Check if the public free xri pool is available */
21783 ret = _lpfc_move_xri_pbl_to_pvt(
21784 phba, qp, pbl_pool, pvt_pool, count);
21786 /* Exit while-loop if success or all hwqid are checked */
21787 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
21789 /* Starting point for the next time */
21790 multixri_pool->rrb_next_hwqid = next_hwqid;
21792 if (!ret) {
21793 /* stats: all public pools are empty */
21794 multixri_pool->pbl_empty_count++;
21795 }
21797 #ifdef LPFC_MXP_STAT
21798 if (ret) {
21799 if (next_hwqid == hwqid)
21800 multixri_pool->local_pbl_hit_count++;
21801 else
21802 multixri_pool->other_pbl_hit_count++;
21803 }
21804 #endif
21805 }
21807 /**
21808 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
21809 * @phba: pointer to lpfc hba data structure.
21810 * @hwqid: which HWQ the private pool belongs to.
21812 * This routine gets a batch of XRIs from pbl_pool if pvt_pool is less than
21813 * the low watermark.
21814 */
21815 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
21816 {
21817 struct lpfc_multixri_pool *multixri_pool;
21818 struct lpfc_pvt_pool *pvt_pool;
21820 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21821 pvt_pool = &multixri_pool->pvt_pool;
21823 if (pvt_pool->count < pvt_pool->low_watermark)
21824 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21825 }
21827 /**
21828 * lpfc_release_io_buf - Return one IO buf back to free pool
21829 * @phba: pointer to lpfc hba data structure.
21830 * @lpfc_ncmd: IO buf to be returned.
21831 * @qp: the HWQ the IO buf belongs to.
21833 * This routine returns one IO buf back to free pool. If this is an urgent IO,
21834 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
21835 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
21836 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
21837 * lpfc_io_buf_list_put.
21838 */
21839 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
21840 struct lpfc_sli4_hdw_queue *qp)
21841 {
21842 unsigned long iflag;
21843 struct lpfc_pbl_pool *pbl_pool;
21844 struct lpfc_pvt_pool *pvt_pool;
21845 struct lpfc_epd_pool *epd_pool;
21846 u32 txcmplq_cnt;
21847 u32 xri_owned;
21848 u32 xri_limit;
21849 u32 abts_io_bufs;
21851 /* MUST zero fields if buffer is reused by another protocol */
21852 lpfc_ncmd->nvmeCmd = NULL;
21853 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
21854 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
21856 if (phba->cfg_xpsgl && !phba->nvmet_support &&
21857 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
21858 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
21860 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
21861 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
21863 if (phba->cfg_xri_rebalancing) {
21864 if (lpfc_ncmd->expedite) {
21865 /* Return to expedite pool */
21866 epd_pool = &phba->epd_pool;
21867 spin_lock_irqsave(&epd_pool->lock, iflag);
21868 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
21869 epd_pool->count++;
21870 spin_unlock_irqrestore(&epd_pool->lock, iflag);
21871 return;
21872 }
21874 /* Avoid invalid access if an IO sneaks in and is being rejected
21875 * just _after_ xri pools are destroyed in lpfc_offline.
21876 * Nothing much can be done at this point.
21877 */
21878 if (!qp->p_multixri_pool)
21879 return;
21881 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21882 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21884 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21885 abts_io_bufs = qp->abts_scsi_io_bufs;
21886 abts_io_bufs += qp->abts_nvme_io_bufs;
21888 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
21889 xri_limit = qp->p_multixri_pool->xri_limit;
21891 #ifdef LPFC_MXP_STAT
21892 if (xri_owned <= xri_limit)
21893 qp->p_multixri_pool->below_limit_count++;
21894 else
21895 qp->p_multixri_pool->above_limit_count++;
21896 #endif
21898 /* XRI goes to either public or private free xri pool
21899 * based on watermark and xri_limit
21901 if ((pvt_pool->count < pvt_pool->low_watermark) ||
21902 (xri_owned < xri_limit &&
21903 pvt_pool->count < pvt_pool->high_watermark)) {
21904 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
21905 qp, free_pvt_pool);
21906 list_add_tail(&lpfc_ncmd->list,
21907 &pvt_pool->list);
21908 pvt_pool->count++;
21909 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21910 } else {
21911 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
21912 qp, free_pub_pool);
21913 list_add_tail(&lpfc_ncmd->list,
21914 &pbl_pool->list);
21915 pbl_pool->count++;
21916 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21917 }
21918 } else {
21919 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
21920 qp, free_xri);
21921 list_add_tail(&lpfc_ncmd->list,
21922 &qp->lpfc_io_buf_list_put);
21923 qp->put_io_bufs++;
21924 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
21925 iflag);
21926 }
21927 }
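/* Editor's summary of the release routing above: expedite bufs return to
 * epd_pool; otherwise a buf goes back to the private pool while under the
 * low watermark, or under the high watermark with xri_owned < xri_limit,
 * and to the public pool in all other cases. With cfg_xri_rebalancing off,
 * everything lands on lpfc_io_buf_list_put.
 */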
21929 /**
21930 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
21931 * @phba: pointer to lpfc hba data structure.
21932 * @qp: pointer to HDW queue
21933 * @pvt_pool: pointer to private pool data structure.
21934 * @ndlp: pointer to lpfc nodelist data structure.
21936 * This routine tries to get one free IO buf from private pool.
21939 * pointer to one free IO buf - if private pool is not empty
21940 * NULL - if private pool is empty
21941 */
21942 static struct lpfc_io_buf *
21943 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
21944 struct lpfc_sli4_hdw_queue *qp,
21945 struct lpfc_pvt_pool *pvt_pool,
21946 struct lpfc_nodelist *ndlp)
21947 {
21948 struct lpfc_io_buf *lpfc_ncmd;
21949 struct lpfc_io_buf *lpfc_ncmd_next;
21950 unsigned long iflag;
21952 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
21953 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21954 &pvt_pool->list, list) {
21955 if (lpfc_test_rrq_active(
21956 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
21957 continue;
21958 list_del(&lpfc_ncmd->list);
21959 pvt_pool->count--;
21960 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21961 return lpfc_ncmd;
21962 }
21963 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21965 return NULL;
21966 }
21968 /**
21969 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
21970 * @phba: pointer to lpfc hba data structure.
21972 * This routine tries to get one free IO buf from expedite pool.
21975 * pointer to one free IO buf - if expedite pool is not empty
21976 * NULL - if expedite pool is empty
21977 */
21978 static struct lpfc_io_buf *
21979 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
21980 {
21981 struct lpfc_io_buf *lpfc_ncmd;
21982 struct lpfc_io_buf *lpfc_ncmd_next;
21983 unsigned long iflag;
21984 struct lpfc_epd_pool *epd_pool;
21986 epd_pool = &phba->epd_pool;
21987 lpfc_ncmd = NULL;
21989 spin_lock_irqsave(&epd_pool->lock, iflag);
21990 if (epd_pool->count > 0) {
21991 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21992 &epd_pool->list, list) {
21993 list_del(&lpfc_ncmd->list);
21994 epd_pool->count--;
21995 break;
21996 }
21997 }
21998 spin_unlock_irqrestore(&epd_pool->lock, iflag);
22000 return lpfc_ncmd;
22001 }
22003 /**
22004 * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs
22005 * @phba: pointer to lpfc hba data structure.
22006 * @ndlp: pointer to lpfc nodelist data structure.
22007 * @hwqid: belong to which HWQ
22008 * @expedite: 1 means this request is urgent.
22010 * This routine will do the following actions and then return a pointer to
22011 * one free IO buf.
22013 * 1. If private free xri count is empty, move some XRIs from public to
22014 * private pool.
22015 * 2. Get one XRI from private free xri pool.
22016 * 3. If we fail to get one from pvt_pool and this is an expedite request,
22017 * get one free xri from expedite pool.
22019 * Note: ndlp is only used on SCSI side for RRQ testing.
22020 * The caller should pass NULL for ndlp on NVME side.
22023 * pointer to one free IO buf - if private pool is not empty
22024 * NULL - if private pool is empty
22025 */
22026 static struct lpfc_io_buf *
22027 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
22028 struct lpfc_nodelist *ndlp,
22029 int hwqid, int expedite)
22030 {
22031 struct lpfc_sli4_hdw_queue *qp;
22032 struct lpfc_multixri_pool *multixri_pool;
22033 struct lpfc_pvt_pool *pvt_pool;
22034 struct lpfc_io_buf *lpfc_ncmd;
22036 qp = &phba->sli4_hba.hdwq[hwqid];
22037 lpfc_ncmd = NULL;
22038 multixri_pool = qp->p_multixri_pool;
22039 pvt_pool = &multixri_pool->pvt_pool;
22040 multixri_pool->io_req_count++;
22042 /* If pvt_pool is empty, move some XRIs from public to private pool */
22043 if (pvt_pool->count == 0)
22044 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
22046 /* Get one XRI from private free xri pool */
22047 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
22049 if (lpfc_ncmd) {
22050 lpfc_ncmd->hdwq = qp;
22051 lpfc_ncmd->hdwq_no = hwqid;
22052 } else if (expedite) {
22053 /* If we fail to get one from pvt_pool and this is an expedite
22054 * request, get one free xri from expedite pool.
22056 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
22057 }
22059 return lpfc_ncmd;
22060 }
22062 static inline struct lpfc_io_buf *
22063 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
22064 {
22065 struct lpfc_sli4_hdw_queue *qp;
22066 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
22068 qp = &phba->sli4_hba.hdwq[idx];
22069 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
22070 &qp->lpfc_io_buf_list_get, list) {
22071 if (lpfc_test_rrq_active(phba, ndlp,
22072 lpfc_cmd->cur_iocbq.sli4_lxritag))
22073 continue;
22075 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
22076 continue;
22078 list_del_init(&lpfc_cmd->list);
22079 qp->get_io_bufs--;
22080 lpfc_cmd->hdwq = qp;
22081 lpfc_cmd->hdwq_no = idx;
22082 return lpfc_cmd;
22083 }
22085 return NULL;
22086 }
22087 /**
22088 * lpfc_get_io_buf - Get one IO buffer from free pool
22089 * @phba: The HBA for which this call is being executed.
22090 * @ndlp: pointer to lpfc nodelist data structure.
22091 * @hwqid: which HWQ the IO buffer is taken from.
22092 * @expedite: 1 means this request is urgent.
22094 * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing==1,
22095 * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0,
22096 * it removes an IO buffer from the head of the @hwqid io_buf_list for the caller.
22098 * Note: ndlp is only used on SCSI side for RRQ testing.
22099 * The caller should pass NULL for ndlp on NVME side.
22101 * Return codes:
22102 * NULL - Failure
22103 * Pointer to lpfc_io_buf - Success
22104 */
22105 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
22106 struct lpfc_nodelist *ndlp,
22107 u32 hwqid, int expedite)
22108 {
22109 struct lpfc_sli4_hdw_queue *qp;
22110 unsigned long iflag;
22111 struct lpfc_io_buf *lpfc_cmd;
22113 qp = &phba->sli4_hba.hdwq[hwqid];
22114 lpfc_cmd = NULL;
22116 if (phba->cfg_xri_rebalancing)
22117 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
22118 phba, ndlp, hwqid, expedite);
22119 else {
22120 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
22121 qp, alloc_xri_get);
22122 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
22123 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22124 if (!lpfc_cmd) {
22125 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
22126 qp, alloc_xri_put);
22127 list_splice(&qp->lpfc_io_buf_list_put,
22128 &qp->lpfc_io_buf_list_get);
22129 qp->get_io_bufs += qp->put_io_bufs;
22130 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
22131 qp->put_io_bufs = 0;
22132 spin_unlock(&qp->io_buf_list_put_lock);
22133 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
22134 expedite)
22135 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22136 }
22137 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
22138 }
22140 return lpfc_cmd;
22141 }
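/* Editor's sketch (hypothetical helper, not driver code): the intended
 * pairing of the two APIs above -- every successful lpfc_get_io_buf() is
 * eventually balanced by lpfc_release_io_buf() on the same hdwq.
 */
static int lpfc_io_buf_pairing_sketch(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_io_buf *buf;

	/* ndlp is only needed for SCSI-side RRQ testing; NVME passes NULL */
	buf = lpfc_get_io_buf(phba, NULL, hwqid, 0);
	if (!buf)
		return -ENOMEM;

	/* ... build and issue the IO using buf->cur_iocbq here ... */

	lpfc_release_io_buf(phba, buf, buf->hdwq);
	return 0;
}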
22143 /**
22144 * lpfc_read_object - Retrieve object data from HBA
22145 * @phba: The HBA for which this call is being executed.
22146 * @rdobject: Pathname of object data we want to read.
22147 * @datap: Pointer to where data will be copied to.
22148 * @datasz: size of data area
22150 * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
22151 * The data will be truncated if datasz is not large enough.
22152 * Version 1 is not supported with Embedded mbox cmd, so we must use version 0.
22153 * Returns the actual number of bytes read from the object.
22154 */
22155 int
22156 lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
22157 uint32_t datasz)
22158 {
22159 struct lpfc_mbx_read_object *read_object;
22160 LPFC_MBOXQ_t *mbox;
22161 int rc, length, eof, j, byte_cnt = 0;
22162 uint32_t shdr_status, shdr_add_status;
22163 union lpfc_sli4_cfg_shdr *shdr;
22164 struct lpfc_dmabuf *pcmd;
22165 u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
22167 /* sanity check on queue memory */
22168 if (!datap)
22169 return -ENODEV;
22171 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
22172 if (!mbox)
22173 return -ENOMEM;
22174 length = (sizeof(struct lpfc_mbx_read_object) -
22175 sizeof(struct lpfc_sli4_cfg_mhdr));
22176 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
22177 LPFC_MBOX_OPCODE_READ_OBJECT,
22178 length, LPFC_SLI4_MBX_EMBED);
22179 read_object = &mbox->u.mqe.un.read_object;
22180 shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;
22182 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
22183 bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
22184 read_object->u.request.rd_object_offset = 0;
22185 read_object->u.request.rd_object_cnt = 1;
22187 memset((void *)read_object->u.request.rd_object_name, 0,
22188 sizeof(read_object->u.request.rd_object_name));
22189 scnprintf((char *)rd_object_name, sizeof(rd_object_name), "%s", rdobject);
22190 for (j = 0; j < strlen(rdobject); j++)
22191 read_object->u.request.rd_object_name[j] =
22192 cpu_to_le32(rd_object_name[j]);
22194 pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
22195 if (pcmd)
22196 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
22197 if (!pcmd || !pcmd->virt) {
22198 kfree(pcmd);
22199 mempool_free(mbox, phba->mbox_mem_pool);
22200 return -ENOMEM;
22201 }
22202 memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
22203 read_object->u.request.rd_object_hbuf[0].pa_lo =
22204 putPaddrLow(pcmd->phys);
22205 read_object->u.request.rd_object_hbuf[0].pa_hi =
22206 putPaddrHigh(pcmd->phys);
22207 read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;
22209 mbox->vport = phba->pport;
22210 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
22211 mbox->ctx_buf = NULL;
22212 mbox->ctx_ndlp = NULL;
22214 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
22215 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
22216 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
22218 if (shdr_status == STATUS_FAILED &&
22219 shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
22220 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
22221 "4674 No port cfg file in FW.\n");
22222 byte_cnt = -ENOENT;
22223 } else if (shdr_status || shdr_add_status || rc) {
22224 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
22225 "2625 READ_OBJECT mailbox failed with "
22226 "status x%x add_status x%x, mbx status x%x\n",
22227 shdr_status, shdr_add_status, rc);
22228 byte_cnt = 0;
22229 } else {
22231 length = read_object->u.response.rd_object_actual_rlen;
22232 eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
22233 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
22234 "2626 READ_OBJECT Success len %d:%d, EOF %d\n",
22235 length, datasz, eof);
22237 /* Detect the port config file exists but is empty */
22238 if (!length && eof) {
22239 byte_cnt = 0;
22240 goto exit;
22241 }
22243 byte_cnt = length;
22244 lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
22245 }
22247 exit:
22248 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
22249 kfree(pcmd);
22250 mempool_free(mbox, phba->mbox_mem_pool);
22251 return byte_cnt;
22252 }
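/* Editor's sketch (hypothetical object name and helper, not driver code):
 * reading a small object into a local buffer; lpfc_read_object() handles at
 * most LPFC_BPL_SIZE bytes per call.
 */
static void lpfc_read_object_usage_sketch(struct lpfc_hba *phba)
{
	uint32_t data[LPFC_BPL_SIZE / sizeof(uint32_t)];
	char objname[] = "/driver/cfg"; /* invented pathname for illustration */
	int len;

	len = lpfc_read_object(phba, objname, data, sizeof(data));
	if (len < 0)
		return; /* -ENOENT: no such object; other negatives: error */

	/* ... consume len bytes from data ... */
}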
22254 /**
22255 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
22256 * @phba: The HBA for which this call is being executed.
22257 * @lpfc_buf: IO buf structure to append the SGL chunk
22259 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
22260 * and will allocate an SGL chunk if the pool is empty.
22262 * Return codes:
22263 * NULL - Error
22264 * Pointer to sli4_hybrid_sgl - Success
22265 */
22266 struct sli4_hybrid_sgl *
22267 lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22268 {
22269 struct sli4_hybrid_sgl *list_entry = NULL;
22270 struct sli4_hybrid_sgl *tmp = NULL;
22271 struct sli4_hybrid_sgl *allocated_sgl = NULL;
22272 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22273 struct list_head *buf_list = &hdwq->sgl_list;
22274 unsigned long iflags;
22276 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22278 if (likely(!list_empty(buf_list))) {
22279 /* break off 1 chunk from the sgl_list */
22280 list_for_each_entry_safe(list_entry, tmp,
22281 buf_list, list_node) {
22282 list_move_tail(&list_entry->list_node,
22283 &lpfc_buf->dma_sgl_xtra_list);
22284 break;
22285 }
22286 } else {
22287 /* allocate more */
22288 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22289 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22290 cpu_to_node(hdwq->io_wq->chann));
22291 if (!tmp) {
22292 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22293 "8353 error kmalloc memory for HDWQ "
22295 lpfc_buf->hdwq_no, __func__);
22299 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
22300 GFP_ATOMIC, &tmp->dma_phys_sgl);
22301 if (!tmp->dma_sgl) {
22302 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22303 "8354 error pool_alloc memory for HDWQ "
22305 lpfc_buf->hdwq_no, __func__);
22310 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22311 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
22312 }
22314 allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
22315 struct sli4_hybrid_sgl,
22316 list_node);
22318 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22320 return allocated_sgl;
22321 }
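/* Editor's note: lpfc_get_sgl_per_hdwq() and lpfc_put_sgl_per_hdwq() form a
 * per-hdwq cache -- chunks move between hdwq->sgl_list and the IO buf's
 * dma_sgl_xtra_list rather than being freed, so the GFP_ATOMIC allocation
 * path only runs when the cache is empty.
 */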
22323 /**
22324 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
22325 * @phba: The HBA for which this call is being executed.
22326 * @lpfc_buf: IO buf structure with the SGL chunk
22328 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
22330 * Return codes:
22331 * 0 - Success
22332 * -EINVAL - Error
22333 */
22334 static int
22335 lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22336 {
22337 int rc = 0;
22338 struct sli4_hybrid_sgl *list_entry = NULL;
22339 struct sli4_hybrid_sgl *tmp = NULL;
22340 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22341 struct list_head *buf_list = &hdwq->sgl_list;
22342 unsigned long iflags;
22344 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22346 if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
22347 list_for_each_entry_safe(list_entry, tmp,
22348 &lpfc_buf->dma_sgl_xtra_list,
22349 list_node) {
22350 list_move_tail(&list_entry->list_node,
22351 buf_list);
22352 }
22353 } else {
22354 rc = -EINVAL;
22355 }
22357 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22358 return rc;
22359 }
22361 /**
22362 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
22363 * @phba: phba object
22364 * @hdwq: hdwq to cleanup sgl buff resources on
22366 * This routine frees all SGL chunks of hdwq SGL chunk pool.
22368 * Return codes:
22369 * None
22370 */
22371 static void
22372 lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
22373 struct lpfc_sli4_hdw_queue *hdwq)
22374 {
22375 struct list_head *buf_list = &hdwq->sgl_list;
22376 struct sli4_hybrid_sgl *list_entry = NULL;
22377 struct sli4_hybrid_sgl *tmp = NULL;
22378 unsigned long iflags;
22380 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22382 /* Free sgl pool */
22383 list_for_each_entry_safe(list_entry, tmp,
22384 buf_list, list_node) {
22385 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
22386 list_entry->dma_sgl,
22387 list_entry->dma_phys_sgl);
22388 list_del(&list_entry->list_node);
22389 kfree(list_entry);
22390 }
22392 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22393 }
22395 /**
22396 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
22397 * @phba: The HBA for which this call is being executed.
22398 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
22400 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
22401 * and will allocate a CMD/RSP buffer if the pool is empty.
22403 * Return codes:
22404 * NULL - Error
22405 * Pointer to fcp_cmd_rsp_buf - Success
22406 */
22407 struct fcp_cmd_rsp_buf *
22408 lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22409 struct lpfc_io_buf *lpfc_buf)
22410 {
22411 struct fcp_cmd_rsp_buf *list_entry = NULL;
22412 struct fcp_cmd_rsp_buf *tmp = NULL;
22413 struct fcp_cmd_rsp_buf *allocated_buf = NULL;
22414 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22415 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22416 unsigned long iflags;
22418 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22420 if (likely(!list_empty(buf_list))) {
22421 /* break off 1 chunk from the list */
22422 list_for_each_entry_safe(list_entry, tmp,
22423 buf_list,
22424 list_node) {
22425 list_move_tail(&list_entry->list_node,
22426 &lpfc_buf->dma_cmd_rsp_list);
22427 break;
22428 }
22429 } else {
22430 /* allocate more */
22431 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22432 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22433 cpu_to_node(hdwq->io_wq->chann));
22434 if (!tmp) {
22435 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22436 "8355 error kmalloc memory for HDWQ "
22438 lpfc_buf->hdwq_no, __func__);
22442 tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
22443 GFP_ATOMIC,
22444 &tmp->fcp_cmd_rsp_dma_handle);
22446 if (!tmp->fcp_cmnd) {
22447 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22448 "8356 error pool_alloc memory for HDWQ "
22450 lpfc_buf->hdwq_no, __func__);
22455 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
22456 sizeof(struct fcp_cmnd));
22458 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22459 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
22460 }
22462 allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
22463 struct fcp_cmd_rsp_buf,
22464 list_node);
22466 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22468 return allocated_buf;
22469 }
22471 /**
22472 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
22473 * @phba: The HBA for which this call is being executed.
22474 * @lpfc_buf: IO buf structure with the CMD/RSP buf
22476 * This routine puts one CMD/RSP buffer into the hdwq's CMD/RSP pool.
22478 * Return codes:
22479 * 0 - Success
22480 * -EINVAL - Error
22481 */
22482 static int
22483 lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22484 struct lpfc_io_buf *lpfc_buf)
22485 {
22486 int rc = 0;
22487 struct fcp_cmd_rsp_buf *list_entry = NULL;
22488 struct fcp_cmd_rsp_buf *tmp = NULL;
22489 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22490 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22491 unsigned long iflags;
22493 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22495 if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
22496 list_for_each_entry_safe(list_entry, tmp,
22497 &lpfc_buf->dma_cmd_rsp_list,
22498 list_node) {
22499 list_move_tail(&list_entry->list_node,
22500 buf_list);
22501 }
22502 } else {
22503 rc = -EINVAL;
22504 }
22506 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22507 return rc;
22508 }
22510 /**
22511 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
22512 * @phba: phba object
22513 * @hdwq: hdwq to cleanup cmd rsp buff resources on
22515 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
22517 * Return codes:
22518 * None
22519 */
22520 static void
22521 lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22522 struct lpfc_sli4_hdw_queue *hdwq)
22523 {
22524 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22525 struct fcp_cmd_rsp_buf *list_entry = NULL;
22526 struct fcp_cmd_rsp_buf *tmp = NULL;
22527 unsigned long iflags;
22529 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22531 /* Free cmd_rsp buf pool */
22532 list_for_each_entry_safe(list_entry, tmp,
22533 buf_list,
22534 list_node) {
22535 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
22536 list_entry->fcp_cmnd,
22537 list_entry->fcp_cmd_rsp_dma_handle);
22538 list_del(&list_entry->list_node);
22539 kfree(list_entry);
22540 }
22542 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22543 }