/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
 * www.broadcom.com *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
 * *
 * This program is free software; you can redistribute it and/or *
 * modify it under the terms of version 2 of the GNU General *
 * Public License as published by the Free Software Foundation. *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for *
 * more details, a copy of which can be found in the file COPYING *
 * included with this package. *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *
lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
				  struct lpfc_iocbq *rspiocbq);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq,
				    struct lpfc_cqe *cqe);
static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
				 struct lpfc_iocbq *pwqeq,
				 struct lpfc_sglq *sglq);
union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;
/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* IWRITE template */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* ICMND template */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
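
/*
 * Note: the templates above are not issued directly. The I/O build
 * paths (the SCSI and NVMe initiator code, for example) copy the
 * relevant template into the per-I/O WQE and then fill in the words
 * flagged as "variable" above (BDE, transfer length, tags, etc.).
 */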
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of words required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
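
/*
 * On 64-bit little-endian builds, SLI4 queue entries are already in the
 * byte order the hardware expects, so a straight 64-bit copy suffices.
 * All other configurations fall back to lpfc_sli_pcimem_bcopy(), which
 * copies word by word and performs the endian conversion.
 */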
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;

	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
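
/*
 * Caller contract sketch for lpfc_sli4_wq_put(): the hbalock (or ring
 * lock) is held across the call, and an -EBUSY return means the work
 * queue is full (the host index would catch up with the hba index); in
 * that case the WQE is typically left on the ring's txq and retried.
 */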
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return;

	q->hba_index = index;
}
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return eqe;
}
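
/*
 * Note on the valid bit: on older interface types the driver clears the
 * bit as each entry is consumed. When autovalid (eqav) is supported the
 * hardware instead toggles the sense of the bit on every pass through
 * the ring, so __lpfc_sli4_consume_eqe() below flips q->qe_valid each
 * time the host index wraps.
 */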
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}
static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe = NULL;
	u32 eq_count = 0, cq_count = 0;
	struct lpfc_cqe *cqe = NULL;
	struct lpfc_queue *cq = NULL, *childq = NULL;
	int cqid = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		/* Get the reference to the corresponding CQ */
		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
		cq = NULL;

		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id == cqid) {
				cq = childq;
				break;
			}
		}
		/* If CQ is valid, iterate through it and drop all the CQEs */
		if (cq) {
			cqe = lpfc_sli4_cq_get(cq);
			while (cqe) {
				__lpfc_sli4_consume_cqe(phba, cq, cqe);
				cq_count++;
				cqe = lpfc_sli4_cq_get(cq);
			}
			/* Clear and re-arm the CQ */
			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
							LPFC_QUEUE_REARM);
			cq_count = 0;
		}
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		eq_count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}
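
/*
 * lpfc_sli4_eqcq_flush() is a cleanup helper: it drains and discards
 * whatever is still posted to an EQ and its child CQs (during reset or
 * teardown, for instance) so that the queues can be re-armed from a
 * clean state.
 */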
static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		     uint8_t rearm)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	xchg(&eq->queue_claimed, 0);

rearm_and_exit:
	/* Always clear the EQ. */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

	return count;
}
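
/*
 * The cmpxchg()/xchg() pair on eq->queue_claimed gives a cheap ownership
 * handoff: if another context is already processing this EQ, a second
 * caller skips straight to the doorbell write instead of racing on the
 * same entries.
 */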
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return cqe;
}
static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}
/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
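
/*
 * The if6 doorbell routines mirror the generic ones but use the
 * interface-type-6 register layout: dedicated lpfc_if6_* fields and a
 * single queue-id field rather than the cqid_hi/cqid_lo split.
 */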
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entry. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @hq then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
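
/*
 * Receive queues come in header/data pairs that must advance in
 * lockstep: the same put index selects both the header RQE and the data
 * RQE, which is why mismatched indices or queue types are rejected
 * above before anything is copied.
 */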
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}
/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}
/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
static void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	/* Lookup did to verify if did is still active on this vport */
	if (rrq->vport)
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function checks if stop_time (ratov from setting rrq active)
 * has been reached for each active rrq; if it has and the send_rrq
 * flag is set then it will call lpfc_send_rrq. If the send_rrq flag
 * is not set then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
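
/*
 * Expired RRQs are first moved to the local send_rrq list while the
 * hbalock is held, then processed after the lock is dropped, so that
 * lpfc_send_rrq() (which issues an ELS RRQ command) is never called
 * under the hbalock.
 */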
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport != vport)
			continue;

		if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
			list_move(&rrq->list, &rrq_list);

	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}
/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns  0 rrq active for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
			     msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not empty
 * then it is successful, it returns pointer to the newly allocated sglq
 * object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;
	u8 cmnd;

	cmnd = get_job_cmnd(phba, piocbq);

	if (piocbq->cmd_flag & LPFC_IO_FCP) {
		lpfc_cmd = piocbq->io_buf;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((cmnd == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->ndlp;
	} else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
		if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->ndlp;
	} else {
		ndlp = piocbq->ndlp;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}
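
/*
 * The loop above makes ELS XRI allocation RRQ-aware: an sglq whose XRI
 * still has an outstanding RRQ for the target DID is pushed back on the
 * list and another is tried; if the search wraps around to the starting
 * sglq, the allocation fails (returns NULL) rather than reusing a
 * quarantined XRI.
 */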
/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object
 * to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
 * asserted held in the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, wqe);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->cmd_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
		    (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
		    sglq->state != SGL_XRI_ABORTED) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);

			/* Check if we can get a reference on ndlp */
			if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
				sglq->ndlp = NULL;

			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			pring = lpfc_phba_elsring(phba);
			/* Check if TXQ queue needs to be serviced */
			if (pring && (!list_empty(&pring->txq)))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
			     LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
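
/*
 * Lifecycle note: an sglq parked on lpfc_abts_els_sgl_list above stays
 * there until the XRI_ABORTED completion arrives; only then is it moved
 * back to the free lpfc_els_sgl_list. See the function header above for
 * the full state flow.
 */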
/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object to the
 * iocb pool. The iotag in the iocb object does not change for each
 * use of the iocb object. This function clears all other fields of
 * the iocb object when it is freed. The hbalock is asserted held in
 * the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * field.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (piocb->cmd_cmpl) {
			if (piocb->cmd_flag & LPFC_IO_NVME) {
				lpfc_nvme_cancel_iocb(phba, piocb,
						      ulpstatus, ulpWord4);
			} else {
				if (phba->sli_rev == LPFC_SLI_REV4) {
					bf_set(lpfc_wcqe_c_status,
					       &piocb->wcqe_cmpl, ulpstatus);
					piocb->wcqe_cmpl.parameter = ulpWord4;
				} else {
					piocb->iocb.ulpStatus = ulpstatus;
					piocb->iocb.un.ulpWord[4] = ulpWord4;
				}
				(piocb->cmd_cmpl) (phba, piocb, piocb);
			}
		} else {
			lpfc_sli_release_iocbq(phba, piocb);
		}
	}
}
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
	case CMD_SEND_FRAME:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (pmb == NULL)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	u32 ulp_command = 0;

	BUG_ON(!piocb);
	ulp_command = get_job_cmnd(phba, piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;
	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (ulp_command != CMD_ABORT_XRI_WQE) &&
	    (ulp_command != CMD_ABORT_XRI_CN) &&
	    (ulp_command != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}
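
/*
 * The ELS timeout above is armed for twice RATOV (fc_ratov << 1), the
 * worst-case round-trip the fabric allows, each time a new ELS command
 * (other than aborts/closes) is added to the txcmplq.
 */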
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}
1768 * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
1769 * @phba: Pointer to HBA context object.
1770 * @cmdiocb: Pointer to driver command iocb object.
1771 * @rspiocb: Pointer to driver response iocb object.
1773 * This routine will inform the driver of any BW adjustments we need
1774 * to make. These changes will be picked up during the next CMF
1775 * timer interrupt. In addition, any BW changes will be logged
1776 * with LOG_CGN_MGMT.
1779 lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1780 struct lpfc_iocbq *rspiocb)
1782 union lpfc_wqe128 *wqe;
1783 uint32_t status, info;
1784 struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
1785 uint64_t bw, bwdif, slop;
1786 uint64_t pcent, bwpcent;
1787 int asig, afpin, sigcnt, fpincnt;
1788 int wsigmax, wfpinmax, cg, tdp;
1791 /* First check for error */
1792 status = bf_get(lpfc_wcqe_c_status, wcqe);
1794 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1795 "6211 CMF_SYNC_WQE Error "
1796 "req_tag x%x status x%x hwstatus x%x "
1797 "tdatap x%x parm x%x\n",
1798 bf_get(lpfc_wcqe_c_request_tag, wcqe),
1799 bf_get(lpfc_wcqe_c_status, wcqe),
1800 bf_get(lpfc_wcqe_c_hw_status, wcqe),
1801 wcqe->total_data_placed,
1806 /* Gather congestion information on a successful cmpl */
1807 info = wcqe->parameter;
1808 phba->cmf_active_info = info;
1810 /* See if firmware info count is valid or has changed */
1811 if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
1814 phba->cmf_info_per_interval = info;
1816 tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
1817 cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);
1819 /* Get BW requirement from firmware */
1820 bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
1822 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1823 "6212 CMF_SYNC_WQE x%x: NULL bw\n",
1824 bf_get(lpfc_wcqe_c_request_tag, wcqe));
1828 /* Gather information needed for logging if a BW change is required */
1829 wqe = &cmdiocb->wqe;
1830 asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
1831 afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
1832 fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
1833 sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
1834 if (phba->cmf_max_bytes_per_interval != bw ||
1835 (asig || afpin || sigcnt || fpincnt)) {
1836 /* Are we increasing or decreasing BW */
1837 if (phba->cmf_max_bytes_per_interval < bw) {
1838 bwdif = bw - phba->cmf_max_bytes_per_interval;
1841 bwdif = phba->cmf_max_bytes_per_interval - bw;
1845 /* What is the change percentage */
1846 slop = div_u64(phba->cmf_link_byte_count, 200); /*For rounding*/
1847 pcent = div64_u64(bwdif * 100 + slop,
1848 phba->cmf_link_byte_count);
1849 bwpcent = div64_u64(bw * 100 + slop,
1850 phba->cmf_link_byte_count);
1852 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1853 "6237 BW Threshold %lld%% (%lld): "
1854 "%lld%% %s: Signal Alarm: cg:%d "
1856 bwpcent, bw, pcent, s, cg,
1857 phba->cmf_active_info);
1859 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1860 "6238 BW Threshold %lld%% (%lld): "
1861 "%lld%% %s: FPIN Alarm: cg:%d "
1863 bwpcent, bw, pcent, s, cg,
1864 phba->cmf_active_info);
1865 } else if (sigcnt) {
1866 wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
1867 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1868 "6239 BW Threshold %lld%% (%lld): "
1869 "%lld%% %s: Signal Warning: "
1870 "Cnt %d Max %d: cg:%d Info:%u\n",
1871 bwpcent, bw, pcent, s, sigcnt,
1872 wsigmax, cg, phba->cmf_active_info);
1873 } else if (fpincnt) {
1874 wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
1875 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1876 "6240 BW Threshold %lld%% (%lld): "
1877 "%lld%% %s: FPIN Warning: "
1878 "Cnt %d Max %d: cg:%d Info:%u\n",
1879 bwpcent, bw, pcent, s, fpincnt,
1880 wfpinmax, cg, phba->cmf_active_info);
1882 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1883 "6241 BW Threshold %lld%% (%lld): "
1884 "CMF %lld%% %s: cg:%d Info:%u\n",
1885 bwpcent, bw, pcent, s, cg,
1886 phba->cmf_active_info);
1889 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1890 "6246 Info Threshold %u\n", info);
1893 /* Save BW change to be picked up during next timer interrupt */
1894 phba->cmf_last_sync_bw = bw;
1896 lpfc_sli_release_iocbq(phba, cmdiocb);
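/*
 * Illustrative sketch (assumed numbers, not driver state): the percent
 * math above is pure integer arithmetic. slop = link_byte_count / 200
 * is added before the divide as a small bias so a value sitting just
 * under a whole percent is not truncated. With a hypothetical
 * cmf_link_byte_count of 200,000 bytes/interval and bwdif of 51,000:
 *
 *	u64 slop  = div_u64(200000ULL, 200);		/* 1,000 */
 *	u64 pcent = div64_u64(51000ULL * 100 + slop, 200000ULL);
 *	/* (5,100,000 + 1,000) / 200,000 = 25 (integer percent) */
 */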
1900 * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
1901 * @phba: Pointer to HBA context object.
1902 * @ms: milliseconds to set in the WQE interval; 0 means use the init op
1903 * @total: Total rcv bytes for this interval
1905 * This routine is called every CMF timer interrupt. Its purpose is
1906 * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
1907 * that may indicate we have congestion (FPINs or Signals). Upon
1908 * completion, the firmware will indicate any BW restrictions the
1909 * driver may need to apply.
1912 lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
1914 union lpfc_wqe128 *wqe;
1915 struct lpfc_iocbq *sync_buf;
1916 unsigned long iflags;
1918 u32 atot, wtot, max;
1920 /* First address any alarm / warning activity */
1921 atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
1922 wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
1924 /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
1925 if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
1926 phba->link_state == LPFC_LINK_DOWN)
1929 spin_lock_irqsave(&phba->hbalock, iflags);
1930 sync_buf = __lpfc_sli_get_iocbq(phba);
1932 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
1933 "6244 No available WQEs for CMF_SYNC_WQE\n");
1938 wqe = &sync_buf->wqe;
1940 /* WQEs are reused. Clear stale data and set key fields to zero */
1941 memset(wqe, 0, sizeof(*wqe));
1943 /* If this is the very first CMF_SYNC_WQE, issue an init operation */
1945 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1946 "6441 CMF Init %d - CMF_SYNC_WQE\n",
1948 bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
1949 bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
1953 bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
1954 bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);
1956 /* Check for alarms / warnings */
1958 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1959 /* We hit a Signal alarm condition */
1960 bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
1962 /* We hit a FPIN alarm condition */
1963 bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
1966 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
1967 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1968 /* We hit a Signal warning condition */
1969 max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
1970 lpfc_acqe_cgn_frequency;
1971 bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
1972 bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
1974 /* We hit a FPIN warning condition */
1975 bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
1976 bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
1980 /* Update total read blocks during previous timer interval */
1981 wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);
1984 bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
1985 wqe->cmf_sync.event_tag = phba->fc_eventTag;
1986 bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);
1988 /* Setup reqtag to match the wqe completion. */
1989 bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
1991 bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
1993 bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
1994 bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
1995 bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
1997 sync_buf->vport = phba->pport;
1998 sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
1999 sync_buf->cmd_dmabuf = NULL;
2000 sync_buf->rsp_dmabuf = NULL;
2001 sync_buf->bpl_dmabuf = NULL;
2002 sync_buf->sli4_xritag = NO_XRI;
2004 sync_buf->cmd_flag |= LPFC_IO_CMF;
2005 ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
2007 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
2008 "6214 Cannot issue CMF_SYNC_WQE: x%x\n",
2011 spin_unlock_irqrestore(&phba->hbalock, iflags);
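/*
 * Illustrative sketch of the wsigmax computation above (assumed
 * frequencies, not fabric values). LPFC_SEC_TO_MSEC is 1000, so with
 * a fabric congestion frequency of 10 ms and an ACQE frequency of 2:
 *
 *	u32 max = 1000 / 10 * 2;	/* 200 signals per interval */
 *	bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
 *
 * Note the left-to-right evaluation: the divide happens before the
 * multiply, so the budget is derived from whole windows per second.
 */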
2016 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
2017 * @phba: Pointer to HBA context object.
2018 * @pring: Pointer to driver SLI ring object.
2020 * This function is called with hbalock held and the caller must post the
2021 * iocb without releasing the lock. If the caller releases the lock,
2022 * the iocb slot returned by the function is not guaranteed to be available.
2023 * The function returns a pointer to the next available iocb slot if there
2024 * is a free slot in the ring; otherwise it returns NULL.
2025 * If the get index of the ring is ahead of the put index, the function
2026 * will post an error attention event to the worker thread to take the
2027 * HBA to offline state.
2030 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2032 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2033 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
2035 lockdep_assert_held(&phba->hbalock);
2037 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
2038 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
2039 pring->sli.sli3.next_cmdidx = 0;
2041 if (unlikely(pring->sli.sli3.local_getidx ==
2042 pring->sli.sli3.next_cmdidx)) {
2044 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
2046 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
2047 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2048 "0315 Ring %d issue: portCmdGet %d "
2049 "is bigger than cmd ring %d\n",
2051 pring->sli.sli3.local_getidx,
2054 phba->link_state = LPFC_HBA_ERROR;
2056 * All error attention handlers are posted to
2059 phba->work_ha |= HA_ERATT;
2060 phba->work_hs = HS_FFER3;
2062 lpfc_worker_wake_up(phba);
2067 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
2071 return lpfc_cmd_iocb(phba, pring);
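/*
 * Illustrative sketch of the circular-index math above (assumed
 * values). next_cmdidx is the slot the driver would use after cmdidx;
 * the ring is full when it collides with the port's get index:
 *
 *	numCiocb = 4, cmdidx = 3 -> next_cmdidx wraps to 0
 *	local_getidx = 0         -> candidate ring-full: refresh the
 *	cached get index from pgp->cmdGetInx and, if it still matches
 *	next_cmdidx, return NULL (no slot available).
 */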
2075 * lpfc_sli_next_iotag - Get an iotag for the iocb
2076 * @phba: Pointer to HBA context object.
2077 * @iocbq: Pointer to driver iocb object.
2079 * This function gets an iotag for the iocb. If there is no unused iotag and
2080 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
2081 * array and assigns a new iotag.
2082 * The function returns the allocated iotag if successful, else returns zero.
2083 * Zero is not a valid iotag.
2084 * The caller is not required to hold any lock.
2087 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
2089 struct lpfc_iocbq **new_arr;
2090 struct lpfc_iocbq **old_arr;
2092 struct lpfc_sli *psli = &phba->sli;
2095 spin_lock_irq(&phba->hbalock);
2096 iotag = psli->last_iotag;
2097 if (++iotag < psli->iocbq_lookup_len) {
2098 psli->last_iotag = iotag;
2099 psli->iocbq_lookup[iotag] = iocbq;
2100 spin_unlock_irq(&phba->hbalock);
2101 iocbq->iotag = iotag;
2103 } else if (psli->iocbq_lookup_len < (0xffff
2104 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
2105 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
2106 spin_unlock_irq(&phba->hbalock);
2107 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
2110 spin_lock_irq(&phba->hbalock);
2111 old_arr = psli->iocbq_lookup;
2112 if (new_len <= psli->iocbq_lookup_len) {
2113 /* highly improbable case */
2115 iotag = psli->last_iotag;
2116 if (++iotag < psli->iocbq_lookup_len) {
2117 psli->last_iotag = iotag;
2118 psli->iocbq_lookup[iotag] = iocbq;
2119 spin_unlock_irq(&phba->hbalock);
2120 iocbq->iotag = iotag;
2123 spin_unlock_irq(&phba->hbalock);
2126 if (psli->iocbq_lookup)
2127 memcpy(new_arr, old_arr,
2128 ((psli->last_iotag + 1) *
2129 sizeof (struct lpfc_iocbq *)));
2130 psli->iocbq_lookup = new_arr;
2131 psli->iocbq_lookup_len = new_len;
2132 psli->last_iotag = iotag;
2133 psli->iocbq_lookup[iotag] = iocbq;
2134 spin_unlock_irq(&phba->hbalock);
2135 iocbq->iotag = iotag;
2140 spin_unlock_irq(&phba->hbalock);
2142 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2143 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
2150 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
2151 * @phba: Pointer to HBA context object.
2152 * @pring: Pointer to driver SLI ring object.
2153 * @iocb: Pointer to iocb slot in the ring.
2154 * @nextiocb: Pointer to driver iocb object which needs to be
2155 * posted to firmware.
2157 * This function is called to post a new iocb to the firmware. This
2158 * function copies the new iocb to ring iocb slot and updates the
2159 * ring pointers. It adds the new iocb to the txcmplq if there is
2160 * a completion callback for this iocb; otherwise the function will free the
2161 * iocb object. The hbalock is asserted held in the code path calling
2165 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2166 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
2171 nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;
2174 if (pring->ringno == LPFC_ELS_RING) {
2175 lpfc_debugfs_slow_ring_trc(phba,
2176 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
2177 *(((uint32_t *) &nextiocb->iocb) + 4),
2178 *(((uint32_t *) &nextiocb->iocb) + 6),
2179 *(((uint32_t *) &nextiocb->iocb) + 7));
2183 * Issue iocb command to adapter
2185 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
2187 pring->stats.iocb_cmd++;
2190 * If there is no completion routine to call, we can release the
2191 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
2192 * that have no rsp ring completion, cmd_cmpl MUST be NULL.
2194 if (nextiocb->cmd_cmpl)
2195 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
2197 __lpfc_sli_release_iocbq(phba, nextiocb);
2200 * Let the HBA know what IOCB slot will be the next one the
2201 * driver will put a command into.
2203 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
2204 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
2208 * lpfc_sli_update_full_ring - Update the chip attention register
2209 * @phba: Pointer to HBA context object.
2210 * @pring: Pointer to driver SLI ring object.
2212 * The caller is not required to hold any lock for calling this function.
2213 * This function updates the chip attention bits for the ring to inform firmware
2214 * that there is pending work to be done for this ring and requests an
2215 * interrupt when there is space available in the ring. This function is
2216 * called when the driver is unable to post more iocbs to the ring due
2217 * to unavailability of space in the ring.
2220 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2222 int ringno = pring->ringno;
2224 pring->flag |= LPFC_CALL_RING_AVAILABLE;
2229 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
2230 * The HBA will tell us when an IOCB entry is available.
2232 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
2233 readl(phba->CAregaddr); /* flush */
2235 pring->stats.iocb_cmd_full++;
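/*
 * Illustrative note: each ring owns a 4-bit group in the Chip
 * Attention register, hence the (ringno * 4) shift above:
 *
 *	ringno 0: (CA_R0ATT | CA_R0CE_REQ) << 0
 *	ringno 3: (CA_R0ATT | CA_R0CE_REQ) << 12
 *
 * The readl() after the writel() flushes the posted PCI write so the
 * doorbell reaches the HBA before the driver proceeds.
 */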
2239 * lpfc_sli_update_ring - Update chip attention register
2240 * @phba: Pointer to HBA context object.
2241 * @pring: Pointer to driver SLI ring object.
2243 * This function updates the chip attention register bit for the
2244 * given ring to inform HBA that there is more work to be done
2245 * in this ring. The caller is not required to hold any lock.
2248 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2250 int ringno = pring->ringno;
2253 * Tell the HBA that there is work to do in this ring.
2255 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
2257 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
2258 readl(phba->CAregaddr); /* flush */
2263 * lpfc_sli_resume_iocb - Process iocbs in the txq
2264 * @phba: Pointer to HBA context object.
2265 * @pring: Pointer to driver SLI ring object.
2267 * This function is called with hbalock held to post pending iocbs
2268 * in the txq to the firmware. This function is called when driver
2269 * detects space available in the ring.
2272 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2275 struct lpfc_iocbq *nextiocb;
2277 lockdep_assert_held(&phba->hbalock);
2281 * (a) there is anything on the txq to send
2282 * (b) link is up
2283 * (c) link attention events can be processed (fcp ring only)
2284 * (d) IOCB processing is not blocked by the outstanding mbox command.
2287 if (lpfc_is_link_up(phba) &&
2288 (!list_empty(&pring->txq)) &&
2289 (pring->ringno != LPFC_FCP_RING ||
2290 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
2292 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2293 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
2294 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2297 lpfc_sli_update_ring(phba, pring);
2299 lpfc_sli_update_full_ring(phba, pring);
2306 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
2307 * @phba: Pointer to HBA context object.
2308 * @hbqno: HBQ number.
2310 * This function is called with hbalock held to get the next
2311 * available slot for the given HBQ. If there is a free slot
2312 * available for the HBQ it will return a pointer to the next available
2313 * HBQ entry; otherwise it will return NULL.
2315 static struct lpfc_hbq_entry *
2316 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
2318 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2320 lockdep_assert_held(&phba->hbalock);
2322 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
2323 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
2324 hbqp->next_hbqPutIdx = 0;
2326 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
2327 uint32_t raw_index = phba->hbq_get[hbqno];
2328 uint32_t getidx = le32_to_cpu(raw_index);
2330 hbqp->local_hbqGetIdx = getidx;
2332 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
2333 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2334 "1802 HBQ %d: local_hbqGetIdx "
2335 "%u is > than hbqp->entry_count %u\n",
2336 hbqno, hbqp->local_hbqGetIdx,
2339 phba->link_state = LPFC_HBA_ERROR;
2343 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
2347 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
2352 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
2353 * @phba: Pointer to HBA context object.
2355 * This function is called with no lock held to free all the
2356 * hbq buffers while uninitializing the SLI interface. It also
2357 * frees the HBQ buffers returned by the firmware but not yet
2358 * processed by the upper layers.
2361 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2363 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2364 struct hbq_dmabuf *hbq_buf;
2365 unsigned long flags;
2368 hbq_count = lpfc_sli_hbq_count();
2369 /* Return all memory used by all HBQs */
2370 spin_lock_irqsave(&phba->hbalock, flags);
2371 for (i = 0; i < hbq_count; ++i) {
2372 list_for_each_entry_safe(dmabuf, next_dmabuf,
2373 &phba->hbqs[i].hbq_buffer_list, list) {
2374 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2375 list_del(&hbq_buf->dbuf.list);
2376 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2378 phba->hbqs[i].buffer_count = 0;
2381 /* Mark the HBQs not in use */
2382 phba->hbq_in_use = 0;
2383 spin_unlock_irqrestore(&phba->hbalock, flags);
2387 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2388 * @phba: Pointer to HBA context object.
2389 * @hbqno: HBQ number.
2390 * @hbq_buf: Pointer to HBQ buffer.
2392 * This function is called with the hbalock held to post a
2393 * hbq buffer to the firmware. If the function finds an empty
2394 * slot in the HBQ, it will post the buffer. The function will return a
2395 * pointer to the hbq entry if it successfully posts the buffer;
2396 * otherwise it will return NULL.
2399 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2400 struct hbq_dmabuf *hbq_buf)
2402 lockdep_assert_held(&phba->hbalock);
2403 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2407 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2408 * @phba: Pointer to HBA context object.
2409 * @hbqno: HBQ number.
2410 * @hbq_buf: Pointer to HBQ buffer.
2412 * This function is called with the hbalock held to post a hbq buffer to the
2413 * firmware. If the function finds an empty slot in the HBQ, it will post the
2414 * buffer and place it on the hbq_buffer_list. The function will return zero if
2415 * it successfully posts the buffer; otherwise it will return an error.
2418 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2419 struct hbq_dmabuf *hbq_buf)
2421 struct lpfc_hbq_entry *hbqe;
2422 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2424 lockdep_assert_held(&phba->hbalock);
2425 /* Get next HBQ entry slot to use */
2426 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2428 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2430 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2431 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2432 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2433 hbqe->bde.tus.f.bdeFlags = 0;
2434 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2435 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2437 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2438 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2440 readl(phba->hbq_put + hbqno);
2441 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2448 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2449 * @phba: Pointer to HBA context object.
2450 * @hbqno: HBQ number.
2451 * @hbq_buf: Pointer to HBQ buffer.
2453 * This function is called with the hbalock held to post an RQE to the SLI4
2454 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2455 * the hbq_buffer_list and return zero, otherwise it will return an error.
2458 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2459 struct hbq_dmabuf *hbq_buf)
2462 struct lpfc_rqe hrqe;
2463 struct lpfc_rqe drqe;
2464 struct lpfc_queue *hrq;
2465 struct lpfc_queue *drq;
2467 if (hbqno != LPFC_ELS_HBQ)
2469 hrq = phba->sli4_hba.hdr_rq;
2470 drq = phba->sli4_hba.dat_rq;
2472 lockdep_assert_held(&phba->hbalock);
2473 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2474 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2475 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2476 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2477 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2480 hbq_buf->tag = (rc | (hbqno << 16));
2481 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
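/*
 * Illustrative sketch of the tag encoding above: the index returned by
 * lpfc_sli4_rq_put() fills the low 16 bits and the HBQ number the high
 * 16 bits, so consumers such as lpfc_sli_free_hbq() can recover the
 * queue with (tag >> 16). Assuming rc == 5 on LPFC_ELS_HBQ:
 *
 *	hbq_buf->tag = 5 | (LPFC_ELS_HBQ << 16);
 *	hbqno        = hbq_buf->tag >> 16;	/* LPFC_ELS_HBQ again */
 */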
2485 /* HBQ for ELS and CT traffic. */
2486 static struct lpfc_hbq_init lpfc_els_hbq = {
2491 .ring_mask = (1 << LPFC_ELS_RING),
2498 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2503 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2504 * @phba: Pointer to HBA context object.
2505 * @hbqno: HBQ number.
2506 * @count: Number of HBQ buffers to be posted.
2508 * This function is called with no lock held to post more hbq buffers to the
2509 * given HBQ. The function returns the number of HBQ buffers successfully posted.
2513 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2515 uint32_t i, posted = 0;
2516 unsigned long flags;
2517 struct hbq_dmabuf *hbq_buffer;
2518 LIST_HEAD(hbq_buf_list);
2519 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2522 if ((phba->hbqs[hbqno].buffer_count + count) >
2523 lpfc_hbq_defs[hbqno]->entry_count)
2524 count = lpfc_hbq_defs[hbqno]->entry_count -
2525 phba->hbqs[hbqno].buffer_count;
2528 /* Allocate HBQ entries */
2529 for (i = 0; i < count; i++) {
2530 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2533 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2535 /* Check whether HBQ is still in use */
2536 spin_lock_irqsave(&phba->hbalock, flags);
2537 if (!phba->hbq_in_use)
2539 while (!list_empty(&hbq_buf_list)) {
2540 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2542 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2544 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2545 phba->hbqs[hbqno].buffer_count++;
2548 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2550 spin_unlock_irqrestore(&phba->hbalock, flags);
2553 spin_unlock_irqrestore(&phba->hbalock, flags);
2554 while (!list_empty(&hbq_buf_list)) {
2555 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2557 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
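/*
 * Design note and minimal sketch: buffers are allocated onto a private
 * list before hbalock is taken because the allocator may sleep; only
 * the firmware posting runs under the lock. If the HBQ went out of use
 * in the window, the unposted buffers are freed from the local list:
 *
 *	LIST_HEAD(tmp);
 *	/* allocate entries onto 'tmp' with no lock held */
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	/* post from 'tmp' to firmware, or free on failure */
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 */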
2563 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2564 * @phba: Pointer to HBA context object.
2567 * This function posts more buffers to the HBQ. This function
2568 * is called with no lock held. The function returns the number of HBQ entries
2569 * successfully allocated.
2572 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2574 if (phba->sli_rev == LPFC_SLI_REV4)
2577 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2578 lpfc_hbq_defs[qno]->add_count);
2582 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2583 * @phba: Pointer to HBA context object.
2584 * @qno: HBQ queue number.
2586 * This function is called from SLI initialization code path with
2587 * no lock held to post initial HBQ buffers to firmware. The
2588 * function returns the number of HBQ entries successfully allocated.
2591 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2593 if (phba->sli_rev == LPFC_SLI_REV4)
2594 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2595 lpfc_hbq_defs[qno]->entry_count);
2597 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2598 lpfc_hbq_defs[qno]->init_count);
2602 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2604 * This function removes the first hbq buffer on an hbq list and returns a
2605 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2607 static struct hbq_dmabuf *
2608 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2610 struct lpfc_dmabuf *d_buf;
2612 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2615 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2619 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2620 * @phba: Pointer to HBA context object.
2623 * This function removes the first RQ buffer on an RQ buffer list and returns a
2624 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2626 static struct rqb_dmabuf *
2627 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2629 struct lpfc_dmabuf *h_buf;
2630 struct lpfc_rqb *rqbp;
2633 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2634 struct lpfc_dmabuf, list);
2637 rqbp->buffer_count--;
2638 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2642 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2643 * @phba: Pointer to HBA context object.
2644 * @tag: Tag of the hbq buffer.
2646 * This function searches for the hbq buffer associated with the given tag in
2647 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer;
2648 * otherwise it returns NULL.
2650 static struct hbq_dmabuf *
2651 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2653 struct lpfc_dmabuf *d_buf;
2654 struct hbq_dmabuf *hbq_buf;
2658 if (hbqno >= LPFC_MAX_HBQS)
2661 spin_lock_irq(&phba->hbalock);
2662 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2663 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2664 if (hbq_buf->tag == tag) {
2665 spin_unlock_irq(&phba->hbalock);
2669 spin_unlock_irq(&phba->hbalock);
2670 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2671 "1803 Bad hbq tag. Data: x%x x%x\n",
2672 tag, phba->hbqs[tag >> 16].buffer_count);
2677 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2678 * @phba: Pointer to HBA context object.
2679 * @hbq_buffer: Pointer to HBQ buffer.
2681 * This function is called with the hbalock held. This function gives back
2682 * the hbq buffer to firmware. If the HBQ does not have space to
2683 * post the buffer, it will free the buffer.
2686 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2691 hbqno = hbq_buffer->tag >> 16;
2692 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2693 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2698 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2699 * @mbxCommand: mailbox command code.
2701 * This function is called by the mailbox event handler function to verify
2702 * that the completed mailbox command is a legitimate mailbox command. If the
2703 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2704 * and the mailbox event handler will take the HBA offline.
2707 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2711 switch (mbxCommand) {
2715 case MBX_WRITE_VPARMS:
2716 case MBX_RUN_BIU_DIAG:
2719 case MBX_CONFIG_LINK:
2720 case MBX_CONFIG_RING:
2721 case MBX_RESET_RING:
2722 case MBX_READ_CONFIG:
2723 case MBX_READ_RCONFIG:
2724 case MBX_READ_SPARM:
2725 case MBX_READ_STATUS:
2729 case MBX_READ_LNK_STAT:
2731 case MBX_UNREG_LOGIN:
2733 case MBX_DUMP_MEMORY:
2734 case MBX_DUMP_CONTEXT:
2737 case MBX_UPDATE_CFG:
2739 case MBX_DEL_LD_ENTRY:
2740 case MBX_RUN_PROGRAM:
2742 case MBX_SET_VARIABLE:
2743 case MBX_UNREG_D_ID:
2744 case MBX_KILL_BOARD:
2745 case MBX_CONFIG_FARP:
2748 case MBX_RUN_BIU_DIAG64:
2749 case MBX_CONFIG_PORT:
2750 case MBX_READ_SPARM64:
2751 case MBX_READ_RPI64:
2752 case MBX_REG_LOGIN64:
2753 case MBX_READ_TOPOLOGY:
2756 case MBX_LOAD_EXP_ROM:
2757 case MBX_ASYNCEVT_ENABLE:
2761 case MBX_PORT_CAPABILITIES:
2762 case MBX_PORT_IOV_CONTROL:
2763 case MBX_SLI4_CONFIG:
2764 case MBX_SLI4_REQ_FTRS:
2766 case MBX_UNREG_FCFI:
2771 case MBX_RESUME_RPI:
2772 case MBX_READ_EVENT_LOG_STATUS:
2773 case MBX_READ_EVENT_LOG:
2774 case MBX_SECURITY_MGMT:
2776 case MBX_ACCESS_VDATA:
2787 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2788 * @phba: Pointer to HBA context object.
2789 * @pmboxq: Pointer to mailbox command.
2791 * This is completion handler function for mailbox commands issued from
2792 * lpfc_sli_issue_mbox_wait function. This function is called by the
2793 * mailbox event handler function with no lock held. This function
2794 * will wake up the thread waiting on the completion pointed to by context3
2798 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2800 unsigned long drvr_flag;
2801 struct completion *pmbox_done;
2804 * If pmbox_done is empty, the driver thread gave up waiting and
2805 * continued running.
2807 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2808 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2809 pmbox_done = (struct completion *)pmboxq->context3;
2811 complete(pmbox_done);
2812 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
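/*
 * Illustrative sketch of the issuing side this handler pairs with
 * (shape only; lpfc_sli_issue_mbox_wait has the real code):
 *
 *	DECLARE_COMPLETION_ONSTACK(mbox_done);
 *	pmboxq->context3 = &mbox_done;
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	/* issue the mailbox, then: */
 *	wait_for_completion_timeout(&mbox_done, timeout);
 *
 * Taking hbalock around the complete() call above closes the race
 * where the waiter times out and clears context3 while this handler
 * is still running.
 */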
2817 __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2819 unsigned long iflags;
2821 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2822 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2823 spin_lock_irqsave(&ndlp->lock, iflags);
2824 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2825 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2826 spin_unlock_irqrestore(&ndlp->lock, iflags);
2828 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2832 lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2834 __lpfc_sli_rpi_release(vport, ndlp);
2838 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2839 * @phba: Pointer to HBA context object.
2840 * @pmb: Pointer to mailbox object.
2842 * This function is the default mailbox completion handler. It
2843 * frees the memory resources associated with the completed mailbox
2844 * command. If the completed command is a REG_LOGIN mailbox command,
2845 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2848 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2850 struct lpfc_vport *vport = pmb->vport;
2851 struct lpfc_nodelist *ndlp;
2852 struct Scsi_Host *shost;
2857 * If a REG_LOGIN succeeded after the node was destroyed or the node
2858 * is in re-discovery, the driver needs to clean up the RPI.
2860 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2861 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2862 !pmb->u.mb.mbxStatus) {
2863 rpi = pmb->u.mb.un.varWords[0];
2864 vpi = pmb->u.mb.un.varRegLogin.vpi;
2865 if (phba->sli_rev == LPFC_SLI_REV4)
2866 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2867 lpfc_unreg_login(phba, vpi, rpi, pmb);
2869 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2870 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2871 if (rc != MBX_NOT_FINISHED)
2875 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2876 !(phba->pport->load_flag & FC_UNLOADING) &&
2877 !pmb->u.mb.mbxStatus) {
2878 shost = lpfc_shost_from_vport(vport);
2879 spin_lock_irq(shost->host_lock);
2880 vport->vpi_state |= LPFC_VPI_REGISTERED;
2881 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2882 spin_unlock_irq(shost->host_lock);
2885 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2886 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2890 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2891 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2893 /* Check to see if there are any deferred events to process */
2897 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2898 "1438 UNREG cmpl deferred mbox x%x "
2899 "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
2900 ndlp->nlp_rpi, ndlp->nlp_DID,
2901 ndlp->nlp_flag, ndlp->nlp_defer_did,
2902 ndlp, vport->load_flag, kref_read(&ndlp->kref));
2904 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2905 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2906 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2907 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2908 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2910 __lpfc_sli_rpi_release(vport, ndlp);
2913 /* The unreg_login mailbox is complete and had a
2914 * reference that has to be released. The PLOGI
2918 pmb->ctx_ndlp = NULL;
2922 /* This nlp_put pairs with lpfc_sli4_resume_rpi */
2923 if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
2924 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2928 /* Check security permission status on INIT_LINK mailbox command */
2929 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2930 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2931 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2932 "2860 SLI authentication is required "
2933 "for INIT_LINK but has not done yet\n");
2935 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2936 lpfc_sli4_mbox_cmd_free(phba, pmb);
2938 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
2941 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2942 * @phba: Pointer to HBA context object.
2943 * @pmb: Pointer to mailbox object.
2945 * This function is the unreg rpi mailbox completion handler. It
2946 * frees the memory resources associated with the completed mailbox
2947 * command. An additional reference is put on the ndlp to prevent
2948 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2949 * the unreg mailbox command completes; this routine puts that reference.
2954 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2956 struct lpfc_vport *vport = pmb->vport;
2957 struct lpfc_nodelist *ndlp;
2959 ndlp = pmb->ctx_ndlp;
2960 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2961 if (phba->sli_rev == LPFC_SLI_REV4 &&
2962 (bf_get(lpfc_sli_intf_if_type,
2963 &phba->sli4_hba.sli_intf) >=
2964 LPFC_SLI_INTF_IF_TYPE_2)) {
2967 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2968 "0010 UNREG_LOGIN vpi:%x "
2969 "rpi:%x DID:%x defer x%x flg x%x "
2971 vport->vpi, ndlp->nlp_rpi,
2972 ndlp->nlp_DID, ndlp->nlp_defer_did,
2975 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2977 /* Check to see if there are any deferred
2980 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2981 (ndlp->nlp_defer_did !=
2982 NLP_EVT_NOTHING_PENDING)) {
2984 vport, KERN_INFO, LOG_DISCOVERY,
2985 "4111 UNREG cmpl deferred "
2987 "NPort x%x Data: x%x x%px\n",
2988 ndlp->nlp_rpi, ndlp->nlp_DID,
2989 ndlp->nlp_defer_did, ndlp);
2990 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2991 ndlp->nlp_defer_did =
2992 NLP_EVT_NOTHING_PENDING;
2993 lpfc_issue_els_plogi(
2994 vport, ndlp->nlp_DID, 0);
2996 __lpfc_sli_rpi_release(vport, ndlp);
3003 mempool_free(pmb, phba->mbox_mem_pool);
3007 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
3008 * @phba: Pointer to HBA context object.
3010 * This function is called with no lock held. This function processes all
3011 * the completed mailbox commands and gives them to the upper layers. The interrupt
3012 * service routine processes mailbox completion interrupt and adds completed
3013 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
3014 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
3015 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
3016 * function returns the mailbox commands to the upper layer by calling the
3017 * completion handler function of each mailbox.
3020 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
3027 phba->sli.slistat.mbox_event++;
3029 /* Get all completed mailbox buffers into the cmplq */
3030 spin_lock_irq(&phba->hbalock);
3031 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
3032 spin_unlock_irq(&phba->hbalock);
3034 /* Get a Mailbox buffer to setup mailbox commands for callback */
3036 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
3042 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
3044 lpfc_debugfs_disc_trc(pmb->vport,
3045 LPFC_DISC_TRC_MBOX_VPORT,
3046 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
3047 (uint32_t)pmbox->mbxCommand,
3048 pmbox->un.varWords[0],
3049 pmbox->un.varWords[1]);
3052 lpfc_debugfs_disc_trc(phba->pport,
3054 "MBOX cmpl: cmd:x%x mb:x%x x%x",
3055 (uint32_t)pmbox->mbxCommand,
3056 pmbox->un.varWords[0],
3057 pmbox->un.varWords[1]);
3062 * It is a fatal error if an unknown mailbox command completes.
3064 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
3066 /* Unknown mailbox command compl */
3067 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3068 "(%d):0323 Unknown Mailbox command "
3069 "x%x (x%x/x%x) Cmpl\n",
3070 pmb->vport ? pmb->vport->vpi :
3073 lpfc_sli_config_mbox_subsys_get(phba,
3075 lpfc_sli_config_mbox_opcode_get(phba,
3077 phba->link_state = LPFC_HBA_ERROR;
3078 phba->work_hs = HS_FFER3;
3079 lpfc_handle_eratt(phba);
3083 if (pmbox->mbxStatus) {
3084 phba->sli.slistat.mbox_stat_err++;
3085 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
3086 /* Mbox cmd cmpl error - RETRYing */
3087 lpfc_printf_log(phba, KERN_INFO,
3089 "(%d):0305 Mbox cmd cmpl "
3090 "error - RETRYing Data: x%x "
3091 "(x%x/x%x) x%x x%x x%x\n",
3092 pmb->vport ? pmb->vport->vpi :
3095 lpfc_sli_config_mbox_subsys_get(phba,
3097 lpfc_sli_config_mbox_opcode_get(phba,
3100 pmbox->un.varWords[0],
3101 pmb->vport ? pmb->vport->port_state :
3102 LPFC_VPORT_UNKNOWN);
3103 pmbox->mbxStatus = 0;
3104 pmbox->mbxOwner = OWN_HOST;
3105 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3106 if (rc != MBX_NOT_FINISHED)
3111 /* Mailbox cmd <cmd> Cmpl <cmpl> */
3112 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
3113 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
3114 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
3116 pmb->vport ? pmb->vport->vpi : 0,
3118 lpfc_sli_config_mbox_subsys_get(phba, pmb),
3119 lpfc_sli_config_mbox_opcode_get(phba, pmb),
3121 *((uint32_t *) pmbox),
3122 pmbox->un.varWords[0],
3123 pmbox->un.varWords[1],
3124 pmbox->un.varWords[2],
3125 pmbox->un.varWords[3],
3126 pmbox->un.varWords[4],
3127 pmbox->un.varWords[5],
3128 pmbox->un.varWords[6],
3129 pmbox->un.varWords[7],
3130 pmbox->un.varWords[8],
3131 pmbox->un.varWords[9],
3132 pmbox->un.varWords[10]);
3135 pmb->mbox_cmpl(phba, pmb);
3141 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
3142 * @phba: Pointer to HBA context object.
3143 * @pring: Pointer to driver SLI ring object.
3146 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
3147 * is set in the tag, the buffer was posted for a particular exchange and
3148 * the function will return the buffer without replacing it.
3149 * If the buffer is for unsolicited ELS or CT traffic, this function
3150 * returns the buffer and also posts another buffer to the firmware.
3152 static struct lpfc_dmabuf *
3153 lpfc_sli_get_buff(struct lpfc_hba *phba,
3154 struct lpfc_sli_ring *pring,
3157 struct hbq_dmabuf *hbq_entry;
3159 if (tag & QUE_BUFTAG_BIT)
3160 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
3161 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
3164 return &hbq_entry->dbuf;
3168 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
3169 * containing a NVME LS request.
3170 * @phba: pointer to lpfc hba data structure.
3171 * @piocb: pointer to the iocbq struct representing the sequence starting frame.
3174 * This routine initially validates the NVME LS, validates there is a login
3175 * with the port that sent the LS, and then calls the appropriate nvme host
3176 * or target LS request handler.
3179 lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
3181 struct lpfc_nodelist *ndlp;
3182 struct lpfc_dmabuf *d_buf;
3183 struct hbq_dmabuf *nvmebuf;
3184 struct fc_frame_header *fc_hdr;
3185 struct lpfc_async_xchg_ctx *axchg = NULL;
3186 char *failwhy = NULL;
3187 uint32_t oxid, sid, did, fctl, size;
3190 d_buf = piocb->cmd_dmabuf;
3192 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
3193 fc_hdr = nvmebuf->hbuf.virt;
3194 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
3195 sid = sli4_sid_from_fc_hdr(fc_hdr);
3196 did = sli4_did_from_fc_hdr(fc_hdr);
3197 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
3198 fc_hdr->fh_f_ctl[1] << 8 |
3199 fc_hdr->fh_f_ctl[2]);
3200 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
3202 lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
3205 if (phba->pport->load_flag & FC_UNLOADING) {
3206 failwhy = "Driver Unloading";
3207 } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
3208 failwhy = "NVME FC4 Disabled";
3209 } else if (!phba->nvmet_support && !phba->pport->localport) {
3210 failwhy = "No Localport";
3211 } else if (phba->nvmet_support && !phba->targetport) {
3212 failwhy = "No Targetport";
3213 } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
3214 failwhy = "Bad NVME LS R_CTL";
3215 } else if (unlikely((fctl & 0x00FF0000) !=
3216 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
3217 failwhy = "Bad NVME LS F_CTL";
3219 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
3221 failwhy = "No CTX memory";
3224 if (unlikely(failwhy)) {
3225 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3226 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
3227 sid, oxid, failwhy);
3231 /* validate the source of the LS is logged in */
3232 ndlp = lpfc_findnode_did(phba->pport, sid);
3234 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3235 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3236 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
3237 "6216 NVME Unsol rcv: No ndlp: "
3238 "NPort_ID x%x oxid x%x\n",
3249 axchg->state = LPFC_NVME_STE_LS_RCV;
3250 axchg->entry_cnt = 1;
3251 axchg->rqb_buffer = (void *)nvmebuf;
3252 axchg->hdwq = &phba->sli4_hba.hdwq[0];
3253 axchg->payload = nvmebuf->dbuf.virt;
3254 INIT_LIST_HEAD(&axchg->list);
3256 if (phba->nvmet_support) {
3257 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3258 spin_lock_irq(&ndlp->lock);
3259 if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3260 ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3261 spin_unlock_irq(&ndlp->lock);
3263 /* This reference is a single occurrence to hold the
3264 * node valid until the nvmet transport calls
3267 if (!lpfc_nlp_get(ndlp))
3270 lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3271 "6206 NVMET unsol ls_req ndlp x%px "
3272 "DID x%x xflags x%x refcnt %d\n",
3273 ndlp, ndlp->nlp_DID,
3274 ndlp->fc4_xpt_flags,
3275 kref_read(&ndlp->kref));
3277 spin_unlock_irq(&ndlp->lock);
3280 ret = lpfc_nvme_handle_lsreq(phba, axchg);
3283 /* if zero, LS was successfully handled. If non-zero, LS not handled */
3288 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3289 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3290 "NVMe%s handler failed %d\n",
3292 (phba->nvmet_support) ? "T" : "I", ret);
3294 /* recycle receive buffer */
3295 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3297 /* If start of new exchange, abort it */
3298 if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3299 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
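/*
 * Illustrative note on the F_CTL check above: the three f_ctl bytes of
 * the FC header are packed big-endian into a 24-bit value, so the
 * 0x00FF0000 mask isolates the first byte. FC_FC_FIRST_SEQ (1 << 21),
 * FC_FC_END_SEQ (1 << 19) and FC_FC_SEQ_INIT (1 << 16) together are
 * 0x290000, so a valid NVME LS first frame carries 0x29 there, e.g.:
 *
 *	fctl = (0x29 << 16) | (0x00 << 8) | 0x00;	/* example bytes */
 *	(fctl & 0x00FF0000) == 0x290000			/* passes */
 */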
3306 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3307 * @phba: Pointer to HBA context object.
3308 * @pring: Pointer to driver SLI ring object.
3309 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3310 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3311 * @fch_type: the type for the first frame of the sequence.
3313 * This function is called with no lock held. This function uses the r_ctl and
3314 * type of the received sequence to find the correct callback function to call
3315 * to process the sequence.
3318 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3319 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3326 lpfc_nvme_unsol_ls_handler(phba, saveq);
3332 /* unSolicited Responses */
3333 if (pring->prt[0].profile) {
3334 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3335 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3339 /* We must search, based on rctl / type, for the right routine */
3341 for (i = 0; i < pring->num_mask; i++) {
3342 if ((pring->prt[i].rctl == fch_r_ctl) &&
3343 (pring->prt[i].type == fch_type)) {
3344 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3345 (pring->prt[i].lpfc_sli_rcv_unsol_event)
3346 (phba, pring, saveq);
3354 lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
3355 struct lpfc_iocbq *saveq)
3358 union lpfc_wqe128 *wqe;
3361 irsp = &saveq->iocb;
3364 /* Fill wcqe with the IOCB status fields */
3365 bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
3366 saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
3367 saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
3368 saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;
3371 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);
3373 /* rx-id of the response frame */
3374 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);
3376 /* ox-id of the frame */
3377 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
3378 irsp->unsli3.rcvsli3.ox_id);
3381 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
3382 irsp->un.rcvels.remoteID);
3384 /* unsol data len */
3385 for (i = 0; i < irsp->ulpBdeCount; i++) {
3386 struct lpfc_hbq_entry *hbqe = NULL;
3388 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3390 hbqe = (struct lpfc_hbq_entry *)
3391 &irsp->un.ulpWord[0];
3392 saveq->wqe.gen_req.bde.tus.f.bdeSize =
3393 hbqe->bde.tus.f.bdeSize;
3394 } else if (i == 1) {
3395 hbqe = (struct lpfc_hbq_entry *)
3396 &irsp->unsli3.sli3Words[4];
3397 saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
3404 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3405 * @phba: Pointer to HBA context object.
3406 * @pring: Pointer to driver SLI ring object.
3407 * @saveq: Pointer to the unsolicited iocb.
3409 * This function is called with no lock held by the ring event handler
3410 * when there is an unsolicited iocb posted to the response ring by the
3411 * firmware. This function gets the buffer associated with the iocbs
3412 * and calls the event handler for the ring. This function handles both
3413 * qring buffers and hbq buffers.
3414 * When the function returns 1, the caller can free the iocb object; otherwise
3415 * the upper layer functions will free the iocb objects.
3418 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3419 struct lpfc_iocbq *saveq)
3424 uint32_t Rctl, Type;
3425 struct lpfc_iocbq *iocbq;
3426 struct lpfc_dmabuf *dmzbuf;
3428 irsp = &saveq->iocb;
3429 saveq->vport = phba->pport;
3431 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3432 if (pring->lpfc_sli_rcv_async_status)
3433 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3435 lpfc_printf_log(phba,
3438 "0316 Ring %d handler: unexpected "
3439 "ASYNC_STATUS iocb received evt_code "
3442 irsp->un.asyncstat.evt_code);
3446 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3447 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3448 if (irsp->ulpBdeCount > 0) {
3449 dmzbuf = lpfc_sli_get_buff(phba, pring,
3450 irsp->un.ulpWord[3]);
3451 lpfc_in_buf_free(phba, dmzbuf);
3454 if (irsp->ulpBdeCount > 1) {
3455 dmzbuf = lpfc_sli_get_buff(phba, pring,
3456 irsp->unsli3.sli3Words[3]);
3457 lpfc_in_buf_free(phba, dmzbuf);
3460 if (irsp->ulpBdeCount > 2) {
3461 dmzbuf = lpfc_sli_get_buff(phba, pring,
3462 irsp->unsli3.sli3Words[7]);
3463 lpfc_in_buf_free(phba, dmzbuf);
3469 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3470 if (irsp->ulpBdeCount != 0) {
3471 saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring,
3472 irsp->un.ulpWord[3]);
3473 if (!saveq->cmd_dmabuf)
3474 lpfc_printf_log(phba,
3477 "0341 Ring %d Cannot find buffer for "
3478 "an unsolicited iocb. tag 0x%x\n",
3480 irsp->un.ulpWord[3]);
3482 if (irsp->ulpBdeCount == 2) {
3483 saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring,
3484 irsp->unsli3.sli3Words[7]);
3485 if (!saveq->bpl_dmabuf)
3486 lpfc_printf_log(phba,
3489 "0342 Ring %d Cannot find buffer for an"
3490 " unsolicited iocb. tag 0x%x\n",
3492 irsp->unsli3.sli3Words[7]);
3494 list_for_each_entry(iocbq, &saveq->list, list) {
3495 irsp = &iocbq->iocb;
3496 if (irsp->ulpBdeCount != 0) {
3497 iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba,
3499 irsp->un.ulpWord[3]);
3500 if (!iocbq->cmd_dmabuf)
3501 lpfc_printf_log(phba,
3504 "0343 Ring %d Cannot find "
3505 "buffer for an unsolicited iocb"
3506 ". tag 0x%x\n", pring->ringno,
3507 irsp->un.ulpWord[3]);
3509 if (irsp->ulpBdeCount == 2) {
3510 iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba,
3512 irsp->unsli3.sli3Words[7]);
3513 if (!iocbq->bpl_dmabuf)
3514 lpfc_printf_log(phba,
3517 "0344 Ring %d Cannot find "
3518 "buffer for an unsolicited "
3521 irsp->unsli3.sli3Words[7]);
3525 paddr = getPaddr(irsp->un.cont64[0].addrHigh,
3526 irsp->un.cont64[0].addrLow);
3527 saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
3529 if (irsp->ulpBdeCount == 2) {
3530 paddr = getPaddr(irsp->un.cont64[1].addrHigh,
3531 irsp->un.cont64[1].addrLow);
3532 saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
3538 if (irsp->ulpBdeCount != 0 &&
3539 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3540 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3543 /* search continue save q for same XRI */
3544 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3545 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3546 saveq->iocb.unsli3.rcvsli3.ox_id) {
3547 list_add_tail(&saveq->list, &iocbq->list);
3553 list_add_tail(&saveq->clist,
3554 &pring->iocb_continue_saveq);
3556 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3557 list_del_init(&iocbq->clist);
3559 irsp = &saveq->iocb;
3564 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3565 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3566 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3567 Rctl = FC_RCTL_ELS_REQ;
3570 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3571 Rctl = w5p->hcsw.Rctl;
3572 Type = w5p->hcsw.Type;
3574 /* Firmware Workaround */
3575 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3576 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3577 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3578 Rctl = FC_RCTL_ELS_REQ;
3580 w5p->hcsw.Rctl = Rctl;
3581 w5p->hcsw.Type = Type;
3585 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3586 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
3587 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3588 if (irsp->unsli3.rcvsli3.vpi == 0xffff)
3589 saveq->vport = phba->pport;
3591 saveq->vport = lpfc_find_vport_by_vpid(phba,
3592 irsp->unsli3.rcvsli3.vpi);
3595 /* Prepare WQE with Unsol frame */
3596 lpfc_sli_prep_unsol_wqe(phba, saveq);
3598 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3599 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3600 "0313 Ring %d handler: unexpected Rctl x%x "
3601 "Type x%x received\n",
3602 pring->ringno, Rctl, Type);
3608 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3609 * @phba: Pointer to HBA context object.
3610 * @pring: Pointer to driver SLI ring object.
3611 * @prspiocb: Pointer to response iocb object.
3613 * This function looks up the iocb_lookup table to get the command iocb
3614 * corresponding to the given response iocb using the iotag of the
3615 * response iocb. The driver calls this function with the hbalock held
3616 * for SLI3 ports or the ring lock held for SLI4 ports.
3617 * This function returns the command iocb object if it finds the command
3618 * iocb else returns NULL.
3620 static struct lpfc_iocbq *
3621 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3622 struct lpfc_sli_ring *pring,
3623 struct lpfc_iocbq *prspiocb)
3625 struct lpfc_iocbq *cmd_iocb = NULL;
3628 if (phba->sli_rev == LPFC_SLI_REV4)
3629 iotag = get_wqe_reqtag(prspiocb);
3631 iotag = prspiocb->iocb.ulpIoTag;
3633 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3634 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3635 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3636 /* remove from txcmpl queue list */
3637 list_del_init(&cmd_iocb->list);
3638 cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3639 pring->txcmplq_cnt--;
3644 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3645 "0317 iotag x%x is out of "
3646 "range: max iotag x%x\n",
3647 iotag, phba->sli.last_iotag);
3652 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3653 * @phba: Pointer to HBA context object.
3654 * @pring: Pointer to driver SLI ring object.
3657 * This function looks up the iocb_lookup table to get the command iocb
3658 * corresponding to the given iotag. The driver calls this function with
3659 * the ring lock held because this function is an SLI4 port only helper.
3660 * This function returns the command iocb object if it finds the command
3661 * iocb else returns NULL.
3663 static struct lpfc_iocbq *
3664 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3665 struct lpfc_sli_ring *pring, uint16_t iotag)
3667 struct lpfc_iocbq *cmd_iocb = NULL;
3669 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3670 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3671 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3672 /* remove from txcmpl queue list */
3673 list_del_init(&cmd_iocb->list);
3674 cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3675 pring->txcmplq_cnt--;
3680 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3681 "0372 iotag x%x lookup error: max iotag (x%x) "
3683 iotag, phba->sli.last_iotag,
3684 cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
3689 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3690 * @phba: Pointer to HBA context object.
3691 * @pring: Pointer to driver SLI ring object.
3692 * @saveq: Pointer to the response iocb to be processed.
3694 * This function is called by the ring event handler for non-fcp
3695 * rings when there is a new response iocb in the response ring.
3696 * The caller is not required to hold any locks. This function
3697 * gets the command iocb associated with the response iocb and
3698 * calls the completion handler for the command iocb. If there
3699 * is no completion handler, the function will free the resources
3700 * associated with command iocb. If the response iocb is for
3701 * an already aborted command iocb, the status of the completion
3702 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3703 * This function always returns 1.
3706 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3707 struct lpfc_iocbq *saveq)
3709 struct lpfc_iocbq *cmdiocbp;
3710 unsigned long iflag;
3711 u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
3713 if (phba->sli_rev == LPFC_SLI_REV4)
3714 spin_lock_irqsave(&pring->ring_lock, iflag);
3716 spin_lock_irqsave(&phba->hbalock, iflag);
3717 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3718 if (phba->sli_rev == LPFC_SLI_REV4)
3719 spin_unlock_irqrestore(&pring->ring_lock, iflag);
3721 spin_unlock_irqrestore(&phba->hbalock, iflag);
3723 ulp_command = get_job_cmnd(phba, saveq);
3724 ulp_status = get_job_ulpstatus(phba, saveq);
3725 ulp_word4 = get_job_word4(phba, saveq);
3726 ulp_context = get_job_ulpcontext(phba, saveq);
3727 if (phba->sli_rev == LPFC_SLI_REV4)
3728 iotag = get_wqe_reqtag(saveq);
3730 iotag = saveq->iocb.ulpIoTag;
3733 ulp_command = get_job_cmnd(phba, cmdiocbp);
3734 if (cmdiocbp->cmd_cmpl) {
3736 * If an ELS command failed, send an event to mgmt
3740 (pring->ringno == LPFC_ELS_RING) &&
3741 (ulp_command == CMD_ELS_REQUEST64_CR))
3742 lpfc_send_els_failure_event(phba,
3746 * Post all ELS completions to the worker thread.
3747 * All others are passed to the completion callback.
3749 if (pring->ringno == LPFC_ELS_RING) {
3750 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3751 (cmdiocbp->cmd_flag &
3752 LPFC_DRIVER_ABORTED)) {
3753 spin_lock_irqsave(&phba->hbalock,
3755 cmdiocbp->cmd_flag &=
3756 ~LPFC_DRIVER_ABORTED;
3757 spin_unlock_irqrestore(&phba->hbalock,
3759 saveq->iocb.ulpStatus =
3760 IOSTAT_LOCAL_REJECT;
3761 saveq->iocb.un.ulpWord[4] =
3764 /* Firmware could still be in progress
3765 * of DMAing payload, so don't free data
3766 * buffer till after a hbeat.
3768 spin_lock_irqsave(&phba->hbalock,
3770 saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
3771 spin_unlock_irqrestore(&phba->hbalock,
3774 if (phba->sli_rev == LPFC_SLI_REV4) {
3775 if (saveq->cmd_flag &
3776 LPFC_EXCHANGE_BUSY) {
3777 /* Set cmdiocb flag for the
3778 * exchange busy so sgl (xri)
3779 * will not be released until
3780 * the abort xri is received
3784 &phba->hbalock, iflag);
3785 cmdiocbp->cmd_flag |=
3787 spin_unlock_irqrestore(
3788 &phba->hbalock, iflag);
3790 if (cmdiocbp->cmd_flag &
3791 LPFC_DRIVER_ABORTED) {
3793 * Clear LPFC_DRIVER_ABORTED
3794 * bit in case it was driver
3798 &phba->hbalock, iflag);
3799 cmdiocbp->cmd_flag &=
3800 ~LPFC_DRIVER_ABORTED;
3801 spin_unlock_irqrestore(
3802 &phba->hbalock, iflag);
3803 set_job_ulpstatus(cmdiocbp,
3804 IOSTAT_LOCAL_REJECT);
3805 set_job_ulpword4(cmdiocbp,
3806 IOERR_ABORT_REQUESTED);
3808 * For SLI4, irspiocb contains
3809 * NO_XRI in sli_xritag, it
3810 * shall not affect releasing
3811 * sgl (xri) process.
3813 set_job_ulpstatus(saveq,
3814 IOSTAT_LOCAL_REJECT);
3815 set_job_ulpword4(saveq,
3818 &phba->hbalock, iflag);
3820 LPFC_DELAY_MEM_FREE;
3821 spin_unlock_irqrestore(
3822 &phba->hbalock, iflag);
3826 cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
3828 lpfc_sli_release_iocbq(phba, cmdiocbp);
3831 * Unknown initiating command based on the response iotag.
3832 * This could be the case on the ELS ring because of
3835 if (pring->ringno != LPFC_ELS_RING) {
3837 * Ring <ringno> handler: unexpected completion IoTag
3840 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3841 "0322 Ring %d handler: "
3842 "unexpected completion IoTag x%x "
3843 "Data: x%x x%x x%x x%x\n",
3844 pring->ringno, iotag, ulp_status,
3845 ulp_word4, ulp_command, ulp_context);
3853 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3854 * @phba: Pointer to HBA context object.
3855 * @pring: Pointer to driver SLI ring object.
3857 * This function is called from the iocb ring event handlers when the
3858 * put pointer is ahead of the get pointer for a ring. This function signals
3859 * an error attention condition to the worker thread and the worker
3860 * thread will transition the HBA to offline state.
3863 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3865 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3867 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3868 * rsp ring <portRspMax>
3870 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3871 "0312 Ring %d handler: portRspPut %d "
3872 "is bigger than rsp ring %d\n",
3873 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3874 pring->sli.sli3.numRiocb);
3876 phba->link_state = LPFC_HBA_ERROR;
3879 * All error attention handlers are posted to
3882 phba->work_ha |= HA_ERATT;
3883 phba->work_hs = HS_FFER3;
3885 lpfc_worker_wake_up(phba);
3891 * lpfc_poll_eratt - Error attention polling timer timeout handler
3892 * @t: Context to fetch pointer to address of HBA context object from.
3894 * This function is invoked by the Error Attention polling timer when the
3895 * timer times out. It will check the SLI Error Attention register for
3896 * possible attention events. If so, it will post an Error Attention event
3897 * and wake up worker thread to process it. Otherwise, it will set up the
3898 * Error Attention polling timer for the next poll.
3900 void lpfc_poll_eratt(struct timer_list *t)
3902 struct lpfc_hba *phba;
3904 uint64_t sli_intr, cnt;
3906 phba = from_timer(phba, t, eratt_poll);
3908 /* Here we will also keep track of interrupts per sec of the hba */
3909 sli_intr = phba->sli.slistat.sli_intr;
3911 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3912 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3915 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3917 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3918 do_div(cnt, phba->eratt_poll_interval);
3919 phba->sli.slistat.sli_ips = cnt;
3921 phba->sli.slistat.sli_prev_intr = sli_intr;
3923 /* Check chip HA register for error event */
3924 eratt = lpfc_sli_check_eratt(phba);
3927 /* Tell the worker thread there is work to do */
3928 lpfc_worker_wake_up(phba);
3930 /* Restart the timer for next eratt poll */
3931 mod_timer(&phba->eratt_poll,
3933 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
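/*
 * Illustrative sketch of the interrupt-rate math above (assumed
 * counter values): sli_intr is a free-running 64-bit counter, so the
 * wrap branch computes the delta across the rollover:
 *
 *	prev = 0xfffffffffffffff0, now = 0x10
 *	cnt  = ((u64)(-1) - prev) + now;	/* 0xf + 0x10 = 0x1f */
 *
 * do_div(cnt, interval) is then used because 64-by-32 division is not
 * natively available on all 32-bit architectures.
 */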
3939 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3940 * @phba: Pointer to HBA context object.
3941 * @pring: Pointer to driver SLI ring object.
3942 * @mask: Host attention register mask for this ring.
3944 * This function is called from the interrupt context when there is a ring
3945 * event for the fcp ring. The caller does not hold any lock.
3946 * The function processes each response iocb in the response ring until it
3947 * finds an iocb with the LE bit set, chaining together all the iocbs up to
3948 * that iocb. The function will call the completion handler of the command
3949 * iocb if the response iocb indicates a completion for a command iocb or
3950 * an abort completion. The function will call the lpfc_sli_process_unsol_iocb
3951 * function if this is an unsolicited iocb.
3952 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3953 * to check it explicitly.
3956 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3957 struct lpfc_sli_ring *pring, uint32_t mask)
3959 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3960 IOCB_t *irsp = NULL;
3961 IOCB_t *entry = NULL;
3962 struct lpfc_iocbq *cmdiocbq = NULL;
3963 struct lpfc_iocbq rspiocbq;
3965 uint32_t portRspPut, portRspMax;
3967 lpfc_iocb_type type;
3968 unsigned long iflag;
3969 uint32_t rsp_cmpl = 0;
3971 spin_lock_irqsave(&phba->hbalock, iflag);
3972 pring->stats.iocb_event++;
3975 * The next available response entry should never exceed the maximum
3976 * entries. If it does, treat it as an adapter hardware error.
3978 portRspMax = pring->sli.sli3.numRiocb;
3979 portRspPut = le32_to_cpu(pgp->rspPutInx);
3980 if (unlikely(portRspPut >= portRspMax)) {
3981 lpfc_sli_rsp_pointers_error(phba, pring);
3982 spin_unlock_irqrestore(&phba->hbalock, iflag);
3985 if (phba->fcp_ring_in_use) {
3986 spin_unlock_irqrestore(&phba->hbalock, iflag);
3989 phba->fcp_ring_in_use = 1;
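/* fcp_ring_in_use serializes fast-path processing of this ring:
 * a second context seeing the flag set backs out above, and the
 * flag is cleared once this pass over the response ring is done.
 */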
3992 while (pring->sli.sli3.rspidx != portRspPut) {
3994 * Fetch an entry off the ring and copy it into a local data
3995 * structure. The copy involves a byte-swap since the
3996 * network byte order and PCI byte order are different.
3998 entry = lpfc_resp_iocb(phba, pring);
3999 phba->last_completion_time = jiffies;
4001 if (++pring->sli.sli3.rspidx >= portRspMax)
4002 pring->sli.sli3.rspidx = 0;
4004 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
4005 (uint32_t *) &rspiocbq.iocb,
4006 phba->iocb_rsp_size);
4007 INIT_LIST_HEAD(&(rspiocbq.list));
4008 irsp = &rspiocbq.iocb;
4010 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
4011 pring->stats.iocb_rsp++;
4014 if (unlikely(irsp->ulpStatus)) {
4016 * If resource errors reported from HBA, reduce
4017 * queuedepths of the SCSI device.
4019 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
4020 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
4021 IOERR_NO_RESOURCES)) {
4022 spin_unlock_irqrestore(&phba->hbalock, iflag);
4023 phba->lpfc_rampdown_queue_depth(phba);
4024 spin_lock_irqsave(&phba->hbalock, iflag);
4027 /* Rsp ring <ringno> error: IOCB */
4028 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4029 "0336 Rsp Ring %d error: IOCB Data: "
4030 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
4032 irsp->un.ulpWord[0],
4033 irsp->un.ulpWord[1],
4034 irsp->un.ulpWord[2],
4035 irsp->un.ulpWord[3],
4036 irsp->un.ulpWord[4],
4037 irsp->un.ulpWord[5],
4038 *(uint32_t *)&irsp->un1,
4039 *((uint32_t *)&irsp->un1 + 1));
4043 case LPFC_ABORT_IOCB:
4046 * Idle exchange closed via ABTS from port. No iocb
4047 * resources need to be recovered.
4049 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
4050 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4051 "0333 IOCB cmd 0x%x"
4052 " processed. Skipping"
4058 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
4060 if (unlikely(!cmdiocbq))
4062 if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
4063 cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
4064 if (cmdiocbq->cmd_cmpl) {
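/* Drop hbalock across the completion callback; completion
 * handlers may take other locks or issue further commands and
 * must not run with hbalock held.
 */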
4065 spin_unlock_irqrestore(&phba->hbalock, iflag);
4066 cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
4067 spin_lock_irqsave(&phba->hbalock, iflag);
4070 case LPFC_UNSOL_IOCB:
4071 spin_unlock_irqrestore(&phba->hbalock, iflag);
4072 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
4073 spin_lock_irqsave(&phba->hbalock, iflag);
4076 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
4077 char adaptermsg[LPFC_MAX_ADPTMSG];
4078 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4079 memcpy(&adaptermsg[0], (uint8_t *) irsp,
4081 dev_warn(&((phba->pcidev)->dev),
4083 phba->brd_no, adaptermsg);
4085 /* Unknown IOCB command */
4086 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4087 "0334 Unknown IOCB command "
4088 "Data: x%x, x%x x%x x%x x%x\n",
4089 type, irsp->ulpCommand,
4098 * The response IOCB has been processed. Update the ring
4099 * pointer in SLIM. If the port response put pointer has not
4100 * been updated, sync the pgp->rspPutInx and fetch the new port
4101 * response put pointer.
4103 writel(pring->sli.sli3.rspidx,
4104 &phba->host_gp[pring->ringno].rspGetInx);
4106 if (pring->sli.sli3.rspidx == portRspPut)
4107 portRspPut = le32_to_cpu(pgp->rspPutInx);
4110 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
4111 pring->stats.iocb_rsp_full++;
4112 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4113 writel(status, phba->CAregaddr);
4114 readl(phba->CAregaddr);
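/* The readl of CAregaddr flushes the posted PCI write so the
 * chip attention bits take effect before processing continues.
 */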
4116 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4117 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4118 pring->stats.iocb_cmd_empty++;
4120 /* Force update of the local copy of cmdGetInx */
4121 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4122 lpfc_sli_resume_iocb(phba, pring);
4124 if ((pring->lpfc_sli_cmd_available))
4125 (pring->lpfc_sli_cmd_available) (phba, pring);
4129 phba->fcp_ring_in_use = 0;
4130 spin_unlock_irqrestore(&phba->hbalock, iflag);
4135 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
4136 * @phba: Pointer to HBA context object.
4137 * @pring: Pointer to driver SLI ring object.
4138 * @rspiocbp: Pointer to driver response IOCB object.
4140 * This function is called from the worker thread when there is a slow-path
4141 * response IOCB to process. This function chains all the response iocbs until
4142 * seeing the iocb with the LE bit set. The function will call
4143 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
4144 * completion of a command iocb. The function will call the
4145 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
4146 * The function frees the resources or calls the completion handler if this
4147 * iocb is an abort completion. The function returns NULL when the response
4148 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
4149 * this function shall chain the iocb on to the iocb_continueq and return the
4150 * response iocb passed in.
4152 static struct lpfc_iocbq *
4153 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4154 struct lpfc_iocbq *rspiocbp)
4156 struct lpfc_iocbq *saveq;
4157 struct lpfc_iocbq *cmdiocb;
4158 struct lpfc_iocbq *next_iocb;
4160 uint32_t free_saveq;
4162 lpfc_iocb_type type;
4163 unsigned long iflag;
4164 u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
4165 u32 ulp_word4 = get_job_word4(phba, rspiocbp);
4166 u32 ulp_command = get_job_cmnd(phba, rspiocbp);
4169 spin_lock_irqsave(&phba->hbalock, iflag);
4170 /* First add the response iocb to the continueq list */
4171 list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
4172 pring->iocb_continueq_cnt++;
4175 * By default, the driver expects to free all resources
4176 * associated with this iocb completion.
4179 saveq = list_get_first(&pring->iocb_continueq,
4180 struct lpfc_iocbq, list);
4181 list_del_init(&pring->iocb_continueq);
4182 pring->iocb_continueq_cnt = 0;
4184 pring->stats.iocb_rsp++;
4187 * If resource errors reported from HBA, reduce
4188 * queuedepths of the SCSI device.
4190 if (ulp_status == IOSTAT_LOCAL_REJECT &&
4191 ((ulp_word4 & IOERR_PARAM_MASK) ==
4192 IOERR_NO_RESOURCES)) {
4193 spin_unlock_irqrestore(&phba->hbalock, iflag);
4194 phba->lpfc_rampdown_queue_depth(phba);
4195 spin_lock_irqsave(&phba->hbalock, iflag);
4199 /* Rsp ring <ringno> error: IOCB */
4200 if (phba->sli_rev < LPFC_SLI_REV4) {
4201 irsp = &rspiocbp->iocb;
4202 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4203 "0328 Rsp Ring %d error: ulp_status x%x "
4205 "x%08x x%08x x%08x x%08x "
4206 "x%08x x%08x x%08x x%08x "
4207 "x%08x x%08x x%08x x%08x "
4208 "x%08x x%08x x%08x x%08x\n",
4209 pring->ringno, ulp_status,
4210 get_job_ulpword(rspiocbp, 0),
4211 get_job_ulpword(rspiocbp, 1),
4212 get_job_ulpword(rspiocbp, 2),
4213 get_job_ulpword(rspiocbp, 3),
4214 get_job_ulpword(rspiocbp, 4),
4215 get_job_ulpword(rspiocbp, 5),
4216 *(((uint32_t *)irsp) + 6),
4217 *(((uint32_t *)irsp) + 7),
4218 *(((uint32_t *)irsp) + 8),
4219 *(((uint32_t *)irsp) + 9),
4220 *(((uint32_t *)irsp) + 10),
4221 *(((uint32_t *)irsp) + 11),
4222 *(((uint32_t *)irsp) + 12),
4223 *(((uint32_t *)irsp) + 13),
4224 *(((uint32_t *)irsp) + 14),
4225 *(((uint32_t *)irsp) + 15));
4227 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4228 "0321 Rsp Ring %d error: "
4230 "x%x x%x x%x x%x\n",
4232 rspiocbp->wcqe_cmpl.word0,
4233 rspiocbp->wcqe_cmpl.total_data_placed,
4234 rspiocbp->wcqe_cmpl.parameter,
4235 rspiocbp->wcqe_cmpl.word3);
4241 * Fetch the iocb command type and call the correct completion
4242 * routine. Solicited and Unsolicited IOCBs on the ELS ring
4243 * get freed back to the lpfc_iocb_list by the discovery
4246 cmd_type = ulp_command & CMD_IOCB_MASK;
4247 type = lpfc_sli_iocb_cmd_type(cmd_type);
4250 spin_unlock_irqrestore(&phba->hbalock, iflag);
4251 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
4252 spin_lock_irqsave(&phba->hbalock, iflag);
4254 case LPFC_UNSOL_IOCB:
4255 spin_unlock_irqrestore(&phba->hbalock, iflag);
4256 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
4257 spin_lock_irqsave(&phba->hbalock, iflag);
4261 case LPFC_ABORT_IOCB:
4263 if (ulp_command != CMD_XRI_ABORTED_CX)
4264 cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
4267 /* Call the specified completion routine */
4268 if (cmdiocb->cmd_cmpl) {
4269 spin_unlock_irqrestore(&phba->hbalock, iflag);
4270 cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
4271 spin_lock_irqsave(&phba->hbalock, iflag);
4273 __lpfc_sli_release_iocbq(phba, cmdiocb);
4277 case LPFC_UNKNOWN_IOCB:
4278 if (ulp_command == CMD_ADAPTER_MSG) {
4279 char adaptermsg[LPFC_MAX_ADPTMSG];
4281 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4282 memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
4284 dev_warn(&((phba->pcidev)->dev),
4286 phba->brd_no, adaptermsg);
4288 /* Unknown command */
4289 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4290 "0335 Unknown IOCB "
4291 "command Data: x%x "
4295 get_wqe_reqtag(rspiocbp),
4296 get_job_ulpcontext(phba, rspiocbp));
4302 list_for_each_entry_safe(rspiocbp, next_iocb,
4303 &saveq->list, list) {
4304 list_del_init(&rspiocbp->list);
4305 __lpfc_sli_release_iocbq(phba, rspiocbp);
4307 __lpfc_sli_release_iocbq(phba, saveq);
4310 spin_unlock_irqrestore(&phba->hbalock, iflag);
4315 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
4316 * @phba: Pointer to HBA context object.
4317 * @pring: Pointer to driver SLI ring object.
4318 * @mask: Host attention register mask for this ring.
4320 * This routine wraps the actual slow_ring event processing routine,
4321 * using the API jump table function pointer from the lpfc_hba struct.
4324 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4325 struct lpfc_sli_ring *pring, uint32_t mask)
4327 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
4331 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
4332 * @phba: Pointer to HBA context object.
4333 * @pring: Pointer to driver SLI ring object.
4334 * @mask: Host attention register mask for this ring.
4336 * This function is called from the worker thread when there is a ring event
4337 * for non-fcp rings. The caller does not hold any lock. The function will
4338 * remove each response iocb in the response ring and call the handle
4339 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4342 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4343 struct lpfc_sli_ring *pring, uint32_t mask)
4345 struct lpfc_pgp *pgp;
4347 IOCB_t *irsp = NULL;
4348 struct lpfc_iocbq *rspiocbp = NULL;
4349 uint32_t portRspPut, portRspMax;
4350 unsigned long iflag;
4353 pgp = &phba->port_gp[pring->ringno];
4354 spin_lock_irqsave(&phba->hbalock, iflag);
4355 pring->stats.iocb_event++;
4358 * The next available response entry should never exceed the maximum
4359 * entries. If it does, treat it as an adapter hardware error.
4361 portRspMax = pring->sli.sli3.numRiocb;
4362 portRspPut = le32_to_cpu(pgp->rspPutInx);
4363 if (portRspPut >= portRspMax) {
4365 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4366 * rsp ring <portRspMax>
4368 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4369 "0303 Ring %d handler: portRspPut %d "
4370 "is bigger than rsp ring %d\n",
4371 pring->ringno, portRspPut, portRspMax);
4373 phba->link_state = LPFC_HBA_ERROR;
4374 spin_unlock_irqrestore(&phba->hbalock, iflag);
4376 phba->work_hs = HS_FFER3;
4377 lpfc_handle_eratt(phba);
4383 while (pring->sli.sli3.rspidx != portRspPut) {
4385 * Build a completion list and call the appropriate handler.
4386 * The process is to get the next available response iocb, get
4387 * a free iocb from the list, copy the response data into the
4388 * free iocb, insert to the continuation list, and update the
4389 * next response index to slim. This process makes response
4390 * iocbs in the ring available to DMA as fast as possible but
4391 * pays a penalty for a copy operation. Since the iocb is
4392 * only 32 bytes, this penalty is considered small relative to
4393 * the PCI reads for register values and a slim write. When
4394 * the ulpLe field is set, the entire Command has been received.
4397 entry = lpfc_resp_iocb(phba, pring);
4399 phba->last_completion_time = jiffies;
4400 rspiocbp = __lpfc_sli_get_iocbq(phba);
4401 if (rspiocbp == NULL) {
4402 printk(KERN_ERR "%s: out of buffers! Failing "
4403 "completion.\n", __func__);
4407 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4408 phba->iocb_rsp_size);
4409 irsp = &rspiocbp->iocb;
4411 if (++pring->sli.sli3.rspidx >= portRspMax)
4412 pring->sli.sli3.rspidx = 0;
4414 if (pring->ringno == LPFC_ELS_RING) {
4415 lpfc_debugfs_slow_ring_trc(phba,
4416 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
4417 *(((uint32_t *) irsp) + 4),
4418 *(((uint32_t *) irsp) + 6),
4419 *(((uint32_t *) irsp) + 7));
4422 writel(pring->sli.sli3.rspidx,
4423 &phba->host_gp[pring->ringno].rspGetInx);
4425 spin_unlock_irqrestore(&phba->hbalock, iflag);
4426 /* Handle the response IOCB */
4427 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4428 spin_lock_irqsave(&phba->hbalock, iflag);
4431 * If the port response put pointer has not been updated, sync
4432 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
4433 * response put pointer.
4435 if (pring->sli.sli3.rspidx == portRspPut) {
4436 portRspPut = le32_to_cpu(pgp->rspPutInx);
4438 } /* while (pring->sli.sli3.rspidx != portRspPut) */
4440 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4441 /* At least one response entry has been freed */
4442 pring->stats.iocb_rsp_full++;
4443 /* SET RxRE_RSP in Chip Att register */
4444 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4445 writel(status, phba->CAregaddr);
4446 readl(phba->CAregaddr); /* flush */
4448 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4449 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4450 pring->stats.iocb_cmd_empty++;
4452 /* Force update of the local copy of cmdGetInx */
4453 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4454 lpfc_sli_resume_iocb(phba, pring);
4456 if ((pring->lpfc_sli_cmd_available))
4457 (pring->lpfc_sli_cmd_available) (phba, pring);
4461 spin_unlock_irqrestore(&phba->hbalock, iflag);
4466 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
4467 * @phba: Pointer to HBA context object.
4468 * @pring: Pointer to driver SLI ring object.
4469 * @mask: Host attention register mask for this ring.
4471 * This function is called from the worker thread when there is a pending
4472 * ELS response iocb on the driver internal slow-path response iocb worker
4473 * queue. The caller does not hold any lock. The function will remove each
4474 * response iocb from the response worker queue and call the handle
4475 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4478 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4479 struct lpfc_sli_ring *pring, uint32_t mask)
4481 struct lpfc_iocbq *irspiocbq;
4482 struct hbq_dmabuf *dmabuf;
4483 struct lpfc_cq_event *cq_event;
4484 unsigned long iflag;
4487 spin_lock_irqsave(&phba->hbalock, iflag);
4488 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4489 spin_unlock_irqrestore(&phba->hbalock, iflag);
4490 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4491 /* Get the response iocb from the head of work queue */
4492 spin_lock_irqsave(&phba->hbalock, iflag);
4493 list_remove_head(&phba->sli4_hba.sp_queue_event,
4494 cq_event, struct lpfc_cq_event, list);
4495 spin_unlock_irqrestore(&phba->hbalock, iflag);
4497 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4498 case CQE_CODE_COMPL_WQE:
4499 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4501 /* Translate ELS WCQE to response IOCBQ */
4502 irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
4505 lpfc_sli_sp_handle_rspiocb(phba, pring,
4509 case CQE_CODE_RECEIVE:
4510 case CQE_CODE_RECEIVE_V1:
4511 dmabuf = container_of(cq_event, struct hbq_dmabuf,
4513 lpfc_sli4_handle_received_buffer(phba, dmabuf);
4520 /* Limit the number of events to 64 to avoid soft lockups */
4527 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4528 * @phba: Pointer to HBA context object.
4529 * @pring: Pointer to driver SLI ring object.
4531 * This function aborts all iocbs in the given ring and frees all the iocb
4532 * objects in txq. This function issues an abort iocb for all the iocb commands
4533 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4534 * the return of this function. The caller is not required to hold any locks.
4537 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4539 LIST_HEAD(tx_completions);
4540 LIST_HEAD(txcmplq_completions);
4541 struct lpfc_iocbq *iocb, *next_iocb;
4544 if (pring->ringno == LPFC_ELS_RING) {
4545 lpfc_fabric_abort_hba(phba);
4547 offline = pci_channel_offline(phba->pcidev);
4549 /* Error everything on txq and txcmplq
4552 if (phba->sli_rev >= LPFC_SLI_REV4) {
4553 spin_lock_irq(&pring->ring_lock);
4554 list_splice_init(&pring->txq, &tx_completions);
4558 list_splice_init(&pring->txcmplq,
4559 &txcmplq_completions);
4561 /* Next issue ABTS for everything on the txcmplq */
4562 list_for_each_entry_safe(iocb, next_iocb,
4563 &pring->txcmplq, list)
4564 lpfc_sli_issue_abort_iotag(phba, pring,
4567 spin_unlock_irq(&pring->ring_lock);
4569 spin_lock_irq(&phba->hbalock);
4570 list_splice_init(&pring->txq, &tx_completions);
4574 list_splice_init(&pring->txcmplq, &txcmplq_completions);
4576 /* Next issue ABTS for everything on the txcmplq */
4577 list_for_each_entry_safe(iocb, next_iocb,
4578 &pring->txcmplq, list)
4579 lpfc_sli_issue_abort_iotag(phba, pring,
4582 spin_unlock_irq(&phba->hbalock);
4586 /* Cancel all the IOCBs from the completions list */
4587 lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
4588 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
4590 /* Make sure HBA is alive */
4591 lpfc_issue_hb_tmo(phba);
4593 /* Cancel all the IOCBs from the completions list */
4594 lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
4599 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4600 * @phba: Pointer to HBA context object.
4602 * This function aborts all iocbs in FCP rings and frees all the iocb
4603 * objects in txq. This function issues an abort iocb for all the iocb commands
4604 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4605 * the return of this function. The caller is not required to hold any locks.
4608 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4610 struct lpfc_sli *psli = &phba->sli;
4611 struct lpfc_sli_ring *pring;
4614 /* Look on all the FCP Rings for the iotag */
4615 if (phba->sli_rev >= LPFC_SLI_REV4) {
4616 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4617 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4618 lpfc_sli_abort_iocb_ring(phba, pring);
4621 pring = &psli->sli3_ring[LPFC_FCP_RING];
4622 lpfc_sli_abort_iocb_ring(phba, pring);
4627 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4628 * @phba: Pointer to HBA context object.
4630 * This function flushes all iocbs in the IO ring and frees all the iocb
4631 * objects in txq and txcmplq. This function will not issue abort iocbs
4632 * for all the iocb commands in txcmplq, they will just be returned with
4633 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
4634 * slot has been permanently disabled.
4637 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4641 struct lpfc_sli *psli = &phba->sli;
4642 struct lpfc_sli_ring *pring;
4644 struct lpfc_iocbq *piocb, *next_iocb;
4646 spin_lock_irq(&phba->hbalock);
4647 /* Indicate the I/O queues are flushed */
4648 phba->hba_flag |= HBA_IOQ_FLUSH;
4649 spin_unlock_irq(&phba->hbalock);
4651 /* Look on all the FCP Rings for the iotag */
4652 if (phba->sli_rev >= LPFC_SLI_REV4) {
4653 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4654 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4656 spin_lock_irq(&pring->ring_lock);
4657 /* Retrieve everything on txq */
4658 list_splice_init(&pring->txq, &txq);
4659 list_for_each_entry_safe(piocb, next_iocb,
4660 &pring->txcmplq, list)
4661 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4662 /* Retrieve everything on the txcmplq */
4663 list_splice_init(&pring->txcmplq, &txcmplq);
4665 pring->txcmplq_cnt = 0;
4666 spin_unlock_irq(&pring->ring_lock);
4669 lpfc_sli_cancel_iocbs(phba, &txq,
4670 IOSTAT_LOCAL_REJECT,
4672 /* Flush the txcmplq */
4673 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4674 IOSTAT_LOCAL_REJECT,
4676 if (unlikely(pci_channel_offline(phba->pcidev)))
4677 lpfc_sli4_io_xri_aborted(phba, NULL, 0);
4680 pring = &psli->sli3_ring[LPFC_FCP_RING];
4682 spin_lock_irq(&phba->hbalock);
4683 /* Retrieve everything on txq */
4684 list_splice_init(&pring->txq, &txq);
4685 list_for_each_entry_safe(piocb, next_iocb,
4686 &pring->txcmplq, list)
4687 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4688 /* Retrieve everything on the txcmplq */
4689 list_splice_init(&pring->txcmplq, &txcmplq);
4691 pring->txcmplq_cnt = 0;
4692 spin_unlock_irq(&phba->hbalock);
4695 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4697 /* Flush the txcmpq */
4698 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4704 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4705 * @phba: Pointer to HBA context object.
4706 * @mask: Bit mask to be checked.
4708 * This function reads the host status register and compares
4709 * it with the provided bit mask to check if the HBA completed
4710 * the restart. This function will wait in a loop for the
4711 * HBA to complete restart. If the HBA does not restart within
4712 * 15 iterations, the function will reset the HBA again. The
4713 * function returns 1 when the HBA fails to restart, otherwise it returns zero.
4717 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4723 /* Read the HBA Host Status Register */
4724 if (lpfc_readl(phba->HSregaddr, &status))
4727 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4730 * Check status register every 100ms for 5 retries, then every
4731 * 500ms for 5, then every 2.5 sec for 5, then reset board and
4732 * every 2.5 sec for 4.
4733 * Break out of the loop if errors occurred during init.
4735 while (((status & mask) != mask) &&
4736 !(status & HS_FFERM) &&
4748 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4749 lpfc_sli_brdrestart(phba);
4751 /* Read the HBA Host Status Register */
4752 if (lpfc_readl(phba->HSregaddr, &status)) {
4758 /* Check to see if any errors occurred during init */
4759 if ((status & HS_FFERM) || (i >= 20)) {
4760 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4761 "2751 Adapter failed to restart, "
4762 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4764 readl(phba->MBslimaddr + 0xa8),
4765 readl(phba->MBslimaddr + 0xac));
4766 phba->link_state = LPFC_HBA_ERROR;
4774 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4775 * @phba: Pointer to HBA context object.
4776 * @mask: Bit mask to be checked.
4778 * This function checks the host status register to see if the HBA is
4779 * ready. This function will wait in a loop for the HBA to become ready.
4780 * If the HBA is not ready, the function will reset the HBA PCI
4781 * function again. The function returns 1 when the HBA fails to become
4782 * ready, otherwise it returns zero.
4785 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4790 /* Read the HBA Host Status Register */
4791 status = lpfc_sli4_post_status_check(phba);
4794 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4795 lpfc_sli_brdrestart(phba);
4796 status = lpfc_sli4_post_status_check(phba);
4799 /* Check to see if any errors occurred during init */
4801 phba->link_state = LPFC_HBA_ERROR;
4804 phba->sli4_hba.intr_enable = 0;
4806 phba->hba_flag &= ~HBA_SETUP;
4811 * lpfc_sli_brdready - Wrapper func for checking hba readiness
4812 * @phba: Pointer to HBA context object.
4813 * @mask: Bit mask to be checked.
4815 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine,
4816 * using the API jump table function pointer from the lpfc_hba struct.
4819 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4821 return phba->lpfc_sli_brdready(phba, mask);
4824 #define BARRIER_TEST_PATTERN (0xdeadbeef)
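/* lpfc_reset_barrier() writes this pattern into SLIM; the chip
 * acknowledges the DMA-quiesce barrier by writing back the one's
 * complement, which the polling loops below wait for.
 */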
4827 * lpfc_reset_barrier - Make HBA ready for HBA reset
4828 * @phba: Pointer to HBA context object.
4830 * This function is called before resetting an HBA. This function is called
4831 * with hbalock held and requests HBA to quiesce DMAs before a reset.
4833 void lpfc_reset_barrier(struct lpfc_hba *phba)
4835 uint32_t __iomem *resp_buf;
4836 uint32_t __iomem *mbox_buf;
4837 volatile struct MAILBOX_word0 mbox;
4838 uint32_t hc_copy, ha_copy, resp_data;
4842 lockdep_assert_held(&phba->hbalock);
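/* Documents the locking contract: with CONFIG_LOCKDEP enabled,
 * this warns at runtime if the caller does not hold hbalock.
 */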
4844 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4845 if (hdrtype != 0x80 ||
4846 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4847 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4851 * Tell the other part of the chip to suspend temporarily all
4854 resp_buf = phba->MBslimaddr;
4856 /* Disable the error attention */
4857 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4859 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4860 readl(phba->HCregaddr); /* flush */
4861 phba->link_flag |= LS_IGNORE_ERATT;
4863 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4865 if (ha_copy & HA_ERATT) {
4866 /* Clear Chip error bit */
4867 writel(HA_ERATT, phba->HAregaddr);
4868 phba->pport->stopped = 1;
4872 mbox.mbxCommand = MBX_KILL_BOARD;
4873 mbox.mbxOwner = OWN_CHIP;
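/* OWN_CHIP hands the mailbox to the firmware. Once the chip has
 * consumed the command it flips ownership back to OWN_HOST, which
 * the polling loop below compares against.
 */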
4875 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4876 mbox_buf = phba->MBslimaddr;
4877 writel(mbox.word0, mbox_buf);
4879 for (i = 0; i < 50; i++) {
4880 if (lpfc_readl((resp_buf + 1), &resp_data))
4882 if (resp_data != ~(BARRIER_TEST_PATTERN))
4888 if (lpfc_readl((resp_buf + 1), &resp_data))
4890 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4891 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4892 phba->pport->stopped)
4898 mbox.mbxOwner = OWN_HOST;
4900 for (i = 0; i < 500; i++) {
4901 if (lpfc_readl(resp_buf, &resp_data))
4903 if (resp_data != mbox.word0)
4912 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4914 if (!(ha_copy & HA_ERATT))
4920 if (readl(phba->HAregaddr) & HA_ERATT) {
4921 writel(HA_ERATT, phba->HAregaddr);
4922 phba->pport->stopped = 1;
4926 phba->link_flag &= ~LS_IGNORE_ERATT;
4927 writel(hc_copy, phba->HCregaddr);
4928 readl(phba->HCregaddr); /* flush */
4932 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4933 * @phba: Pointer to HBA context object.
4935 * This function issues a kill_board mailbox command and waits for
4936 * the error attention interrupt. This function is called for stopping
4937 * the firmware processing. The caller is not required to hold any
4938 * locks. This function calls lpfc_hba_down_post function to free
4939 * any pending commands after the kill. The function will return 1 when it
4940 * fails to kill the board, else it will return 0.
4943 lpfc_sli_brdkill(struct lpfc_hba *phba)
4945 struct lpfc_sli *psli;
4955 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4956 "0329 Kill HBA Data: x%x x%x\n",
4957 phba->pport->port_state, psli->sli_flag);
4959 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4963 /* Disable the error attention */
4964 spin_lock_irq(&phba->hbalock);
4965 if (lpfc_readl(phba->HCregaddr, &status)) {
4966 spin_unlock_irq(&phba->hbalock);
4967 mempool_free(pmb, phba->mbox_mem_pool);
4970 status &= ~HC_ERINT_ENA;
4971 writel(status, phba->HCregaddr);
4972 readl(phba->HCregaddr); /* flush */
4973 phba->link_flag |= LS_IGNORE_ERATT;
4974 spin_unlock_irq(&phba->hbalock);
4976 lpfc_kill_board(phba, pmb);
4977 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4978 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4980 if (retval != MBX_SUCCESS) {
4981 if (retval != MBX_BUSY)
4982 mempool_free(pmb, phba->mbox_mem_pool);
4983 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4984 "2752 KILL_BOARD command failed retval %d\n",
4986 spin_lock_irq(&phba->hbalock);
4987 phba->link_flag &= ~LS_IGNORE_ERATT;
4988 spin_unlock_irq(&phba->hbalock);
4992 spin_lock_irq(&phba->hbalock);
4993 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4994 spin_unlock_irq(&phba->hbalock);
4996 mempool_free(pmb, phba->mbox_mem_pool);
4998 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4999 * attention every 100ms for 3 seconds. If we don't get ERATT after
5000 * 3 seconds we still set HBA_ERROR state because the status of the
5001 * board is now undefined.
5003 if (lpfc_readl(phba->HAregaddr, &ha_copy))
5005 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
5007 if (lpfc_readl(phba->HAregaddr, &ha_copy))
5011 del_timer_sync(&psli->mbox_tmo);
5012 if (ha_copy & HA_ERATT) {
5013 writel(HA_ERATT, phba->HAregaddr);
5014 phba->pport->stopped = 1;
5016 spin_lock_irq(&phba->hbalock);
5017 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5018 psli->mbox_active = NULL;
5019 phba->link_flag &= ~LS_IGNORE_ERATT;
5020 spin_unlock_irq(&phba->hbalock);
5022 lpfc_hba_down_post(phba);
5023 phba->link_state = LPFC_HBA_ERROR;
5025 return ha_copy & HA_ERATT ? 0 : 1;
5029 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
5030 * @phba: Pointer to HBA context object.
5032 * This function resets the HBA by writing HC_INITFF to the control
5033 * register. After the HBA resets, this function resets all the iocb ring
5034 * indices. This function disables PCI layer parity checking during
* the reset.
5036 * This function returns 0 always.
5037 * The caller is not required to hold any locks.
5040 lpfc_sli_brdreset(struct lpfc_hba *phba)
5042 struct lpfc_sli *psli;
5043 struct lpfc_sli_ring *pring;
5050 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5051 "0325 Reset HBA Data: x%x x%x\n",
5052 (phba->pport) ? phba->pport->port_state : 0,
5055 /* perform board reset */
5056 phba->fc_eventTag = 0;
5057 phba->link_events = 0;
5058 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5060 phba->pport->fc_myDID = 0;
5061 phba->pport->fc_prevDID = 0;
5064 /* Turn off parity checking and serr during the physical reset */
5065 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
5068 pci_write_config_word(phba->pcidev, PCI_COMMAND,
5070 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5072 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
5074 /* Now toggle INITFF bit in the Host Control Register */
5075 writel(HC_INITFF, phba->HCregaddr);
5077 readl(phba->HCregaddr); /* flush */
5078 writel(0, phba->HCregaddr);
5079 readl(phba->HCregaddr); /* flush */
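/* The reset is a set-then-clear toggle: INITFF is written and
 * flushed, then explicitly cleared so the chip comes out of its
 * reset state.
 */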
5081 /* Restore PCI cmd register */
5082 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5084 /* Initialize relevant SLI info */
5085 for (i = 0; i < psli->num_rings; i++) {
5086 pring = &psli->sli3_ring[i];
5088 pring->sli.sli3.rspidx = 0;
5089 pring->sli.sli3.next_cmdidx = 0;
5090 pring->sli.sli3.local_getidx = 0;
5091 pring->sli.sli3.cmdidx = 0;
5092 pring->missbufcnt = 0;
5095 phba->link_state = LPFC_WARM_START;
5100 * lpfc_sli4_brdreset - Reset a sli-4 HBA
5101 * @phba: Pointer to HBA context object.
5103 * This function resets a SLI4 HBA. This function disables PCI layer parity
5104 * checking while it resets the device. The caller is not required to hold
5107 * This function returns 0 on success else returns negative error code.
5110 lpfc_sli4_brdreset(struct lpfc_hba *phba)
5112 struct lpfc_sli *psli = &phba->sli;
5117 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5118 "0295 Reset HBA Data: x%x x%x x%x\n",
5119 phba->pport->port_state, psli->sli_flag,
5122 /* perform board reset */
5123 phba->fc_eventTag = 0;
5124 phba->link_events = 0;
5125 phba->pport->fc_myDID = 0;
5126 phba->pport->fc_prevDID = 0;
5127 phba->hba_flag &= ~HBA_SETUP;
5129 spin_lock_irq(&phba->hbalock);
5130 psli->sli_flag &= ~(LPFC_PROCESS_LA);
5131 phba->fcf.fcf_flag = 0;
5132 spin_unlock_irq(&phba->hbalock);
5134 /* Now physically reset the device */
5135 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5136 "0389 Performing PCI function reset!\n");
5138 /* Turn off parity checking and serr during the physical reset */
5139 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
5140 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5141 "3205 PCI read Config failed\n");
5145 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
5146 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5148 /* Perform FCoE PCI function reset before freeing queue memory */
5149 rc = lpfc_pci_function_reset(phba);
5151 /* Restore PCI cmd register */
5152 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5158 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
5159 * @phba: Pointer to HBA context object.
5161 * This function is called in the SLI initialization code path to
5162 * restart the HBA. The caller is not required to hold any lock.
5163 * This function writes MBX_RESTART mailbox command to the SLIM and
5164 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
5165 * function to free any pending commands. The function enables
5166 * POST only during the first initialization. The function returns zero.
5167 * The function does not guarantee completion of MBX_RESTART mailbox
5168 * command before the return of this function.
5171 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
5173 volatile struct MAILBOX_word0 mb;
5174 struct lpfc_sli *psli;
5175 void __iomem *to_slim;
5176 uint32_t hba_aer_enabled;
5178 spin_lock_irq(&phba->hbalock);
5180 /* Take PCIe device Advanced Error Reporting (AER) state */
5181 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
5186 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5187 "0337 Restart HBA Data: x%x x%x\n",
5188 (phba->pport) ? phba->pport->port_state : 0,
5192 mb.mbxCommand = MBX_RESTART;
5195 lpfc_reset_barrier(phba);
5197 to_slim = phba->MBslimaddr;
5198 writel(mb.word0, to_slim);
5199 readl(to_slim); /* flush */
5201 /* Only skip post after fc_ffinit is completed */
5202 if (phba->pport && phba->pport->port_state)
5203 mb.word0 = 1; /* This is really setting up word1 */
5205 mb.word0 = 0; /* This is really setting up word1 */
5206 to_slim = phba->MBslimaddr + sizeof (uint32_t);
5207 writel(mb.word0, to_slim);
5208 readl(to_slim); /* flush */
5210 lpfc_sli_brdreset(phba);
5212 phba->pport->stopped = 0;
5213 phba->link_state = LPFC_INIT_START;
5215 spin_unlock_irq(&phba->hbalock);
5217 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5218 psli->stats_start = ktime_get_seconds();
5220 /* Give the INITFF and Post time to settle. */
5223 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
5224 if (hba_aer_enabled)
5225 pci_disable_pcie_error_reporting(phba->pcidev);
5227 lpfc_hba_down_post(phba);
5233 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
5234 * @phba: Pointer to HBA context object.
5236 * This function is called in the SLI initialization code path to restart
5237 * a SLI4 HBA. The caller is not required to hold any lock.
5238 * At the end of the function, it calls lpfc_hba_down_post function to
5239 * free any pending commands.
5242 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
5244 struct lpfc_sli *psli = &phba->sli;
5245 uint32_t hba_aer_enabled;
5249 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5250 "0296 Restart HBA Data: x%x x%x\n",
5251 phba->pport->port_state, psli->sli_flag);
5253 /* Take PCIe device Advanced Error Reporting (AER) state */
5254 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
5256 rc = lpfc_sli4_brdreset(phba);
5258 phba->link_state = LPFC_HBA_ERROR;
5259 goto hba_down_queue;
5262 spin_lock_irq(&phba->hbalock);
5263 phba->pport->stopped = 0;
5264 phba->link_state = LPFC_INIT_START;
5266 phba->sli4_hba.fawwpn_flag = 0;
5267 spin_unlock_irq(&phba->hbalock);
5269 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5270 psli->stats_start = ktime_get_seconds();
5272 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
5273 if (hba_aer_enabled)
5274 pci_disable_pcie_error_reporting(phba->pcidev);
5277 lpfc_hba_down_post(phba);
5278 lpfc_sli4_queue_destroy(phba);
5284 * lpfc_sli_brdrestart - Wrapper func for restarting hba
5285 * @phba: Pointer to HBA context object.
5287 * This routine wraps the actual SLI3 or SLI4 hba restart routine,
5288 * using the API jump table function pointer from the lpfc_hba struct.
5291 lpfc_sli_brdrestart(struct lpfc_hba *phba)
5293 return phba->lpfc_sli_brdrestart(phba);
5297 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
5298 * @phba: Pointer to HBA context object.
5300 * This function is called after an HBA restart to wait for successful
5301 * restart of the HBA. Successful restart of the HBA is indicated by
5302 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
5303 * iterations, the function will restart the HBA again. The function returns
5304 * zero if HBA successfully restarted else returns negative error code.
5307 lpfc_sli_chipset_init(struct lpfc_hba *phba)
5309 uint32_t status, i = 0;
5311 /* Read the HBA Host Status Register */
5312 if (lpfc_readl(phba->HSregaddr, &status))
5315 /* Check status register to see what current state is */
5317 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
5319 /* Check every 10ms for 10 retries, then every 100ms for 90
5320 * retries, then every 1 sec for 50 retries, for a total of
5321 * ~60 seconds before resetting the board again and checking
5322 * every 1 sec for 50 more retries. Waiting up to 60 seconds
5323 * for board ready is required for Falcon FIPS zeroization to
5324 * complete; any board reset in between would restart the
5325 * zeroization and further delay board readiness.
5328 /* Adapter failed to init, timeout, status reg
5330 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5331 "0436 Adapter failed to init, "
5332 "timeout, status reg x%x, "
5333 "FW Data: A8 x%x AC x%x\n", status,
5334 readl(phba->MBslimaddr + 0xa8),
5335 readl(phba->MBslimaddr + 0xac));
5336 phba->link_state = LPFC_HBA_ERROR;
5340 /* Check to see if any errors occurred during init */
5341 if (status & HS_FFERM) {
5342 /* ERROR: During chipset initialization */
5343 /* Adapter failed to init, chipset, status reg
5345 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5346 "0437 Adapter failed to init, "
5347 "chipset, status reg x%x, "
5348 "FW Data: A8 x%x AC x%x\n", status,
5349 readl(phba->MBslimaddr + 0xa8),
5350 readl(phba->MBslimaddr + 0xac));
5351 phba->link_state = LPFC_HBA_ERROR;
5364 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5365 lpfc_sli_brdrestart(phba);
5367 /* Read the HBA Host Status Register */
5368 if (lpfc_readl(phba->HSregaddr, &status))
5372 /* Check to see if any errors occurred during init */
5373 if (status & HS_FFERM) {
5374 /* ERROR: During chipset initialization */
5375 /* Adapter failed to init, chipset, status reg <status> */
5376 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5377 "0438 Adapter failed to init, chipset, "
5379 "FW Data: A8 x%x AC x%x\n", status,
5380 readl(phba->MBslimaddr + 0xa8),
5381 readl(phba->MBslimaddr + 0xac));
5382 phba->link_state = LPFC_HBA_ERROR;
5386 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5388 /* Clear all interrupt enable conditions */
5389 writel(0, phba->HCregaddr);
5390 readl(phba->HCregaddr); /* flush */
5392 /* setup host attn register */
5393 writel(0xffffffff, phba->HAregaddr);
5394 readl(phba->HAregaddr); /* flush */
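/* Host Attention bits are write-1-to-clear; writing all ones
 * discards any stale attention conditions left over from before
 * the reset.
 */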
5399 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
5401 * This function calculates and returns the number of HBQs to be configured.
5405 lpfc_sli_hbq_count(void)
5407 return ARRAY_SIZE(lpfc_hbq_defs);
5411 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
5413 * This function adds the number of hbq entries in every HBQ to get
5414 * the total number of hbq entries required for the HBA, and returns it.
5418 lpfc_sli_hbq_entry_count(void)
5420 int hbq_count = lpfc_sli_hbq_count();
5424 for (i = 0; i < hbq_count; ++i)
5425 count += lpfc_hbq_defs[i]->entry_count;
5430 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
5432 * This function calculates the amount of memory required for all hbq entries
5433 * to be configured and returns the total memory required.
5436 lpfc_sli_hbq_size(void)
5438 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
5442 * lpfc_sli_hbq_setup - configure and initialize HBQs
5443 * @phba: Pointer to HBA context object.
5445 * This function is called during the SLI initialization to configure
5446 * all the HBQs and post buffers to the HBQ. The caller is not
5447 * required to hold any locks. This function will return zero if successful
5448 * else it will return negative error code.
5451 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
5453 int hbq_count = lpfc_sli_hbq_count();
5457 uint32_t hbq_entry_index;
5459 /* Get a Mailbox buffer to setup mailbox
5460 * commands for HBA initialization
5462 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5469 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
5470 phba->link_state = LPFC_INIT_MBX_CMDS;
5471 phba->hbq_in_use = 1;
5473 hbq_entry_index = 0;
5474 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
5475 phba->hbqs[hbqno].next_hbqPutIdx = 0;
5476 phba->hbqs[hbqno].hbqPutIdx = 0;
5477 phba->hbqs[hbqno].local_hbqGetIdx = 0;
5478 phba->hbqs[hbqno].entry_count =
5479 lpfc_hbq_defs[hbqno]->entry_count;
5480 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
5481 hbq_entry_index, pmb);
5482 hbq_entry_index += phba->hbqs[hbqno].entry_count;
5484 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
5485 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
5486 mbxStatus <status>, ring <num> */
5488 lpfc_printf_log(phba, KERN_ERR,
5489 LOG_SLI | LOG_VPORT,
5490 "1805 Adapter failed to init. "
5491 "Data: x%x x%x x%x\n",
5493 pmbox->mbxStatus, hbqno);
5495 phba->link_state = LPFC_HBA_ERROR;
5496 mempool_free(pmb, phba->mbox_mem_pool);
5500 phba->hbq_count = hbq_count;
5502 mempool_free(pmb, phba->mbox_mem_pool);
5504 /* Initially populate or replenish the HBQs */
5505 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5506 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5511 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5512 * @phba: Pointer to HBA context object.
5514 * This function is called during the SLI initialization to configure
5515 * all the HBQs and post buffers to the HBQ. The caller is not
5516 * required to hold any locks. This function will return zero if successful
5517 * else it will return negative error code.
5520 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5522 phba->hbq_in_use = 1;
5524 * Specific case when MDS diagnostics are enabled and supported.
5525 * The receive buffer count is truncated to manage the incoming
5528 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5529 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5530 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5532 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5533 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5534 phba->hbq_count = 1;
5535 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5536 /* Initially populate or replenish the HBQs */
5541 * lpfc_sli_config_port - Issue config port mailbox command
5542 * @phba: Pointer to HBA context object.
5543 * @sli_mode: sli mode - 2/3
5545 * This function is called by the sli initialization code path
5546 * to issue config_port mailbox command. This function restarts the
5547 * HBA firmware and issues a config_port mailbox command to configure
5548 * the SLI interface in the sli mode specified by sli_mode
5549 * variable. The caller is not required to hold any locks.
5550 * The function returns 0 if successful, else it returns a negative error code.
5554 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5557 uint32_t resetcount = 0, rc = 0, done = 0;
5559 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5561 phba->link_state = LPFC_HBA_ERROR;
5565 phba->sli_rev = sli_mode;
5566 while (resetcount < 2 && !done) {
5567 spin_lock_irq(&phba->hbalock);
5568 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5569 spin_unlock_irq(&phba->hbalock);
5570 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5571 lpfc_sli_brdrestart(phba);
5572 rc = lpfc_sli_chipset_init(phba);
5576 spin_lock_irq(&phba->hbalock);
5577 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5578 spin_unlock_irq(&phba->hbalock);
5581 /* Call pre CONFIG_PORT mailbox command initialization. A
5582 * value of 0 means the call was successful. Any other
5583 * nonzero value is a failure, but if ERESTART is returned,
5584 * the driver may reset the HBA and try again.
5586 rc = lpfc_config_port_prep(phba);
5587 if (rc == -ERESTART) {
5588 phba->link_state = LPFC_LINK_UNKNOWN;
5593 phba->link_state = LPFC_INIT_MBX_CMDS;
5594 lpfc_config_port(phba, pmb);
5595 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5596 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5597 LPFC_SLI3_HBQ_ENABLED |
5598 LPFC_SLI3_CRP_ENABLED |
5599 LPFC_SLI3_DSS_ENABLED);
5600 if (rc != MBX_SUCCESS) {
5601 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5602 "0442 Adapter failed to init, mbxCmd x%x "
5603 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5604 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5605 spin_lock_irq(&phba->hbalock);
5606 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5607 spin_unlock_irq(&phba->hbalock);
5610 /* Allow asynchronous mailbox command to go through */
5611 spin_lock_irq(&phba->hbalock);
5612 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5613 spin_unlock_irq(&phba->hbalock);
5616 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5617 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5618 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5619 "3110 Port did not grant ASABT\n");
5624 goto do_prep_failed;
5626 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5627 if (!pmb->u.mb.un.varCfgPort.cMA) {
5629 goto do_prep_failed;
5631 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5632 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5633 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5634 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5635 phba->max_vpi : phba->max_vports;
5639 if (pmb->u.mb.un.varCfgPort.gerbm)
5640 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5641 if (pmb->u.mb.un.varCfgPort.gcrp)
5642 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5644 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5645 phba->port_gp = phba->mbox->us.s3_pgp.port;
5647 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5648 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5649 phba->cfg_enable_bg = 0;
5650 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5651 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5652 "0443 Adapter did not grant "
5657 phba->hbq_get = NULL;
5658 phba->port_gp = phba->mbox->us.s2.port;
5662 mempool_free(pmb, phba->mbox_mem_pool);
5668 * lpfc_sli_hba_setup - SLI initialization function
5669 * @phba: Pointer to HBA context object.
5671 * This function is the main SLI initialization function. This function
5672 * is called by the HBA initialization code, HBA reset code and HBA
5673 * error attention handler code. Caller is not required to hold any
5674 * locks. This function issues config_port mailbox command to configure
5675 * the SLI, set up the iocb rings and HBQ rings. In the end the function
5676 * calls the config_port_post function to issue init_link mailbox
5677 * command and to start the discovery. The function will return zero
5678 * if successful, else it will return negative error code.
5681 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5687 /* Enable ISR already does config_port because of config_msi mbx */
5688 if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
5689 rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
5692 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
5694 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5696 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5697 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5698 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5700 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5701 "2709 This device supports "
5702 "Advanced Error Reporting (AER)\n");
5703 spin_lock_irq(&phba->hbalock);
5704 phba->hba_flag |= HBA_AER_ENABLED;
5705 spin_unlock_irq(&phba->hbalock);
5707 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5708 "2708 This device does not support "
5709 "Advanced Error Reporting (AER): %d\n",
5711 phba->cfg_aer_support = 0;
5715 if (phba->sli_rev == 3) {
5716 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5717 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5719 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5720 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5721 phba->sli3_options = 0;
5724 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5725 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5726 phba->sli_rev, phba->max_vpi);
5727 rc = lpfc_sli_ring_map(phba);
5730 goto lpfc_sli_hba_setup_error;
5732 /* Initialize VPIs. */
5733 if (phba->sli_rev == LPFC_SLI_REV3) {
5735 * The VPI bitmask and physical ID array are allocated
5736 * and initialized once only - at driver load. A port
5737 * reset doesn't need to reinitialize this memory.
5739 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5740 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5741 phba->vpi_bmask = kcalloc(longs,
5742 sizeof(unsigned long),
5744 if (!phba->vpi_bmask) {
5746 goto lpfc_sli_hba_setup_error;
5749 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5752 if (!phba->vpi_ids) {
5753 kfree(phba->vpi_bmask);
5755 goto lpfc_sli_hba_setup_error;
5757 for (i = 0; i < phba->max_vpi; i++)
5758 phba->vpi_ids[i] = i;
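/* VPI ids start out identity-mapped: index i refers to VPI i. */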
5763 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5764 rc = lpfc_sli_hbq_setup(phba);
5766 goto lpfc_sli_hba_setup_error;
5768 spin_lock_irq(&phba->hbalock);
5769 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5770 spin_unlock_irq(&phba->hbalock);
5772 rc = lpfc_config_port_post(phba);
5774 goto lpfc_sli_hba_setup_error;
5778 lpfc_sli_hba_setup_error:
5779 phba->link_state = LPFC_HBA_ERROR;
5780 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5781 "0445 Firmware initialization failed\n");
5786 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5787 * @phba: Pointer to HBA context object.
5789 * This function issues a dump mailbox command to read config region
5790 * 23, parses the records in the region, and populates the driver
* data structure.
5794 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5796 LPFC_MBOXQ_t *mboxq;
5797 struct lpfc_dmabuf *mp;
5798 struct lpfc_mqe *mqe;
5799 uint32_t data_length;
5802 /* Program the default value of vlan_id and fc_map */
5803 phba->valid_vlan = 0;
5804 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5805 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5806 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5808 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5812 mqe = &mboxq->u.mqe;
5813 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5815 goto out_free_mboxq;
5818 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5819 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5821 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5822 "(%d):2571 Mailbox cmd x%x Status x%x "
5823 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5824 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5825 "CQ: x%x x%x x%x x%x\n",
5826 mboxq->vport ? mboxq->vport->vpi : 0,
5827 bf_get(lpfc_mqe_command, mqe),
5828 bf_get(lpfc_mqe_status, mqe),
5829 mqe->un.mb_words[0], mqe->un.mb_words[1],
5830 mqe->un.mb_words[2], mqe->un.mb_words[3],
5831 mqe->un.mb_words[4], mqe->un.mb_words[5],
5832 mqe->un.mb_words[6], mqe->un.mb_words[7],
5833 mqe->un.mb_words[8], mqe->un.mb_words[9],
5834 mqe->un.mb_words[10], mqe->un.mb_words[11],
5835 mqe->un.mb_words[12], mqe->un.mb_words[13],
5836 mqe->un.mb_words[14], mqe->un.mb_words[15],
5837 mqe->un.mb_words[16], mqe->un.mb_words[50],
5839 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5840 mboxq->mcqe.trailer);
5844 goto out_free_mboxq;
5846 data_length = mqe->un.mb_words[5];
5847 if (data_length > DMP_RGN23_SIZE) {
5849 goto out_free_mboxq;
5852 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5856 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
5861 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5862 * @phba: pointer to lpfc hba data structure.
5863 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5864 * @vpd: pointer to the memory to hold resulting port vpd data.
5865 * @vpd_size: On input, the number of bytes allocated to @vpd.
5866 * On output, the number of data bytes in @vpd.
5868 * This routine executes a READ_REV SLI4 mailbox command. In
5869 * addition, this routine gets the port vpd data.
5873 * -ENOMEM - could not allocate memory.
5876 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5877 uint8_t *vpd, uint32_t *vpd_size)
5881 struct lpfc_dmabuf *dmabuf;
5882 struct lpfc_mqe *mqe;
5884 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5889 * Get a DMA buffer for the vpd data resulting from the READ_REV
5892 dma_size = *vpd_size;
5893 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5894 &dmabuf->phys, GFP_KERNEL);
5895 if (!dmabuf->virt) {
5901 * The SLI4 implementation of READ_REV conflicts at word1,
5902 * bits 31:16 and SLI4 adds vpd functionality not present
5903 * in SLI3. This code corrects the conflicts.
5905 lpfc_read_rev(phba, mboxq);
5906 mqe = &mboxq->u.mqe;
5907 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5908 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5909 mqe->un.read_rev.word1 &= 0x0000FFFF;
5910 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5911 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5913 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5915 dma_free_coherent(&phba->pcidev->dev, dma_size,
5916 dmabuf->virt, dmabuf->phys);
5922 * The available vpd length cannot be bigger than the
5923 * DMA buffer passed to the port. Catch the less than
5924 * case and update the caller's size.
5926 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5927 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5929 memcpy(vpd, dmabuf->virt, *vpd_size);
5931 dma_free_coherent(&phba->pcidev->dev, dma_size,
5932 dmabuf->virt, dmabuf->phys);
5938 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5939 * @phba: pointer to lpfc hba data structure.
5941 * This routine retrieves the SLI4 device controller attributes of the adapter this PCI function is attached to.
5946 * otherwise - failed to retrieve controller attributes
5949 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5951 LPFC_MBOXQ_t *mboxq;
5952 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5953 struct lpfc_controller_attribute *cntl_attr;
5954 void *virtaddr = NULL;
5955 uint32_t alloclen, reqlen;
5956 uint32_t shdr_status, shdr_add_status;
5957 union lpfc_sli4_cfg_shdr *shdr;
5960 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5964 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5965 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5966 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5967 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5968 LPFC_SLI4_MBX_NEMBED);
5970 if (alloclen < reqlen) {
5971 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5972 "3084 Allocated DMA memory size (%d) is "
5973 "less than the requested DMA memory size "
5974 "(%d)\n", alloclen, reqlen);
5976 goto out_free_mboxq;
5978 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5979 virtaddr = mboxq->sge_array->addr[0];
5980 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5981 shdr = &mbx_cntl_attr->cfg_shdr;
5982 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5983 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5984 if (shdr_status || shdr_add_status || rc) {
5985 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5986 "3085 Mailbox x%x (x%x/x%x) failed, "
5987 "rc:x%x, status:x%x, add_status:x%x\n",
5988 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5989 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5990 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5991 rc, shdr_status, shdr_add_status);
5993 goto out_free_mboxq;
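/* Cache link type/number, flash id and ASIC rev for the reporting below. */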
5996 cntl_attr = &mbx_cntl_attr->cntl_attr;
5997 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5998 phba->sli4_hba.lnk_info.lnk_tp =
5999 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
6000 phba->sli4_hba.lnk_info.lnk_no =
6001 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
6002 phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
6003 phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
6005 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
6006 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
6007 sizeof(phba->BIOSVersion));
6009 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6010 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
6011 "flash_id: x%02x, asic_rev: x%02x\n",
6012 phba->sli4_hba.lnk_info.lnk_tp,
6013 phba->sli4_hba.lnk_info.lnk_no,
6014 phba->BIOSVersion, phba->sli4_hba.flash_id,
6015 phba->sli4_hba.asic_rev);
6017 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6018 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6020 mempool_free(mboxq, phba->mbox_mem_pool);
6025 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
6026 * @phba: pointer to lpfc hba data structure.
6028 * This routine retrieves the SLI4 device physical port name that this PCI function
6033 * otherwise - failed to retrieve physical port name
6036 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
6038 LPFC_MBOXQ_t *mboxq;
6039 struct lpfc_mbx_get_port_name *get_port_name;
6040 uint32_t shdr_status, shdr_add_status;
6041 union lpfc_sli4_cfg_shdr *shdr;
6042 char cport_name = 0;
6045 /* We assume nothing at this point */
6046 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6047 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
6049 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6052 /* obtain link type and link number via READ_CONFIG */
6053 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6054 lpfc_sli4_read_config(phba);
6055 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
6056 goto retrieve_ppname;
6058 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
6059 rc = lpfc_sli4_get_ctl_attr(phba);
6061 goto out_free_mboxq;
6064 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6065 LPFC_MBOX_OPCODE_GET_PORT_NAME,
6066 sizeof(struct lpfc_mbx_get_port_name) -
6067 sizeof(struct lpfc_sli4_cfg_mhdr),
6068 LPFC_SLI4_MBX_EMBED);
6069 get_port_name = &mboxq->u.mqe.un.get_port_name;
6070 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
6071 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
6072 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
6073 phba->sli4_hba.lnk_info.lnk_tp);
6074 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6075 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6076 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6077 if (shdr_status || shdr_add_status || rc) {
6078 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6079 "3087 Mailbox x%x (x%x/x%x) failed: "
6080 "rc:x%x, status:x%x, add_status:x%x\n",
6081 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6082 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6083 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6084 rc, shdr_status, shdr_add_status);
6086 goto out_free_mboxq;
6088 switch (phba->sli4_hba.lnk_info.lnk_no) {
6089 case LPFC_LINK_NUMBER_0:
6090 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
6091 &get_port_name->u.response);
6092 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6094 case LPFC_LINK_NUMBER_1:
6095 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
6096 &get_port_name->u.response);
6097 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6099 case LPFC_LINK_NUMBER_2:
6100 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
6101 &get_port_name->u.response);
6102 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6104 case LPFC_LINK_NUMBER_3:
6105 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
6106 &get_port_name->u.response);
6107 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6113 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
6114 phba->Port[0] = cport_name;
6115 phba->Port[1] = '\0';
6116 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6117 "3091 SLI get port name: %s\n", phba->Port);
6121 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6122 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6124 mempool_free(mboxq, phba->mbox_mem_pool);
6129 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
6130 * @phba: pointer to lpfc hba data structure.
6132 * This routine is called to explicitly arm the SLI4 device's completion and event queues.
6136 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
6139 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
6140 struct lpfc_sli4_hdw_queue *qp;
6141 struct lpfc_queue *eq;
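/*
* Writing a queue doorbell with LPFC_QUEUE_REARM sets the arm bit so the
* port raises an interrupt when the next entry is posted. Each CQ is
* rearmed here before its parent EQ is rearmed below.
*/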
6143 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
6144 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
6145 if (sli4_hba->nvmels_cq)
6146 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
6149 if (sli4_hba->hdwq) {
6150 /* Loop thru all Hardware Queues */
6151 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
6152 qp = &sli4_hba->hdwq[qidx];
6153 /* ARM the corresponding CQ */
6154 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
6158 /* Loop thru all IRQ vectors */
6159 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
6160 eq = sli4_hba->hba_eq_hdl[qidx].eq;
6161 /* ARM the corresponding EQ */
6162 sli4_hba->sli4_write_eq_db(phba, eq,
6163 0, LPFC_QUEUE_REARM);
6167 if (phba->nvmet_support) {
6168 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
6169 sli4_hba->sli4_write_cq_db(phba,
6170 sli4_hba->nvmet_cqset[qidx], 0,
6177 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
6178 * @phba: Pointer to HBA context object.
6179 * @type: The resource extent type.
6180 * @extnt_count: buffer to hold port available extent count.
6181 * @extnt_size: buffer to hold element count per extent.
6183 * This function calls the port and retrieves the number of available
6184 * extents and their size for a particular extent type.
6186 * Returns: 0 if successful. Nonzero otherwise.
6189 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
6190 uint16_t *extnt_count, uint16_t *extnt_size)
6195 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
6198 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6202 /* Find out how many extents are available for this resource type */
6203 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
6204 sizeof(struct lpfc_sli4_cfg_mhdr));
6205 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6206 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
6207 length, LPFC_SLI4_MBX_EMBED);
6209 /* Send an extents count of 0 - the GET doesn't use it. */
6210 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6211 LPFC_SLI4_MBX_EMBED);
6217 if (!phba->sli4_hba.intr_enable)
6218 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6220 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6221 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6228 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
6229 if (bf_get(lpfc_mbox_hdr_status,
6230 &rsrc_info->header.cfg_shdr.response)) {
6231 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6232 "2930 Failed to get resource extents "
6233 "Status 0x%x Add'l Status 0x%x\n",
6234 bf_get(lpfc_mbox_hdr_status,
6235 &rsrc_info->header.cfg_shdr.response),
6236 bf_get(lpfc_mbox_hdr_add_status,
6237 &rsrc_info->header.cfg_shdr.response));
6242 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
6244 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
6247 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6248 "3162 Retrieved extents type-%d from port: count:%d, "
6249 "size:%d\n", type, *extnt_count, *extnt_size);
6252 mempool_free(mbox, phba->mbox_mem_pool);
6257 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
6258 * @phba: Pointer to HBA context object.
6259 * @type: The extent type to check.
6261 * This function reads the current available extents from the port and checks
6262 * if the extent count or extent size has changed since the last access.
6263 * Callers use this routine post port reset to determine whether there is an
6264 * extent reprovisioning requirement.
6267 * -Error: error indicates problem.
6268 * 1: Extent count or size has changed.
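 * (e.g., new firmware activated across the reset may have reprovisioned
 * the port's extents).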
6272 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
6274 uint16_t curr_ext_cnt, rsrc_ext_cnt;
6275 uint16_t size_diff, rsrc_ext_size;
6277 struct lpfc_rsrc_blks *rsrc_entry;
6278 struct list_head *rsrc_blk_list = NULL;
6282 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6289 case LPFC_RSC_TYPE_FCOE_RPI:
6290 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6292 case LPFC_RSC_TYPE_FCOE_VPI:
6293 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
6295 case LPFC_RSC_TYPE_FCOE_XRI:
6296 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6298 case LPFC_RSC_TYPE_FCOE_VFI:
6299 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6305 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
curr_ext_cnt++;
6307 if (rsrc_entry->rsrc_size != rsrc_ext_size)
size_diff++;
}
6311 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
6318 * lpfc_sli4_cfg_post_extnts - Allocate resource extents from the port.
6319 * @phba: Pointer to HBA context object.
6320 * @extnt_cnt: number of available extents.
6321 * @type: the extent type (rpi, xri, vfi, vpi).
6322 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
6323 * @mbox: pointer to the caller's allocated mailbox structure.
6325 * This function executes the extents allocation request. It also
6326 * takes care of the amount of memory needed to allocate or get the
6327 * allocated extents. It is the caller's responsibility to evaluate the response.
6331 * -Error: Error value describes the condition found.
6335 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6336 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6341 uint32_t alloc_len, mbox_tmo;
6343 /* Calculate the total requested length of the dma memory */
6344 req_len = extnt_cnt * sizeof(uint16_t);
6347 * Calculate the size of an embedded mailbox. The uint32_t
6348 * accounts for the extents-specific word.
6350 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6354 * Presume the allocation and response will fit into an embedded
6355 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6357 *emb = LPFC_SLI4_MBX_EMBED;
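/*
* Each extent id is a u16, so an embedded mailbox can return at most
* emb_len / sizeof(uint16_t) ids; larger requests are switched to the
* non-embedded (SGE based) form below.
*/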
6358 if (req_len > emb_len) {
6359 req_len = extnt_cnt * sizeof(uint16_t) +
6360 sizeof(union lpfc_sli4_cfg_shdr) +
6362 *emb = LPFC_SLI4_MBX_NEMBED;
6365 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6366 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6368 if (alloc_len < req_len) {
6369 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6370 "2982 Allocated DMA memory size (x%x) is "
6371 "less than the requested DMA memory "
6372 "size (x%x)\n", alloc_len, req_len);
6375 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6379 if (!phba->sli4_hba.intr_enable)
6380 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6382 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6383 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6392 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
6393 * @phba: Pointer to HBA context object.
6394 * @type: The resource extent type to allocate.
6396 * This function allocates the number of elements for the specified resource type.
6400 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6403 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6404 uint16_t rsrc_id, rsrc_start, j, k;
6407 unsigned long longs;
6408 unsigned long *bmask;
6409 struct lpfc_rsrc_blks *rsrc_blks;
6412 struct lpfc_id_range *id_array = NULL;
6413 void *virtaddr = NULL;
6414 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6415 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6416 struct list_head *ext_blk_list;
6418 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6424 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
6425 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6426 "3009 No available Resource Extents "
6427 "for resource type 0x%x: Count: 0x%x, "
6428 "Size 0x%x\n", type, rsrc_cnt,
6433 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6434 "2903 Post resource extents type-0x%x: "
6435 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6437 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6441 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6448 * Figure out where the response is located. Then get local pointers
6449 * to the response data. The port does not guarantee to respond to
6450 * all extent count requests, so update the local variable with the
6451 * allocated count from the port.
6453 if (emb == LPFC_SLI4_MBX_EMBED) {
6454 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6455 id_array = &rsrc_ext->u.rsp.id[0];
6456 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6458 virtaddr = mbox->sge_array->addr[0];
6459 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6460 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6461 id_array = &n_rsrc->id;
6464 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6465 rsrc_id_cnt = rsrc_cnt * rsrc_size;
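/*
* Sizing example (illustrative): 4 extents of 64 ids each gives
* rsrc_id_cnt = 256 and, with BITS_PER_LONG = 64, longs = 4 for the
* id bitmasks allocated below.
*/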
6468 * Based on the resource size and count, correct the base and max
6471 length = sizeof(struct lpfc_rsrc_blks);
6473 case LPFC_RSC_TYPE_FCOE_RPI:
6474 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6475 sizeof(unsigned long),
6477 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6481 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6484 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6485 kfree(phba->sli4_hba.rpi_bmask);
6491 * The next_rpi was initialized with the maximum available
6492 * count but the port may allocate a smaller number. Catch
6493 * that case and update the next_rpi.
6495 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6497 /* Initialize local ptrs for common extent processing later. */
6498 bmask = phba->sli4_hba.rpi_bmask;
6499 ids = phba->sli4_hba.rpi_ids;
6500 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6502 case LPFC_RSC_TYPE_FCOE_VPI:
6503 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6505 if (unlikely(!phba->vpi_bmask)) {
6509 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6511 if (unlikely(!phba->vpi_ids)) {
6512 kfree(phba->vpi_bmask);
6517 /* Initialize local ptrs for common extent processing later. */
6518 bmask = phba->vpi_bmask;
6519 ids = phba->vpi_ids;
6520 ext_blk_list = &phba->lpfc_vpi_blk_list;
6522 case LPFC_RSC_TYPE_FCOE_XRI:
6523 phba->sli4_hba.xri_bmask = kcalloc(longs,
6524 sizeof(unsigned long),
6526 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6530 phba->sli4_hba.max_cfg_param.xri_used = 0;
6531 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6534 if (unlikely(!phba->sli4_hba.xri_ids)) {
6535 kfree(phba->sli4_hba.xri_bmask);
6540 /* Initialize local ptrs for common extent processing later. */
6541 bmask = phba->sli4_hba.xri_bmask;
6542 ids = phba->sli4_hba.xri_ids;
6543 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6545 case LPFC_RSC_TYPE_FCOE_VFI:
6546 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6547 sizeof(unsigned long),
6549 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6553 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6556 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6557 kfree(phba->sli4_hba.vfi_bmask);
6562 /* Initialize local ptrs for common extent processing later. */
6563 bmask = phba->sli4_hba.vfi_bmask;
6564 ids = phba->sli4_hba.vfi_ids;
6565 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6568 /* Unsupported Opcode. Fail call. */
6572 ext_blk_list = NULL;
6577 * Complete initializing the extent configuration with the
6578 * allocated ids assigned to this function. The bitmask serves
6579 * as an index into the array and manages the available ids. The
6580 * array just stores the ids communicated to the port via the wqes.
6582 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6584 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6587 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6590 rsrc_blks = kzalloc(length, GFP_KERNEL);
6591 if (unlikely(!rsrc_blks)) {
6597 rsrc_blks->rsrc_start = rsrc_id;
6598 rsrc_blks->rsrc_size = rsrc_size;
6599 list_add_tail(&rsrc_blks->list, ext_blk_list);
6600 rsrc_start = rsrc_id;
6601 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6602 phba->sli4_hba.io_xri_start = rsrc_start +
6603 lpfc_sli4_get_iocb_cnt(phba);
6606 while (rsrc_id < (rsrc_start + rsrc_size)) {
6611 /* Entire word processed. Get next word. */
6616 lpfc_sli4_mbox_cmd_free(phba, mbox);
6623 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6624 * @phba: Pointer to HBA context object.
6625 * @type: the extent's type.
6627 * This function deallocates all extents of a particular resource type.
6628 * SLI4 does not allow for deallocating a particular extent range. It
6629 * is the caller's responsibility to release all kernel memory resources.
6632 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6635 uint32_t length, mbox_tmo = 0;
6637 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6638 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6640 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6645 * This function sends an embedded mailbox because it only sends the
6646 * resource type. All extents of this type are released by the port.
6649 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6650 sizeof(struct lpfc_sli4_cfg_mhdr));
6651 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6652 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6653 length, LPFC_SLI4_MBX_EMBED);
6655 /* Send an extents count of 0 - the dealloc doesn't use it. */
6656 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6657 LPFC_SLI4_MBX_EMBED);
6662 if (!phba->sli4_hba.intr_enable)
6663 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6665 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6666 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6673 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6674 if (bf_get(lpfc_mbox_hdr_status,
6675 &dealloc_rsrc->header.cfg_shdr.response)) {
6676 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6677 "2919 Failed to release resource extents "
6678 "for type %d - Status 0x%x Add'l Status 0x%x. "
6679 "Resource memory not released.\n",
6681 bf_get(lpfc_mbox_hdr_status,
6682 &dealloc_rsrc->header.cfg_shdr.response),
6683 bf_get(lpfc_mbox_hdr_add_status,
6684 &dealloc_rsrc->header.cfg_shdr.response));
6689 /* Release kernel memory resources for the specific type. */
6691 case LPFC_RSC_TYPE_FCOE_VPI:
6692 kfree(phba->vpi_bmask);
6693 kfree(phba->vpi_ids);
6694 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6695 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6696 &phba->lpfc_vpi_blk_list, list) {
6697 list_del_init(&rsrc_blk->list);
6700 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6702 case LPFC_RSC_TYPE_FCOE_XRI:
6703 kfree(phba->sli4_hba.xri_bmask);
6704 kfree(phba->sli4_hba.xri_ids);
6705 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6706 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6707 list_del_init(&rsrc_blk->list);
6711 case LPFC_RSC_TYPE_FCOE_VFI:
6712 kfree(phba->sli4_hba.vfi_bmask);
6713 kfree(phba->sli4_hba.vfi_ids);
6714 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6715 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6716 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6717 list_del_init(&rsrc_blk->list);
6721 case LPFC_RSC_TYPE_FCOE_RPI:
6722 /* RPI bitmask and physical id array are cleaned up earlier. */
6723 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6724 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6725 list_del_init(&rsrc_blk->list);
6733 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6736 mempool_free(mbox, phba->mbox_mem_pool);
6741 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6747 len = sizeof(struct lpfc_mbx_set_feature) -
6748 sizeof(struct lpfc_sli4_cfg_mhdr);
6749 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6750 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6751 LPFC_SLI4_MBX_EMBED);
6754 case LPFC_SET_UE_RECOVERY:
6755 bf_set(lpfc_mbx_set_feature_UER,
6756 &mbox->u.mqe.un.set_feature, 1);
6757 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6758 mbox->u.mqe.un.set_feature.param_len = 8;
6760 case LPFC_SET_MDS_DIAGS:
6761 bf_set(lpfc_mbx_set_feature_mds,
6762 &mbox->u.mqe.un.set_feature, 1);
6763 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6764 &mbox->u.mqe.un.set_feature, 1);
6765 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6766 mbox->u.mqe.un.set_feature.param_len = 8;
6768 case LPFC_SET_CGN_SIGNAL:
6769 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6772 sig_freq = phba->cgn_sig_freq;
6774 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6775 bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
6776 &mbox->u.mqe.un.set_feature, sig_freq);
6777 bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6778 &mbox->u.mqe.un.set_feature, sig_freq);
6781 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
6782 bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6783 &mbox->u.mqe.un.set_feature, sig_freq);
6785 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6786 phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
6789 sig_freq = lpfc_acqe_cgn_frequency;
6791 bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
6792 &mbox->u.mqe.un.set_feature, sig_freq);
6794 mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
6795 mbox->u.mqe.un.set_feature.param_len = 12;
6797 case LPFC_SET_DUAL_DUMP:
6798 bf_set(lpfc_mbx_set_feature_dd,
6799 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6800 bf_set(lpfc_mbx_set_feature_ddquery,
6801 &mbox->u.mqe.un.set_feature, 0);
6802 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6803 mbox->u.mqe.un.set_feature.param_len = 4;
6805 case LPFC_SET_ENABLE_MI:
6806 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
6807 mbox->u.mqe.un.set_feature.param_len = 4;
6808 bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
6809 phba->pport->cfg_lun_queue_depth);
6810 bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
6811 phba->sli4_hba.pc_sli4_params.mi_ver);
6813 case LPFC_SET_ENABLE_CMF:
6814 bf_set(lpfc_mbx_set_feature_dd, &mbox->u.mqe.un.set_feature, 1);
6815 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
6816 mbox->u.mqe.un.set_feature.param_len = 4;
6817 bf_set(lpfc_mbx_set_feature_cmf,
6818 &mbox->u.mqe.un.set_feature, 1);
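/*
* Typical lpfc_set_features() usage (illustrative sketch): build the
* request into a mailbox allocated from the mbox_mem_pool, then issue
* it polled or asynchronously, e.g.:
*
*	lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
*	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
*
* as lpfc_cmf_setup() and lpfc_config_cgn_signal() do later in this file.
*/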
6825 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6826 * @phba: Pointer to HBA context object.
6828 * Disable FW logging into host memory on the adapter. To
6829 * be done before reading logs from the host memory.
6832 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6834 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6836 spin_lock_irq(&phba->hbalock);
6837 ras_fwlog->state = INACTIVE;
6838 spin_unlock_irq(&phba->hbalock);
6840 /* Disable FW logging to host memory */
6841 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6842 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6844 /* Wait 10-20 ms for firmware to stop using the DMA buffer */
6845 usleep_range(10 * 1000, 20 * 1000);
6849 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6850 * @phba: Pointer to HBA context object.
6852 * This function is called to free memory allocated for RAS FW logging
6853 * support in the driver.
6856 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6858 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6859 struct lpfc_dmabuf *dmabuf, *next;
6861 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6862 list_for_each_entry_safe(dmabuf, next,
6863 &ras_fwlog->fwlog_buff_list,
6865 list_del(&dmabuf->list);
6866 dma_free_coherent(&phba->pcidev->dev,
6867 LPFC_RAS_MAX_ENTRY_SIZE,
6868 dmabuf->virt, dmabuf->phys);
6873 if (ras_fwlog->lwpd.virt) {
6874 dma_free_coherent(&phba->pcidev->dev,
6875 sizeof(uint32_t) * 2,
6876 ras_fwlog->lwpd.virt,
6877 ras_fwlog->lwpd.phys);
6878 ras_fwlog->lwpd.virt = NULL;
6881 spin_lock_irq(&phba->hbalock);
6882 ras_fwlog->state = INACTIVE;
6883 spin_unlock_irq(&phba->hbalock);
6887 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support
6888 * @phba: Pointer to HBA context object.
6889 * @fwlog_buff_count: Count of buffers to be created.
6891 * This routine allocates DMA memory for the Log Write Position Data (LWPD)
6892 * and for the buffers posted to the adapter for FW log updates.
6893 * Buffer count is calculated based on module param ras_fwlog_buffsize.
6894 * Size of each buffer posted to FW is 64K.
6898 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6899 uint32_t fwlog_buff_count)
6901 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6902 struct lpfc_dmabuf *dmabuf;
6905 /* Initialize List */
6906 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6908 /* Allocate memory for the LWPD */
6909 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6910 sizeof(uint32_t) * 2,
6911 &ras_fwlog->lwpd.phys,
6913 if (!ras_fwlog->lwpd.virt) {
6914 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6915 "6185 LWPD Memory Alloc Failed\n");
6920 ras_fwlog->fw_buffcount = fwlog_buff_count;
6921 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6922 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6926 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6927 "6186 Memory Alloc failed FW logging");
6931 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6932 LPFC_RAS_MAX_ENTRY_SIZE,
6933 &dmabuf->phys, GFP_KERNEL);
6934 if (!dmabuf->virt) {
6937 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6938 "6187 DMA Alloc Failed FW logging");
6941 dmabuf->buffer_tag = i;
6942 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6947 lpfc_sli4_ras_dma_free(phba);
6953 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6954 * @phba: pointer to lpfc hba data structure.
6955 * @pmb: pointer to the driver internal queue element for mailbox command.
6957 * Completion handler for driver's RAS MBX command to the device.
6960 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6963 union lpfc_sli4_cfg_shdr *shdr;
6964 uint32_t shdr_status, shdr_add_status;
6965 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6969 shdr = (union lpfc_sli4_cfg_shdr *)
6970 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6971 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6972 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6974 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6975 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6976 "6188 FW LOG mailbox "
6977 "completed with status x%x add_status x%x,"
6978 " mbx status x%x\n",
6979 shdr_status, shdr_add_status, mb->mbxStatus);
6981 ras_fwlog->ras_hwsupport = false;
6985 spin_lock_irq(&phba->hbalock);
6986 ras_fwlog->state = ACTIVE;
6987 spin_unlock_irq(&phba->hbalock);
6988 mempool_free(pmb, phba->mbox_mem_pool);
6993 /* Free RAS DMA memory */
6994 lpfc_sli4_ras_dma_free(phba);
6995 mempool_free(pmb, phba->mbox_mem_pool);
6999 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
7000 * @phba: pointer to lpfc hba data structure.
7001 * @fwlog_level: Logging verbosity level.
7002 * @fwlog_enable: Enable/Disable logging.
7004 * Initialize memory and post a mailbox command to enable FW logging in host memory.
7008 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
7009 uint32_t fwlog_level,
7010 uint32_t fwlog_enable)
7012 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
7013 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
7014 struct lpfc_dmabuf *dmabuf;
7016 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
7019 spin_lock_irq(&phba->hbalock);
7020 ras_fwlog->state = INACTIVE;
7021 spin_unlock_irq(&phba->hbalock);
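/*
* Sizing sketch (assuming the driver's 256 KB minimum post size and the
* 64 KB max entry size): cfg_ras_fwlog_buffsize = 1 yields a 256 KB log
* area split into four 64 KB buffers posted to the adapter.
*/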
7023 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
7024 phba->cfg_ras_fwlog_buffsize);
7025 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
7028 * If re-enabling FW logging support, use the earlier allocated
7029 * DMA buffers while posting the MBX command.
7031 if (!ras_fwlog->lwpd.virt) {
7032 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
7034 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7035 "6189 FW Log Memory Allocation Failed");
7040 /* Setup Mailbox command */
7041 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7043 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7044 "6190 RAS MBX Alloc Failed");
7049 ras_fwlog->fw_loglevel = fwlog_level;
7050 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
7051 sizeof(struct lpfc_sli4_cfg_mhdr));
7053 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
7054 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
7055 len, LPFC_SLI4_MBX_EMBED);
7057 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
7058 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
7060 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
7061 ras_fwlog->fw_loglevel);
7062 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
7063 ras_fwlog->fw_buffcount);
7064 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
7065 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
7067 /* Update DMA buffer address */
7068 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
7069 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
7071 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
7072 putPaddrLow(dmabuf->phys);
7074 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
7075 putPaddrHigh(dmabuf->phys);
7078 /* Update LWPD address */
7079 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
7080 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
7082 spin_lock_irq(&phba->hbalock);
7083 ras_fwlog->state = REG_INPROGRESS;
7084 spin_unlock_irq(&phba->hbalock);
7085 mbox->vport = phba->pport;
7086 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
7088 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7090 if (rc == MBX_NOT_FINISHED) {
7091 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7092 "6191 FW-Log Mailbox failed. "
7093 "status %d mbxStatus : x%x", rc,
7094 bf_get(lpfc_mqe_status, &mbox->u.mqe));
7095 mempool_free(mbox, phba->mbox_mem_pool);
7102 lpfc_sli4_ras_dma_free(phba);
7108 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
7109 * @phba: Pointer to HBA context object.
7111 * Check if RAS is supported on the adapter and initialize it.
7114 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
7116 /* Check RAS FW Log needs to be enabled or not */
7117 if (lpfc_check_fwlog_support(phba))
7120 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
7121 LPFC_RAS_ENABLE_LOGGING);
7125 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
7126 * @phba: Pointer to HBA context object.
7128 * This function allocates all SLI4 resource identifiers.
7131 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
7133 int i, rc, error = 0;
7134 uint16_t count, base;
7135 unsigned long longs;
7137 if (!phba->sli4_hba.rpi_hdrs_in_use)
7138 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
7139 if (phba->sli4_hba.extents_in_use) {
7141 * The port supports resource extents. The XRI, VPI, VFI, RPI
7142 * resource extent count must be read and allocated before
7143 * provisioning the resource id arrays.
7145 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7146 LPFC_IDX_RSRC_RDY) {
7148 * Extent-based resources are set - the driver could
7149 * be in a port reset. Figure out if any corrective
7150 * actions need to be taken.
7152 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7153 LPFC_RSC_TYPE_FCOE_VFI);
7156 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7157 LPFC_RSC_TYPE_FCOE_VPI);
7160 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7161 LPFC_RSC_TYPE_FCOE_XRI);
7164 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7165 LPFC_RSC_TYPE_FCOE_RPI);
7170 * It's possible that the number of resources
7171 * provided to this port instance changed between
7172 * resets. Detect this condition and reallocate
7173 * resources. Otherwise, there is no action.
7176 lpfc_printf_log(phba, KERN_INFO,
7177 LOG_MBOX | LOG_INIT,
7178 "2931 Detected extent resource "
7179 "change. Reallocating all "
7181 rc = lpfc_sli4_dealloc_extent(phba,
7182 LPFC_RSC_TYPE_FCOE_VFI);
7183 rc = lpfc_sli4_dealloc_extent(phba,
7184 LPFC_RSC_TYPE_FCOE_VPI);
7185 rc = lpfc_sli4_dealloc_extent(phba,
7186 LPFC_RSC_TYPE_FCOE_XRI);
7187 rc = lpfc_sli4_dealloc_extent(phba,
7188 LPFC_RSC_TYPE_FCOE_RPI);
7193 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7197 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7201 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7205 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7208 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7213 * The port does not support resource extents. The XRI, VPI,
7214 * VFI, RPI resource ids were determined from READ_CONFIG.
7215 * Just allocate the bitmasks and provision the resource id
7216 * arrays. If a port reset is active, the resources don't
7217 * need any action - just exit.
7219 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7220 LPFC_IDX_RSRC_RDY) {
7221 lpfc_sli4_dealloc_resource_identifiers(phba);
7222 lpfc_sli4_remove_rpis(phba);
7225 count = phba->sli4_hba.max_cfg_param.max_rpi;
7227 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7228 "3279 Invalid provisioning of "
7233 base = phba->sli4_hba.max_cfg_param.rpi_base;
7234 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7235 phba->sli4_hba.rpi_bmask = kcalloc(longs,
7236 sizeof(unsigned long),
7238 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
7242 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
7244 if (unlikely(!phba->sli4_hba.rpi_ids)) {
7246 goto free_rpi_bmask;
7249 for (i = 0; i < count; i++)
7250 phba->sli4_hba.rpi_ids[i] = base + i;
7253 count = phba->sli4_hba.max_cfg_param.max_vpi;
7255 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7256 "3280 Invalid provisioning of "
7261 base = phba->sli4_hba.max_cfg_param.vpi_base;
7262 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7263 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
7265 if (unlikely(!phba->vpi_bmask)) {
7269 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
7271 if (unlikely(!phba->vpi_ids)) {
7273 goto free_vpi_bmask;
7276 for (i = 0; i < count; i++)
7277 phba->vpi_ids[i] = base + i;
7280 count = phba->sli4_hba.max_cfg_param.max_xri;
7282 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7283 "3281 Invalid provisioning of "
7288 base = phba->sli4_hba.max_cfg_param.xri_base;
7289 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7290 phba->sli4_hba.xri_bmask = kcalloc(longs,
7291 sizeof(unsigned long),
7293 if (unlikely(!phba->sli4_hba.xri_bmask)) {
7297 phba->sli4_hba.max_cfg_param.xri_used = 0;
7298 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
7300 if (unlikely(!phba->sli4_hba.xri_ids)) {
7302 goto free_xri_bmask;
7305 for (i = 0; i < count; i++)
7306 phba->sli4_hba.xri_ids[i] = base + i;
7309 count = phba->sli4_hba.max_cfg_param.max_vfi;
7311 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7312 "3282 Invalid provisioning of "
7317 base = phba->sli4_hba.max_cfg_param.vfi_base;
7318 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7319 phba->sli4_hba.vfi_bmask = kcalloc(longs,
7320 sizeof(unsigned long),
7322 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
7326 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
7328 if (unlikely(!phba->sli4_hba.vfi_ids)) {
7330 goto free_vfi_bmask;
7333 for (i = 0; i < count; i++)
7334 phba->sli4_hba.vfi_ids[i] = base + i;
7337 * Mark all resources ready. An HBA reset doesn't need
7338 * to redo this initialization.
7340 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7346 kfree(phba->sli4_hba.vfi_bmask);
7347 phba->sli4_hba.vfi_bmask = NULL;
7349 kfree(phba->sli4_hba.xri_ids);
7350 phba->sli4_hba.xri_ids = NULL;
7352 kfree(phba->sli4_hba.xri_bmask);
7353 phba->sli4_hba.xri_bmask = NULL;
7355 kfree(phba->vpi_ids);
7356 phba->vpi_ids = NULL;
7358 kfree(phba->vpi_bmask);
7359 phba->vpi_bmask = NULL;
7361 kfree(phba->sli4_hba.rpi_ids);
7362 phba->sli4_hba.rpi_ids = NULL;
7364 kfree(phba->sli4_hba.rpi_bmask);
7365 phba->sli4_hba.rpi_bmask = NULL;
7371 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
7372 * @phba: Pointer to HBA context object.
7374 * This function releases all SLI4 resource identifiers allocated by the driver.
7378 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
7380 if (phba->sli4_hba.extents_in_use) {
7381 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7382 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7383 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7384 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7386 kfree(phba->vpi_bmask);
7387 phba->sli4_hba.max_cfg_param.vpi_used = 0;
7388 kfree(phba->vpi_ids);
7389 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7390 kfree(phba->sli4_hba.xri_bmask);
7391 kfree(phba->sli4_hba.xri_ids);
7392 kfree(phba->sli4_hba.vfi_bmask);
7393 kfree(phba->sli4_hba.vfi_ids);
7394 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7395 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7402 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
7403 * @phba: Pointer to HBA context object.
7404 * @type: The resource extent type.
7405 * @extnt_cnt: buffer to hold port extent count response
7406 * @extnt_size: buffer to hold port extent size response.
7408 * This function calls the port to read the host allocated extents
7409 * for a particular type.
7412 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
7413 uint16_t *extnt_cnt, uint16_t *extnt_size)
7417 uint16_t curr_blks = 0;
7418 uint32_t req_len, emb_len;
7419 uint32_t alloc_len, mbox_tmo;
7420 struct list_head *blk_list_head;
7421 struct lpfc_rsrc_blks *rsrc_blk;
7423 void *virtaddr = NULL;
7424 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
7425 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
7426 union lpfc_sli4_cfg_shdr *shdr;
7429 case LPFC_RSC_TYPE_FCOE_VPI:
7430 blk_list_head = &phba->lpfc_vpi_blk_list;
7432 case LPFC_RSC_TYPE_FCOE_XRI:
7433 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
7435 case LPFC_RSC_TYPE_FCOE_VFI:
7436 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
7438 case LPFC_RSC_TYPE_FCOE_RPI:
7439 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
7445 /* Count the number of extents currently allocated for this type. */
7446 list_for_each_entry(rsrc_blk, blk_list_head, list) {
7447 if (curr_blks == 0) {
7449 * The GET_ALLOCATED mailbox does not return the size,
7450 * just the count. The size should be just the size
7451 * stored in the current allocated block and all sizes
7452 * for an extent type are the same so set the return value now.
7455 *extnt_size = rsrc_blk->rsrc_size;
7461 * Calculate the size of an embedded mailbox. The uint32_t
7462 * accounts for the extents-specific word.
7464 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
7468 * Presume the allocation and response will fit into an embedded
7469 * mailbox. If not true, reconfigure to a non-embedded mailbox.
7471 emb = LPFC_SLI4_MBX_EMBED;
7473 if (req_len > emb_len) {
7474 req_len = curr_blks * sizeof(uint16_t) +
7475 sizeof(union lpfc_sli4_cfg_shdr) +
7477 emb = LPFC_SLI4_MBX_NEMBED;
7480 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7483 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
7485 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7486 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
7488 if (alloc_len < req_len) {
7489 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7490 "2983 Allocated DMA memory size (x%x) is "
7491 "less than the requested DMA memory "
7492 "size (x%x)\n", alloc_len, req_len);
7496 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
7502 if (!phba->sli4_hba.intr_enable)
7503 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7505 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
7506 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7515 * Figure out where the response is located. Then get local pointers
7516 * to the response data. The port does not guarantee to respond to
7517 * all extent count requests, so update the local variable with the
7518 * allocated count from the port.
7520 if (emb == LPFC_SLI4_MBX_EMBED) {
7521 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7522 shdr = &rsrc_ext->header.cfg_shdr;
7523 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7525 virtaddr = mbox->sge_array->addr[0];
7526 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7527 shdr = &n_rsrc->cfg_shdr;
7528 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7531 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7532 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7533 "2984 Failed to read allocated resources "
7534 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7536 bf_get(lpfc_mbox_hdr_status, &shdr->response),
7537 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7542 lpfc_sli4_mbox_cmd_free(phba, mbox);
7547 * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
7548 * @phba: pointer to lpfc hba data structure.
7549 * @sgl_list: linked link of sgl buffers to post
7550 * @cnt: number of linked list buffers
7552 * This routine walks the list of buffers that have been allocated and
7553 * reposts them to the port by using SGL block post. This is needed after a
7554 * pci_function_reset/warm_start or start. It attempts to construct blocks
7555 * of buffer sgls which contain contiguous xris and uses the non-embedded
7556 * SGL block post mailbox commands to post them to the port. For a single
7557 * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL post
7558 * mailbox command for posting.
7560 * Returns: 0 = success, non-zero failure.
7563 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7564 struct list_head *sgl_list, int cnt)
7566 struct lpfc_sglq *sglq_entry = NULL;
7567 struct lpfc_sglq *sglq_entry_next = NULL;
7568 struct lpfc_sglq *sglq_entry_first = NULL;
7569 int status, total_cnt;
7570 int post_cnt = 0, num_posted = 0, block_cnt = 0;
7571 int last_xritag = NO_XRI;
7572 LIST_HEAD(prep_sgl_list);
7573 LIST_HEAD(blck_sgl_list);
7574 LIST_HEAD(allc_sgl_list);
7575 LIST_HEAD(post_sgl_list);
7576 LIST_HEAD(free_sgl_list);
7578 spin_lock_irq(&phba->hbalock);
7579 spin_lock(&phba->sli4_hba.sgl_list_lock);
7580 list_splice_init(sgl_list, &allc_sgl_list);
7581 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7582 spin_unlock_irq(&phba->hbalock);
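/*
* Group sgls whose xritags are contiguous into blocks for non-embedded
* block posting; e.g. xritags 5,6,7,9 become a block post of {5,6,7}
* and a single embedded post of {9}.
*/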
7585 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7586 &allc_sgl_list, list) {
7587 list_del_init(&sglq_entry->list);
7589 if ((last_xritag != NO_XRI) &&
7590 (sglq_entry->sli4_xritag != last_xritag + 1)) {
7591 /* a hole in xri block, form a sgl posting block */
7592 list_splice_init(&prep_sgl_list, &blck_sgl_list);
7593 post_cnt = block_cnt - 1;
7594 /* prepare list for next posting block */
7595 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7598 /* prepare list for next posting block */
7599 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7600 /* enough sgls for non-embed sgl mbox command */
7601 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7602 list_splice_init(&prep_sgl_list,
7604 post_cnt = block_cnt;
7610 /* keep track of last sgl's xritag */
7611 last_xritag = sglq_entry->sli4_xritag;
7613 /* end of repost sgl list condition for buffers */
7614 if (num_posted == total_cnt) {
7615 if (post_cnt == 0) {
7616 list_splice_init(&prep_sgl_list,
7618 post_cnt = block_cnt;
7619 } else if (block_cnt == 1) {
7620 status = lpfc_sli4_post_sgl(phba,
7621 sglq_entry->phys, 0,
7622 sglq_entry->sli4_xritag);
7624 /* successful, put sgl to posted list */
7625 list_add_tail(&sglq_entry->list,
7628 /* Failure, put sgl to free list */
7629 lpfc_printf_log(phba, KERN_WARNING,
7631 "3159 Failed to post "
7632 "sgl, xritag:x%x\n",
7633 sglq_entry->sli4_xritag);
7634 list_add_tail(&sglq_entry->list,
7641 /* continue until a nembed page worth of sgls */
7645 /* post the buffer list sgls as a block */
7646 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7650 /* success, put sgl list to posted sgl list */
7651 list_splice_init(&blck_sgl_list, &post_sgl_list);
7653 /* Failure, put sgl list to free sgl list */
7654 sglq_entry_first = list_first_entry(&blck_sgl_list,
7657 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7658 "3160 Failed to post sgl-list, "
7660 sglq_entry_first->sli4_xritag,
7661 (sglq_entry_first->sli4_xritag +
7663 list_splice_init(&blck_sgl_list, &free_sgl_list);
7664 total_cnt -= post_cnt;
7667 /* don't reset xritag due to hole in xri block */
7669 last_xritag = NO_XRI;
7671 /* reset sgl post count for next round of posting */
7675 /* free the sgls failed to post */
7676 lpfc_free_sgl_list(phba, &free_sgl_list);
7678 /* push sgls posted to the available list */
7679 if (!list_empty(&post_sgl_list)) {
7680 spin_lock_irq(&phba->hbalock);
7681 spin_lock(&phba->sli4_hba.sgl_list_lock);
7682 list_splice_init(&post_sgl_list, sgl_list);
7683 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7684 spin_unlock_irq(&phba->hbalock);
7686 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7687 "3161 Failure to post sgl to port.\n");
7691 /* return the number of XRIs actually posted */
7696 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7697 * @phba: pointer to lpfc hba data structure.
7699 * This routine walks the list of nvme buffers that have been allocated and
7700 * reposts them to the port by using SGL block post. This is needed after a
7701 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7702 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7703 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7705 * Returns: 0 = success, non-zero failure.
7708 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7710 LIST_HEAD(post_nblist);
7711 int num_posted, rc = 0;
7713 /* gather all NVME buffers that need reposting onto a local list */
7714 lpfc_io_buf_flush(phba, &post_nblist);
7716 /* post the list of nvme buffer sgls to port if available */
7717 if (!list_empty(&post_nblist)) {
7718 num_posted = lpfc_sli4_post_io_sgl_list(
7719 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7720 /* failed to post any nvme buffer, return error */
7721 if (num_posted == 0)
7728 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7732 len = sizeof(struct lpfc_mbx_set_host_data) -
7733 sizeof(struct lpfc_sli4_cfg_mhdr);
7734 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7735 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7736 LPFC_SLI4_MBX_EMBED);
7738 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7739 mbox->u.mqe.un.set_host_data.param_len =
7740 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7741 snprintf(mbox->u.mqe.un.set_host_data.un.data,
7742 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7743 "Linux %s v"LPFC_DRIVER_VERSION,
7744 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
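/*
* This produces e.g. "Linux FC v14.2.0.0" (version string illustrative),
* letting the port report the host OS and driver level upstream.
*/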
7748 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7749 struct lpfc_queue *drq, int count, int idx)
7752 struct lpfc_rqe hrqe;
7753 struct lpfc_rqe drqe;
7754 struct lpfc_rqb *rqbp;
7755 unsigned long flags;
7756 struct rqb_dmabuf *rqb_buffer;
7757 LIST_HEAD(rqb_buf_list);
rqbp = hrq->rqbp;
7760 for (i = 0; i < count; i++) {
7761 spin_lock_irqsave(&phba->hbalock, flags);
7762 /* If RQ is already full, don't bother */
7763 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7764 spin_unlock_irqrestore(&phba->hbalock, flags);
7767 spin_unlock_irqrestore(&phba->hbalock, flags);
7769 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
if (!rqb_buffer)
break;
7772 rqb_buffer->hrq = hrq;
7773 rqb_buffer->drq = drq;
7774 rqb_buffer->idx = idx;
7775 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7778 spin_lock_irqsave(&phba->hbalock, flags);
7779 while (!list_empty(&rqb_buf_list)) {
7780 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7783 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7784 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7785 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7786 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7787 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7789 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7790 "6421 Cannot post to HRQ %d: %x %x %x "
7798 rqbp->rqb_free_buffer(phba, rqb_buffer);
7800 list_add_tail(&rqb_buffer->hbuf.list,
7801 &rqbp->rqb_buffer_list);
7802 rqbp->buffer_count++;
7805 spin_unlock_irqrestore(&phba->hbalock, flags);
7810 lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7812 struct lpfc_vport *vport = pmb->vport;
7813 union lpfc_sli4_cfg_shdr *shdr;
7814 u32 shdr_status, shdr_add_status;
7817 /* Two outcomes. (1) Set features was successful and EDC negotiation
7818 * is done. (2) Mailbox failed, so fall back to FPIN support only.
7820 shdr = (union lpfc_sli4_cfg_shdr *)
7821 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7822 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7823 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7824 if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7825 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
7826 "2516 CGN SET_FEATURE mbox failed with "
7827 "status x%x add_status x%x, mbx status x%x "
7828 "Reset Congestion to FPINs only\n",
7829 shdr_status, shdr_add_status,
7830 pmb->u.mb.mbxStatus);
7831 /* If there is a mbox error, move on to RDF */
7832 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7833 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7837 /* Zero out Congestion Signal ACQE counter */
7838 phba->cgn_acqe_cnt = 0;
7840 acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
7841 &pmb->u.mqe.un.set_feature);
7842 sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
7843 &pmb->u.mqe.un.set_feature);
7844 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7845 "4620 SET_FEATURES Success: Freq: %ds %dms "
7846 " Reg: x%x x%x\n", acqe, sig,
7847 phba->cgn_reg_signal, phba->cgn_reg_fpin);
7849 mempool_free(pmb, phba->mbox_mem_pool);
7851 /* Register for FPIN events from the fabric now that the
7852 * EDC common_set_features has completed.
7854 lpfc_issue_els_rdf(vport, 0);
7858 lpfc_config_cgn_signal(struct lpfc_hba *phba)
7860 LPFC_MBOXQ_t *mboxq;
7863 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7867 lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
7868 mboxq->vport = phba->pport;
7869 mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;
7871 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7872 "4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
7874 phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
7875 phba->cgn_reg_signal, phba->cgn_reg_fpin);
7877 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7878 if (rc == MBX_NOT_FINISHED)
7883 mempool_free(mboxq, phba->mbox_mem_pool);
7885 /* If there is a mbox error, move on to RDF */
7886 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7887 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7888 lpfc_issue_els_rdf(phba->pport, 0);
7893 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7894 * @phba: pointer to lpfc hba data structure.
7896 * This routine initializes the per-cq idle_stat to dynamically dictate
7897 * polling decisions.
7902 static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7905 struct lpfc_sli4_hdw_queue *hdwq;
7906 struct lpfc_queue *cq;
7907 struct lpfc_idle_stat *idle_stat;
7910 for_each_present_cpu(i) {
7911 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7914 /* Skip if we've already handled this cq's primary CPU */
7918 idle_stat = &phba->sli4_hba.idle_stat[i];
7920 idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7921 idle_stat->prev_wall = wall;
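/*
* Defer completion handling to a workqueue when nvmet or congestion
* management is active; otherwise poll the cq from irq context.
*/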
7923 if (phba->nvmet_support ||
7924 phba->cmf_active_mode != LPFC_CFG_OFF)
7925 cq->poll_mode = LPFC_QUEUE_WORK;
7927 cq->poll_mode = LPFC_IRQ_POLL;
7930 if (!phba->nvmet_support)
7931 schedule_delayed_work(&phba->idle_stat_delay_work,
7932 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7935 static void lpfc_sli4_dip(struct lpfc_hba *phba)
7939 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7940 if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
7941 if_type == LPFC_SLI_INTF_IF_TYPE_6) {
7942 struct lpfc_register reg_data;
7944 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7948 if (bf_get(lpfc_sliport_status_dip, &reg_data))
7949 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7950 "2904 Firmware Dump Image Present"
7956 * lpfc_cmf_setup - Initialize congestion management and MI support
7957 * @phba: Pointer to HBA context object.
7959 * This is called from HBA setup during driver load or when the HBA
7960 * comes online. This does all the initialization to support CMF and MI.
7963 lpfc_cmf_setup(struct lpfc_hba *phba)
7965 LPFC_MBOXQ_t *mboxq;
7966 struct lpfc_dmabuf *mp;
7967 struct lpfc_pc_sli4_params *sli4_params;
7968 int rc, cmf, mi_ver;
7970 rc = lpfc_sli4_refresh_params(phba);
7974 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7978 sli4_params = &phba->sli4_hba.pc_sli4_params;
7980 /* Always try to enable MI feature if we can */
7981 if (sli4_params->mi_ver) {
7982 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
7983 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7984 mi_ver = bf_get(lpfc_mbx_set_feature_mi,
7985 &mboxq->u.mqe.un.set_feature);
7987 if (rc == MBX_SUCCESS) {
7989 lpfc_printf_log(phba,
7990 KERN_WARNING, LOG_CGN_MGMT,
7991 "6215 MI is enabled\n");
7992 sli4_params->mi_ver = mi_ver;
7994 lpfc_printf_log(phba,
7995 KERN_WARNING, LOG_CGN_MGMT,
7996 "6338 MI is disabled\n");
7997 sli4_params->mi_ver = 0;
8000 /* mi_ver is already set from GET_SLI4_PARAMETERS */
8001 lpfc_printf_log(phba, KERN_INFO,
8002 LOG_CGN_MGMT | LOG_INIT,
8003 "6245 Enable MI Mailbox x%x (x%x/x%x) "
8004 "failed, rc:x%x mi:x%x\n",
8005 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8006 lpfc_sli_config_mbox_subsys_get
8008 lpfc_sli_config_mbox_opcode_get
8010 rc, sli4_params->mi_ver);
8013 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8014 "6217 MI is disabled\n");
8017 /* Ensure FDMI is enabled for MI if enable_mi is set */
8018 if (sli4_params->mi_ver)
8019 phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
8021 /* Always try to enable CMF feature if we can */
8022 if (sli4_params->cmf) {
8023 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
8024 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8025 cmf = bf_get(lpfc_mbx_set_feature_cmf,
8026 &mboxq->u.mqe.un.set_feature);
8027 if (rc == MBX_SUCCESS && cmf) {
8028 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8029 "6218 CMF is enabled: mode %d\n",
8030 phba->cmf_active_mode);
8032 lpfc_printf_log(phba, KERN_WARNING,
8033 LOG_CGN_MGMT | LOG_INIT,
8034 "6219 Enable CMF Mailbox x%x (x%x/x%x) "
8035 "failed, rc:x%x dd:x%x\n",
8036 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8037 lpfc_sli_config_mbox_subsys_get
8039 lpfc_sli_config_mbox_opcode_get
8042 sli4_params->cmf = 0;
8043 phba->cmf_active_mode = LPFC_CFG_OFF;
8047 /* Allocate Congestion Information Buffer */
8049 mp = kmalloc(sizeof(*mp), GFP_KERNEL);
if (mp)
8051 mp->virt = dma_alloc_coherent
8052 (&phba->pcidev->dev,
8053 sizeof(struct lpfc_cgn_info),
8054 &mp->phys, GFP_KERNEL);
8055 if (!mp || !mp->virt) {
8056 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8057 "2640 Failed to alloc memory "
8058 "for Congestion Info\n");
8060 sli4_params->cmf = 0;
8061 phba->cmf_active_mode = LPFC_CFG_OFF;
8066 /* initialize congestion buffer info */
8067 lpfc_init_congestion_buf(phba);
8068 lpfc_init_congestion_stat(phba);
8070 /* Zero out Congestion Signal counters */
8071 atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
8072 atomic64_set(&phba->cgn_acqe_stat.warn, 0);
8075 rc = lpfc_sli4_cgn_params_read(phba);
8077 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8078 "6242 Error reading Cgn Params (%d)\n",
8080 /* Ensure CGN Mode is off */
8081 sli4_params->cmf = 0;
8083 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8084 "6243 CGN Event empty object.\n");
8085 /* Ensure CGN Mode is off */
8086 sli4_params->cmf = 0;
8090 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8091 "6220 CMF is disabled\n");
8094 /* Only register congestion buffer with firmware if BOTH
8095 * CMF and E2E are enabled.
8097 if (sli4_params->cmf && sli4_params->mi_ver) {
8098 rc = lpfc_reg_congestion_buf(phba);
8100 dma_free_coherent(&phba->pcidev->dev,
8101 sizeof(struct lpfc_cgn_info),
8102 phba->cgn_i->virt, phba->cgn_i->phys);
8105 /* Ensure CGN Mode is off */
8106 phba->cmf_active_mode = LPFC_CFG_OFF;
8110 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8111 "6470 Setup MI version %d CMF %d mode %d\n",
8112 sli4_params->mi_ver, sli4_params->cmf,
8113 phba->cmf_active_mode);
8115 mempool_free(mboxq, phba->mbox_mem_pool);
8117 /* Initialize atomic counters */
8118 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
8119 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
8120 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
8121 atomic_set(&phba->cgn_sync_warn_cnt, 0);
8122 atomic_set(&phba->cgn_driver_evt_cnt, 0);
8123 atomic_set(&phba->cgn_latency_evt_cnt, 0);
8124 atomic64_set(&phba->cgn_latency_evt, 0);
8126 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
8128 /* Allocate RX Monitor Buffer */
8129 if (!phba->rxtable) {
8130 phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY,
8131 sizeof(struct rxtable_entry),
GFP_KERNEL);
8133 if (!phba->rxtable) {
8134 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8135 "2644 Failed to alloc memory "
8136 "for RX Monitor Buffer\n");
8140 atomic_set(&phba->rxtable_idx_head, 0);
8141 atomic_set(&phba->rxtable_idx_tail, 0);
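/*
 * Illustrative sketch (not part of the driver): the RX monitor table above
 * is a fixed-size ring indexed by the two atomics just initialized. Under
 * these assumptions a producer claims a slot and advances the tail with the
 * index wrapped modulo LPFC_MAX_RXMONITOR_ENTRY:
 *
 *	int tail = atomic_read(&phba->rxtable_idx_tail);
 *	struct rxtable_entry *entry = &phba->rxtable[tail];
 *	// ... fill in *entry ...
 *	atomic_set(&phba->rxtable_idx_tail,
 *		   (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY);
 */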
8146 lpfc_set_host_tm(struct lpfc_hba *phba)
8148 LPFC_MBOXQ_t *mboxq;
uint32_t len, rc;
8150 struct timespec64 cur_time;
struct tm broken;
8152 uint32_t month, day, year;
8153 uint32_t hour, minute, second;
8154 struct lpfc_mbx_set_host_date_time *tm;
8156 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8160 len = sizeof(struct lpfc_mbx_set_host_data) -
8161 sizeof(struct lpfc_sli4_cfg_mhdr);
8162 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8163 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
8164 LPFC_SLI4_MBX_EMBED);
8166 mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
8167 mboxq->u.mqe.un.set_host_data.param_len =
8168 sizeof(struct lpfc_mbx_set_host_date_time);
8169 tm = &mboxq->u.mqe.un.set_host_data.un.tm;
8170 ktime_get_real_ts64(&cur_time);
8171 time64_to_tm(cur_time.tv_sec, 0, &broken);
8172 month = broken.tm_mon + 1;
8173 day = broken.tm_mday;
8174 year = broken.tm_year - 100;
8175 hour = broken.tm_hour;
8176 minute = broken.tm_min;
8177 second = broken.tm_sec;
8178 bf_set(lpfc_mbx_set_host_month, tm, month);
8179 bf_set(lpfc_mbx_set_host_day, tm, day);
8180 bf_set(lpfc_mbx_set_host_year, tm, year);
8181 bf_set(lpfc_mbx_set_host_hour, tm, hour);
8182 bf_set(lpfc_mbx_set_host_min, tm, minute);
8183 bf_set(lpfc_mbx_set_host_sec, tm, second);
8185 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8186 mempool_free(mboxq, phba->mbox_mem_pool);
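/*
 * Worked example of the encoding above (illustrative): for 2021-09-03
 * 14:07:30 UTC, time64_to_tm() yields tm_mon = 8, tm_mday = 3 and
 * tm_year = 121 (years since 1900), so the mailbox fields become
 * month = 9, day = 3 and year = 21, i.e. the year is carried as an
 * offset from 2000.
 */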
8191 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
8192 * @phba: Pointer to HBA context object.
8194 * This function is the main SLI4 device initialization PCI function. This
8195 * function is called by the HBA initialization code, HBA reset code and
8196 * HBA error attention handler code. Caller is not required to hold any lock.
8200 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
8202 int rc, i, cnt, len, dd;
8203 LPFC_MBOXQ_t *mboxq;
8204 struct lpfc_mqe *mqe;
8207 uint32_t ftr_rsp = 0;
8208 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
8209 struct lpfc_vport *vport = phba->pport;
8210 struct lpfc_dmabuf *mp;
8211 struct lpfc_rqb *rqbp;
8214 /* Perform a PCI function reset to start from clean */
8215 rc = lpfc_pci_function_reset(phba);
8219 /* Check the HBA Host Status Register for readiness */
8220 rc = lpfc_sli4_post_status_check(phba);
8224 spin_lock_irq(&phba->hbalock);
8225 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
8226 flg = phba->sli.sli_flag;
8227 spin_unlock_irq(&phba->hbalock);
8228 /* Allow a little time after setting SLI_ACTIVE for any polled
8229 * MBX commands to complete via BSG.
8231 for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
8233 spin_lock_irq(&phba->hbalock);
8234 flg = phba->sli.sli_flag;
8235 spin_unlock_irq(&phba->hbalock);
8239 lpfc_sli4_dip(phba);
8242 * Allocate a single mailbox container for initializing the port.
8245 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8249 /* Issue READ_REV to collect vpd and FW information. */
8250 vpd_size = SLI4_PAGE_SIZE;
8251 vpd = kzalloc(vpd_size, GFP_KERNEL);
8257 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
8263 mqe = &mboxq->u.mqe;
8264 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
8265 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
8266 phba->hba_flag |= HBA_FCOE_MODE;
8267 phba->fcp_embed_io = 0; /* SLI4 FC support only */
8269 phba->hba_flag &= ~HBA_FCOE_MODE;
8272 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
8274 phba->hba_flag |= HBA_FIP_SUPPORT;
8276 phba->hba_flag &= ~HBA_FIP_SUPPORT;
8278 phba->hba_flag &= ~HBA_IOQ_FLUSH;
8280 if (phba->sli_rev != LPFC_SLI_REV4) {
8281 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8282 "0376 READ_REV Error. SLI Level %d "
8283 "FCoE enabled %d\n",
8284 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
8290 rc = lpfc_set_host_tm(phba);
8291 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
8292 "6468 Set host date / time: Status x%x:\n", rc);
8295 * Continue initialization with default values even if driver failed
8296 * to read FCoE param config regions; only read the parameters if the board is in FCoE mode.
8299 if (phba->hba_flag & HBA_FCOE_MODE &&
8300 lpfc_sli4_read_fcoe_params(phba))
8301 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
8302 "2570 Failed to read FCoE parameters\n");
8305 * Retrieve the sli4 device physical port name; failure to do so
8306 * is considered non-fatal.
8308 rc = lpfc_sli4_retrieve_pport_name(phba);
8310 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8311 "3080 Successful retrieving SLI4 device "
8312 "physical port name: %s.\n", phba->Port);
8314 rc = lpfc_sli4_get_ctl_attr(phba);
8316 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8317 "8351 Successful retrieving SLI4 device "
8321 * Evaluate the read rev and vpd data. Populate the driver
8322 * state with the results. If this routine fails, the failure
8323 * is not fatal as the driver will use generic values.
8325 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
8326 if (unlikely(!rc)) {
8327 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8328 "0377 Error %d parsing vpd. "
8329 "Using defaults.\n", rc);
8334 /* Save information as VPD data */
8335 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
8336 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
8339 * This is because the first G7 ASIC doesn't support the standard
8340 * 0x5a NVME cmd descriptor type/subtype
8342 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8343 LPFC_SLI_INTF_IF_TYPE_6) &&
8344 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
8345 (phba->vpd.rev.smRev == 0) &&
8346 (phba->cfg_nvme_embed_cmd == 1))
8347 phba->cfg_nvme_embed_cmd = 0;
8349 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
8350 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
8352 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
8354 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
8356 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
8358 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
8359 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
8360 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
8361 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
8362 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
8363 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
8364 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8365 "(%d):0380 READ_REV Status x%x "
8366 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
8367 mboxq->vport ? mboxq->vport->vpi : 0,
8368 bf_get(lpfc_mqe_status, mqe),
8369 phba->vpd.rev.opFwName,
8370 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
8371 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
8373 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8374 LPFC_SLI_INTF_IF_TYPE_0) {
8375 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
8376 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8377 if (rc == MBX_SUCCESS) {
8378 phba->hba_flag |= HBA_RECOVERABLE_UE;
8379 /* Set 1Sec interval to detect UE */
8380 phba->eratt_poll_interval = 1;
8381 phba->sli4_hba.ue_to_sr = bf_get(
8382 lpfc_mbx_set_feature_UESR,
8383 &mboxq->u.mqe.un.set_feature);
8384 phba->sli4_hba.ue_to_rp = bf_get(
8385 lpfc_mbx_set_feature_UERP,
8386 &mboxq->u.mqe.un.set_feature);
8390 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
8391 /* Enable MDS Diagnostics only if the SLI Port supports it */
8392 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
8393 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8394 if (rc != MBX_SUCCESS)
8395 phba->mds_diags_support = 0;
8399 * Discover the port's supported feature set and match it against the
8402 lpfc_request_features(phba, mboxq);
8403 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8409 /* Disable VMID if app header is not supported */
8410 if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
8411 &mqe->un.req_ftrs))) {
8412 bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
8413 phba->cfg_vmid_app_header = 0;
8414 lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
8415 "1242 vmid feature not supported\n");
8419 * The port must support FCP initiator mode as this is the
8420 * only mode running in the host.
8422 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
8423 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8424 "0378 No support for fcpi mode.\n");
8428 /* Performance Hints are ONLY for FCoE */
8429 if (phba->hba_flag & HBA_FCOE_MODE) {
8430 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
8431 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
8433 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
8437 * If the port cannot support the host's requested features
8438 * then turn off the global config parameters to disable the
8439 * feature in the driver. This is not a fatal error.
8441 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8442 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
8443 phba->cfg_enable_bg = 0;
8444 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
8449 if (phba->max_vpi && phba->cfg_enable_npiv &&
8450 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
ftr_rsp++;
if (ftr_rsp) {
8454 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8455 "0379 Feature Mismatch Data: x%08x %08x "
8456 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
8457 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
8458 phba->cfg_enable_npiv, phba->max_vpi);
8459 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
8460 phba->cfg_enable_bg = 0;
8461 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8462 phba->cfg_enable_npiv = 0;
8465 /* These SLI3 features are assumed in SLI4 */
8466 spin_lock_irq(&phba->hbalock);
8467 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
8468 spin_unlock_irq(&phba->hbalock);
8470 /* Always try to enable dual dump feature if we can */
8471 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
8472 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8473 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
8474 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
8475 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8476 "6448 Dual Dump is enabled\n");
8478 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
8479 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
8481 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8482 lpfc_sli_config_mbox_subsys_get(
8484 lpfc_sli_config_mbox_opcode_get(
8488 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
8489 * calls depend on these resources to complete port setup.
8491 rc = lpfc_sli4_alloc_resource_identifiers(phba);
8493 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8494 "2920 Failed to alloc Resource IDs "
8499 lpfc_set_host_data(phba, mboxq);
8501 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8503 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8504 "2134 Failed to set host os driver version %x",
8508 /* Read the port's service parameters. */
8509 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
8511 phba->link_state = LPFC_HBA_ERROR;
8516 mboxq->vport = vport;
8517 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8518 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
8519 if (rc == MBX_SUCCESS) {
8520 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
8525 * This memory was allocated by the lpfc_read_sparam routine but is
8526 * no longer needed. It is released and ctx_buf NULLed to prevent
8527 * unintended pointer access as the mbox is reused.
8529 lpfc_mbuf_free(phba, mp->virt, mp->phys);
8531 mboxq->ctx_buf = NULL;
8533 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8534 "0382 READ_SPARAM command failed "
8535 "status %d, mbxStatus x%x\n",
8536 rc, bf_get(lpfc_mqe_status, mqe));
8537 phba->link_state = LPFC_HBA_ERROR;
8542 lpfc_update_vport_wwn(vport);
8544 /* Update the fc_host data structures with new wwn. */
8545 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
8546 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
8548 /* Create all the SLI4 queues */
8549 rc = lpfc_sli4_queue_create(phba);
8551 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8552 "3089 Failed to allocate queues\n");
8556 /* Set up all the queues to the device */
8557 rc = lpfc_sli4_queue_setup(phba);
8559 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8560 "0381 Error %d during queue setup.\n ", rc);
8561 goto out_stop_timers;
8563 /* Initialize the driver internal SLI layer lists. */
8564 lpfc_sli4_setup(phba);
8565 lpfc_sli4_queue_init(phba);
8567 /* update host els xri-sgl sizes and mappings */
8568 rc = lpfc_sli4_els_sgl_update(phba);
8570 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8571 "1400 Failed to update xri-sgl size and "
8572 "mapping: %d\n", rc);
8573 goto out_destroy_queue;
8576 /* register the els sgl pool to the port */
8577 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
8578 phba->sli4_hba.els_xri_cnt);
8579 if (unlikely(rc < 0)) {
8580 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8581 "0582 Error %d during els sgl post "
8584 goto out_destroy_queue;
8586 phba->sli4_hba.els_xri_cnt = rc;
8588 if (phba->nvmet_support) {
8589 /* update host nvmet xri-sgl sizes and mappings */
8590 rc = lpfc_sli4_nvmet_sgl_update(phba);
8592 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8593 "6308 Failed to update nvmet-sgl size "
8594 "and mapping: %d\n", rc);
8595 goto out_destroy_queue;
8598 /* register the nvmet sgl pool to the port */
8599 rc = lpfc_sli4_repost_sgl_list(
8601 &phba->sli4_hba.lpfc_nvmet_sgl_list,
8602 phba->sli4_hba.nvmet_xri_cnt);
8603 if (unlikely(rc < 0)) {
8604 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8605 "3117 Error %d during nvmet "
8608 goto out_destroy_queue;
8610 phba->sli4_hba.nvmet_xri_cnt = rc;
8612 /* We allocate an iocbq for every receive context SGL.
8613 * The additional allocation is for abort and ls handling.
8615 cnt = phba->sli4_hba.nvmet_xri_cnt +
8616 phba->sli4_hba.max_cfg_param.max_xri;
8618 /* update host common xri-sgl sizes and mappings */
8619 rc = lpfc_sli4_io_sgl_update(phba);
8621 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8622 "6082 Failed to update nvme-sgl size "
8623 "and mapping: %d\n", rc);
8624 goto out_destroy_queue;
8627 /* register the allocated common sgl pool to the port */
8628 rc = lpfc_sli4_repost_io_sgl_list(phba);
8630 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8631 "6116 Error %d during nvme sgl post "
8633 /* Some NVME buffers were moved to abort nvme list */
8634 /* A pci function reset will repost them */
8636 goto out_destroy_queue;
8638 /* Each lpfc_io_buf job structure has an iocbq element.
8639 * This cnt provides for abort, els, ct and ls requests.
8641 cnt = phba->sli4_hba.max_cfg_param.max_xri;
8644 if (!phba->sli.iocbq_lookup) {
8645 /* Initialize and populate the iocb list per host */
8646 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8647 "2821 initialize iocb list with %d entries\n",
8649 rc = lpfc_init_iocb_list(phba, cnt);
8651 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8652 "1413 Failed to init iocb list.\n");
8653 goto out_destroy_queue;
8657 if (phba->nvmet_support)
8658 lpfc_nvmet_create_targetport(phba);
8660 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
8661 /* Post initial buffers to all RQs created */
8662 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
8663 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
8664 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
8665 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
8666 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
8667 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
8668 rqbp->buffer_count = 0;
8670 lpfc_post_rq_buffer(
8671 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
8672 phba->sli4_hba.nvmet_mrq_data[i],
8673 phba->cfg_nvmet_mrq_post, i);
8677 /* Post the rpi header region to the device. */
8678 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
8680 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8681 "0393 Error %d during rpi post operation\n",
8684 goto out_free_iocblist;
8686 lpfc_sli4_node_prep(phba);
8688 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
8689 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
8691 * The FC Port needs to register FCFI (index 0)
8693 lpfc_reg_fcfi(phba, mboxq);
8694 mboxq->vport = phba->pport;
8695 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8696 if (rc != MBX_SUCCESS)
8697 goto out_unset_queue;
8699 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
8700 &mboxq->u.mqe.un.reg_fcfi);
8702 /* We are a NVME Target mode with MRQ > 1 */
8704 /* First register the FCFI */
8705 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
8706 mboxq->vport = phba->pport;
8707 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8708 if (rc != MBX_SUCCESS)
8709 goto out_unset_queue;
8711 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
8712 &mboxq->u.mqe.un.reg_fcfi_mrq);
8714 /* Next register the MRQs */
8715 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
8716 mboxq->vport = phba->pport;
8717 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8718 if (rc != MBX_SUCCESS)
8719 goto out_unset_queue;
8722 /* Check if the port is configured to be disabled */
8723 lpfc_sli_read_link_ste(phba);
8726 /* Don't post more new bufs if repost already recovered buffers */
8729 if (phba->nvmet_support == 0) {
8730 if (phba->sli4_hba.io_xri_cnt == 0) {
8731 len = lpfc_new_io_buf(
8732 phba, phba->sli4_hba.io_xri_max);
if (len == 0) {
rc = -ENOMEM;
8735 goto out_unset_queue;
}
8738 if (phba->cfg_xri_rebalancing)
8739 lpfc_create_multixri_pools(phba);
8742 phba->cfg_xri_rebalancing = 0;
8745 /* Allow asynchronous mailbox command to go through */
8746 spin_lock_irq(&phba->hbalock);
8747 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8748 spin_unlock_irq(&phba->hbalock);
8750 /* Post receive buffers to the device */
8751 lpfc_sli4_rb_setup(phba);
8753 /* Reset HBA FCF states after HBA reset */
8754 phba->fcf.fcf_flag = 0;
8755 phba->fcf.current_rec.flag = 0;
8757 /* Start the ELS watchdog timer */
8758 mod_timer(&vport->els_tmofunc,
8759 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
8761 /* Start heart beat timer */
8762 mod_timer(&phba->hb_tmofunc,
8763 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
8764 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
8765 phba->last_completion_time = jiffies;
8767 /* start eq_delay heartbeat */
8768 if (phba->cfg_auto_imax)
8769 queue_delayed_work(phba->wq, &phba->eq_delay_work,
8770 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
8772 /* start per phba idle_stat_delay heartbeat */
8773 lpfc_init_idle_stat_hb(phba);
8775 /* Start error attention (ERATT) polling timer */
8776 mod_timer(&phba->eratt_poll,
8777 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
8779 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
8780 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
8781 rc = pci_enable_pcie_error_reporting(phba->pcidev);
8783 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8784 "2829 This device supports "
8785 "Advanced Error Reporting (AER)\n");
8786 spin_lock_irq(&phba->hbalock);
8787 phba->hba_flag |= HBA_AER_ENABLED;
8788 spin_unlock_irq(&phba->hbalock);
8790 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8791 "2830 This device does not support "
8792 "Advanced Error Reporting (AER)\n");
8793 phba->cfg_aer_support = 0;
8799 * The port is ready, set the host's link state to LINK_DOWN
8800 * in preparation for link interrupts.
8802 spin_lock_irq(&phba->hbalock);
8803 phba->link_state = LPFC_LINK_DOWN;
8805 /* Check if physical ports are trunked */
8806 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
8807 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
8808 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
8809 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
8810 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
8811 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
8812 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
8813 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
8814 spin_unlock_irq(&phba->hbalock);
8816 /* Arm the CQs and then EQs on device */
8817 lpfc_sli4_arm_cqeq_intr(phba);
8819 /* Indicate device interrupt mode */
8820 phba->sli4_hba.intr_enable = 1;
8822 /* Setup CMF after HBA is initialized */
8823 lpfc_cmf_setup(phba);
8825 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
8826 (phba->hba_flag & LINK_DISABLED)) {
8827 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8828 "3103 Adapter Link is disabled.\n");
8829 lpfc_down_link(phba, mboxq);
8830 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8831 if (rc != MBX_SUCCESS) {
8832 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8833 "3104 Adapter failed to issue "
8834 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
8835 goto out_io_buff_free;
8837 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
8838 /* don't perform init_link on SLI4 FC port loopback test */
8839 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
8840 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
8842 goto out_io_buff_free;
8845 mempool_free(mboxq, phba->mbox_mem_pool);
8847 /* Enable RAS FW log support */
8848 lpfc_sli4_ras_setup(phba);
8850 phba->hba_flag |= HBA_SETUP;
8854 /* Free allocated IO Buffers */
8857 /* Unset all the queues set up in this routine when error out */
8858 lpfc_sli4_queue_unset(phba);
8860 lpfc_free_iocb_list(phba);
8862 lpfc_sli4_queue_destroy(phba);
8864 lpfc_stop_hba_timers(phba);
8866 mempool_free(mboxq, phba->mbox_mem_pool);
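/*
 * Note on the unwind above: the error labels release resources in the
 * reverse order of their setup (IO buffers, queues, the iocb list, queue
 * memory, then the HBA timers), so a failure at any step only tears down
 * what this routine has created so far.
 */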
8871 * lpfc_mbox_timeout - Timeout call back function for mbox timer
8872 * @t: Context to fetch pointer to hba structure from.
8874 * This is the callback function for mailbox timer. The mailbox
8875 * timer is armed when a new mailbox command is issued and the timer
8876 * is deleted when the mailbox completes. The function is called by
8877 * the kernel timer code when a mailbox does not complete within
8878 * expected time. This function wakes up the worker thread to
8879 * process the mailbox timeout and returns. All the processing is
8880 * done by the worker thread function lpfc_mbox_timeout_handler.
8883 lpfc_mbox_timeout(struct timer_list *t)
8885 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
8886 unsigned long iflag;
8887 uint32_t tmo_posted;
8889 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
8890 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
8892 phba->pport->work_port_events |= WORKER_MBOX_TMO;
8893 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
8896 lpfc_worker_wake_up(phba);
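/*
 * Sketch of the timer life cycle (assumed from the issue paths below): the
 * mbox_tmo timer is armed when a command is posted, e.g.
 *
 *	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 1000);
 *	mod_timer(&psli->mbox_tmo, jiffies + timeout);
 *
 * and deleted on completion, so this callback only fires when a command
 * has genuinely overstayed its timeout.
 */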
8901 * lpfc_sli4_mbox_completions_pending - check if any mailbox completions are pending
8903 * @phba: Pointer to HBA context object.
8905 * This function checks if any mailbox completions are present on the mailbox completion queue.
8909 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
8913 struct lpfc_queue *mcq;
8914 struct lpfc_mcqe *mcqe;
8915 bool pending_completions = false;
8918 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
return false;
8921 /* Check for completions on mailbox completion queue */
8923 mcq = phba->sli4_hba.mbx_cq;
8924 idx = mcq->hba_index;
8925 qe_valid = mcq->qe_valid;
8926 while (bf_get_le32(lpfc_cqe_valid,
8927 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
8928 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
8929 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
8930 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
8931 pending_completions = true;
break;
}
8934 idx = (idx + 1) % mcq->entry_count;
8935 if (mcq->hba_index == idx)
break;
8938 /* if the index wrapped around, toggle the valid bit */
8939 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
8940 qe_valid = (qe_valid) ? 0 : 1;
8942 return pending_completions;
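/*
 * Example of the valid-bit phase tracking above (illustrative): on a CQ
 * with entry_count = 4 and qe_valid = 1, entries are scanned while their
 * valid bits read 1; once idx wraps back to 0 (and the port supports CQAV),
 * qe_valid toggles to 0, so stale entries still carrying the old phase no
 * longer match and the scan stops at the first CQE the firmware has not
 * rewritten.
 */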
8947 * lpfc_sli4_process_missed_mbox_completions - process mbox completions from firmware
8949 * @phba: Pointer to HBA context object.
8951 * For sli4, it is possible to miss an interrupt. As such mbox completions
8952 * may be missed, causing erroneous mailbox timeouts to occur. This function
8953 * checks to see if mbox completions are on the mailbox completion queue
8954 * and will process all the completions associated with the eq for the
8955 * mailbox completion queue.
8958 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
8960 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
8962 struct lpfc_queue *fpeq = NULL;
8963 struct lpfc_queue *eq;
8966 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
return false;
8969 /* Find the EQ associated with the mbox CQ */
8970 if (sli4_hba->hdwq) {
8971 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
8972 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
8973 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
8982 /* Turn off interrupts from this EQ */
8984 sli4_hba->sli4_eq_clr_intr(fpeq);
8986 /* Check to see if a mbox completion is pending */
8988 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
8991 * If a mbox completion is pending, process all the events on EQ
8992 * associated with the mbox completion queue (this could include
8993 * mailbox commands, async events, els commands, receive queue data
if (mbox_pending)
8998 /* process and rearm the EQ */
8999 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
else
9001 /* Always clear and re-arm the EQ */
9002 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
9004 return mbox_pending;
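/*
 * Typical use (sketch): lpfc_mbox_timeout_handler() below calls this
 * routine first, so a completion that was merely missed because of a lost
 * interrupt is processed normally instead of escalating into an HBA reset.
 */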
9009 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
9010 * @phba: Pointer to HBA context object.
9012 * This function is called from worker thread when a mailbox command times out.
9013 * The caller is not required to hold any locks. This function will reset the
9014 * HBA and recover all the pending commands.
9017 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
9019 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
9020 MAILBOX_t *mb = NULL;
9022 struct lpfc_sli *psli = &phba->sli;
9024 /* If the mailbox completed, process the completion */
9025 lpfc_sli4_process_missed_mbox_completions(phba);
9027 if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
return;
if (pmbox)
mb = &pmbox->u.mb;
9032 /* Check the pmbox pointer first. There is a race condition
9033 * between the mbox timeout handler getting executed in the
9034 * worklist and the mailbox actually completing. When this
9035 * race condition occurs, the mbox_active will be NULL.
9037 spin_lock_irq(&phba->hbalock);
9038 if (pmbox == NULL) {
9039 lpfc_printf_log(phba, KERN_WARNING,
9041 "0353 Active Mailbox cleared - mailbox timeout "
9043 spin_unlock_irq(&phba->hbalock);
9047 /* Mbox cmd <mbxCommand> timeout */
9048 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9049 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
9051 phba->pport->port_state,
9053 phba->sli.mbox_active);
9054 spin_unlock_irq(&phba->hbalock);
9056 /* Setting state unknown so lpfc_sli_abort_iocb_ring
9057 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
9058 * it to fail all outstanding SCSI IO.
9060 spin_lock_irq(&phba->pport->work_port_lock);
9061 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9062 spin_unlock_irq(&phba->pport->work_port_lock);
9063 spin_lock_irq(&phba->hbalock);
9064 phba->link_state = LPFC_LINK_UNKNOWN;
9065 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9066 spin_unlock_irq(&phba->hbalock);
9068 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9069 "0345 Resetting board due to mailbox timeout\n");
9071 /* Reset the HBA device */
9072 lpfc_reset_hba(phba);
9076 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
9077 * @phba: Pointer to HBA context object.
9078 * @pmbox: Pointer to mailbox object.
9079 * @flag: Flag indicating how the mailbox need to be processed.
9081 * This function is called by discovery code and HBA management code
9082 * to submit a mailbox command to firmware with SLI-3 interface spec. This
9083 * function gets the hbalock to protect the data structures.
9084 * The mailbox command can be submitted in polling mode, in which case
9085 * this function will wait in a polling loop for the completion of the
9087 * If the mailbox is submitted in no_wait mode (not polling) the
9088 * function will submit the command and return immediately without waiting
9089 * for the mailbox completion. The no_wait is supported only when HBA
9090 * is in SLI2/SLI3 mode - interrupts are enabled.
9091 * The SLI interface allows only one mailbox pending at a time. If the
9092 * mailbox is issued in polling mode and there is already a mailbox
9093 * pending, then the function will return an error. If the mailbox is issued
9094 * in NO_WAIT mode and there is a mailbox pending already, the function
9095 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
9096 * The sli layer owns the mailbox object until the completion of mailbox
9097 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
9098 * return codes the caller owns the mailbox command after the return of
9102 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
9106 struct lpfc_sli *psli = &phba->sli;
MAILBOX_t *mbx;
9107 uint32_t status, evtctr;
9108 uint32_t ha_copy, hc_copy;
9110 unsigned long timeout;
9111 unsigned long drvr_flag = 0;
9112 uint32_t word0, ldata;
9113 void __iomem *to_slim;
9114 int processing_queue = 0;
9116 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9118 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9119 /* processing mbox queue from intr_handler */
9120 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9121 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9124 processing_queue = 1;
9125 pmbox = lpfc_mbox_get(phba);
if (!pmbox) {
9127 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
return MBX_SUCCESS;
}
9132 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
9133 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
if (!pmbox->vport) {
9135 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9136 lpfc_printf_log(phba, KERN_ERR,
9137 LOG_MBOX | LOG_VPORT,
9138 "1806 Mbox x%x failed. No vport\n",
9139 pmbox->u.mb.mbxCommand);
9141 goto out_not_finished;
9145 /* If the PCI channel is in offline state, do not post mbox. */
9146 if (unlikely(pci_channel_offline(phba->pcidev))) {
9147 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9148 goto out_not_finished;
9151 /* If HBA has a deferred error attention, fail the iocb. */
9152 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
9153 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9154 goto out_not_finished;
mbx = &pmbox->u.mb;
9160 status = MBX_SUCCESS;
9162 if (phba->link_state == LPFC_HBA_ERROR) {
9163 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9165 /* Mbox command <mbxCommand> cannot issue */
9166 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9167 "(%d):0311 Mailbox command x%x cannot "
9168 "issue Data: x%x x%x\n",
9169 pmbox->vport ? pmbox->vport->vpi : 0,
9170 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9171 goto out_not_finished;
9174 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
9175 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
9176 !(hc_copy & HC_MBINT_ENA)) {
9177 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9178 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9179 "(%d):2528 Mailbox command x%x cannot "
9180 "issue Data: x%x x%x\n",
9181 pmbox->vport ? pmbox->vport->vpi : 0,
9182 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9183 goto out_not_finished;
9187 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9188 /* Polling for a mbox command when another one is already active
9189 * is not allowed in SLI. Also, the driver must have established
9190 * SLI2 mode to queue and process multiple mbox commands.
9193 if (flag & MBX_POLL) {
9194 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9196 /* Mbox command <mbxCommand> cannot issue */
9197 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9198 "(%d):2529 Mailbox command x%x "
9199 "cannot issue Data: x%x x%x\n",
9200 pmbox->vport ? pmbox->vport->vpi : 0,
9201 pmbox->u.mb.mbxCommand,
9202 psli->sli_flag, flag);
9203 goto out_not_finished;
9206 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
9207 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9208 /* Mbox command <mbxCommand> cannot issue */
9209 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9210 "(%d):2530 Mailbox command x%x "
9211 "cannot issue Data: x%x x%x\n",
9212 pmbox->vport ? pmbox->vport->vpi : 0,
9213 pmbox->u.mb.mbxCommand,
9214 psli->sli_flag, flag);
9215 goto out_not_finished;
9218 /* Another mailbox command is still being processed, queue this
9219 * command to be processed later.
9221 lpfc_mbox_put(phba, pmbox);
9223 /* Mbox cmd issue - BUSY */
9224 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9225 "(%d):0308 Mbox cmd issue - BUSY Data: "
9226 "x%x x%x x%x x%x\n",
9227 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
9229 phba->pport ? phba->pport->port_state : 0xff,
9230 psli->sli_flag, flag);
9232 psli->slistat.mbox_busy++;
9233 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9236 lpfc_debugfs_disc_trc(pmbox->vport,
9237 LPFC_DISC_TRC_MBOX_VPORT,
9238 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
9239 (uint32_t)mbx->mbxCommand,
9240 mbx->un.varWords[0], mbx->un.varWords[1]);
9243 lpfc_debugfs_disc_trc(phba->pport,
9245 "MBOX Bsy: cmd:x%x mb:x%x x%x",
9246 (uint32_t)mbx->mbxCommand,
9247 mbx->un.varWords[0], mbx->un.varWords[1]);
9253 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9255 /* If we are not polling, we MUST be in SLI2 mode */
9256 if (flag != MBX_POLL) {
9257 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
9258 (mbx->mbxCommand != MBX_KILL_BOARD)) {
9259 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9260 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9261 /* Mbox command <mbxCommand> cannot issue */
9262 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9263 "(%d):2531 Mailbox command x%x "
9264 "cannot issue Data: x%x x%x\n",
9265 pmbox->vport ? pmbox->vport->vpi : 0,
9266 pmbox->u.mb.mbxCommand,
9267 psli->sli_flag, flag);
9268 goto out_not_finished;
9270 /* timeout active mbox command */
9271 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9273 mod_timer(&psli->mbox_tmo, jiffies + timeout);
9276 /* Mailbox cmd <cmd> issue */
9277 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9278 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
9280 pmbox->vport ? pmbox->vport->vpi : 0,
9282 phba->pport ? phba->pport->port_state : 0xff,
9283 psli->sli_flag, flag);
9285 if (mbx->mbxCommand != MBX_HEARTBEAT) {
9287 lpfc_debugfs_disc_trc(pmbox->vport,
9288 LPFC_DISC_TRC_MBOX_VPORT,
9289 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9290 (uint32_t)mbx->mbxCommand,
9291 mbx->un.varWords[0], mbx->un.varWords[1]);
9294 lpfc_debugfs_disc_trc(phba->pport,
9296 "MBOX Send: cmd:x%x mb:x%x x%x",
9297 (uint32_t)mbx->mbxCommand,
9298 mbx->un.varWords[0], mbx->un.varWords[1]);
9302 psli->slistat.mbox_cmd++;
9303 evtctr = psli->slistat.mbox_event;
9305 /* next set own bit for the adapter and copy over command word */
9306 mbx->mbxOwner = OWN_CHIP;
9308 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9309 /* Populate mbox extension offset word. */
9310 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
9311 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9312 = (uint8_t *)phba->mbox_ext
9313 - (uint8_t *)phba->mbox;
9316 /* Copy the mailbox extension data */
9317 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
9318 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
9319 (uint8_t *)phba->mbox_ext,
9320 pmbox->in_ext_byte_len);
9322 /* Copy command data to host SLIM area */
9323 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
9325 /* Populate mbox extension offset word. */
9326 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
9327 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9328 = MAILBOX_HBA_EXT_OFFSET;
9330 /* Copy the mailbox extension data */
9331 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
9332 lpfc_memcpy_to_slim(phba->MBslimaddr +
9333 MAILBOX_HBA_EXT_OFFSET,
9334 pmbox->ctx_buf, pmbox->in_ext_byte_len);
9336 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9337 /* copy command data into host mbox for cmpl */
9338 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
9341 /* First copy mbox command data to HBA SLIM, skip past first word */
9343 to_slim = phba->MBslimaddr + sizeof (uint32_t);
9344 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
9345 MAILBOX_CMD_SIZE - sizeof (uint32_t));
9347 /* Next copy over first word, with mbxOwner set */
9348 ldata = *((uint32_t *)mbx);
9349 to_slim = phba->MBslimaddr;
9350 writel(ldata, to_slim);
9351 readl(to_slim); /* flush */
9353 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9354 /* switch over to host mailbox */
9355 psli->sli_flag |= LPFC_SLI_ACTIVE;
9362 /* Set up reference to mailbox command */
9363 psli->mbox_active = pmbox;
9364 /* Interrupt board to do it */
9365 writel(CA_MBATT, phba->CAregaddr);
9366 readl(phba->CAregaddr); /* flush */
9367 /* Don't wait for it to finish, just return */
9371 /* Set up null reference to mailbox command */
9372 psli->mbox_active = NULL;
9373 /* Interrupt board to do it */
9374 writel(CA_MBATT, phba->CAregaddr);
9375 readl(phba->CAregaddr); /* flush */
9377 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9378 /* First read mbox status word */
9379 word0 = *((uint32_t *)phba->mbox);
9380 word0 = le32_to_cpu(word0);
9382 /* First read mbox status word */
9383 if (lpfc_readl(phba->MBslimaddr, &word0)) {
9384 spin_unlock_irqrestore(&phba->hbalock,
9386 goto out_not_finished;
9390 /* Read the HBA Host Attention Register */
9391 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9392 spin_unlock_irqrestore(&phba->hbalock,
9394 goto out_not_finished;
9396 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9399 /* Wait for command to complete */
9400 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
9401 (!(ha_copy & HA_MBATT) &&
9402 (phba->link_state > LPFC_WARM_START))) {
9403 if (time_after(jiffies, timeout)) {
9404 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9405 spin_unlock_irqrestore(&phba->hbalock,
9407 goto out_not_finished;
9410 /* Check if we took a mbox interrupt while we were polling */
9412 if (((word0 & OWN_CHIP) != OWN_CHIP)
9413 && (evtctr != psli->slistat.mbox_event))
9417 spin_unlock_irqrestore(&phba->hbalock,
9420 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9423 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9424 /* First copy command data */
9425 word0 = *((uint32_t *)phba->mbox);
9426 word0 = le32_to_cpu(word0);
9427 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
9430 /* Check real SLIM for any errors */
9431 slimword0 = readl(phba->MBslimaddr);
9432 slimmb = (MAILBOX_t *) & slimword0;
9433 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
9434 && slimmb->mbxStatus) {
9441 /* First copy command data */
9442 word0 = readl(phba->MBslimaddr);
9444 /* Read the HBA Host Attention Register */
9445 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9446 spin_unlock_irqrestore(&phba->hbalock,
9448 goto out_not_finished;
9452 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9453 /* copy results back to user */
9454 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
9456 /* Copy the mailbox extension data */
9457 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9458 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
9460 pmbox->out_ext_byte_len);
9463 /* First copy command data */
9464 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
9466 /* Copy the mailbox extension data */
9467 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9468 lpfc_memcpy_from_slim(
9471 MAILBOX_HBA_EXT_OFFSET,
9472 pmbox->out_ext_byte_len);
9476 writel(HA_MBATT, phba->HAregaddr);
9477 readl(phba->HAregaddr); /* flush */
9479 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9480 status = mbx->mbxStatus;
9483 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9487 if (processing_queue) {
9488 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
9489 lpfc_mbox_cmpl_put(phba, pmbox);
9491 return MBX_NOT_FINISHED;
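/*
 * Caller-side sketch of the ownership rule documented above, assuming a
 * mailbox allocated from phba->mbox_mem_pool:
 *
 *	rc = lpfc_sli_issue_mbox(phba, pmbox, MBX_NOWAIT);
 *	if (rc != MBX_BUSY && rc != MBX_SUCCESS)
 *		mempool_free(pmbox, phba->mbox_mem_pool);
 *
 * On MBX_BUSY or MBX_SUCCESS the SLI layer owns pmbox until its completion
 * handler runs; for every other return code the caller must reclaim it.
 */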
9495 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
9496 * @phba: Pointer to HBA context object.
9498 * The function blocks the posting of SLI4 asynchronous mailbox commands from
9499 * the driver internal pending mailbox queue. It will then try to wait out the
9500 * possible outstanding mailbox command before returning.
9503 * 0 - the outstanding mailbox command completed; otherwise, the wait for
9504 * the outstanding mailbox command timed out.
9507 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
9509 struct lpfc_sli *psli = &phba->sli;
9510 LPFC_MBOXQ_t *mboxq;
9512 unsigned long timeout = 0;
u32 sli_flag;
9514 u8 cmd, subsys, opcode;
9516 /* Mark the asynchronous mailbox command posting as blocked */
9517 spin_lock_irq(&phba->hbalock);
9518 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9519 /* Determine how long we might wait for the active mailbox
9520 * command to be gracefully completed by firmware.
9522 if (phba->sli.mbox_active)
9523 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
9524 phba->sli.mbox_active) *
9526 spin_unlock_irq(&phba->hbalock);
9528 /* Make sure the mailbox is really active */
9530 lpfc_sli4_process_missed_mbox_completions(phba);
9532 /* Wait for the outstanding mailbox command to complete */
9533 while (phba->sli.mbox_active) {
9534 /* Check active mailbox complete status every 2ms */
9536 if (time_after(jiffies, timeout)) {
9537 /* Timeout, mark the outstanding cmd not complete */
9539 /* Sanity check sli.mbox_active has not completed or been
9540 * cancelled from another context during the last 2ms sleep,
9541 * so take hbalock to be sure before logging.
9543 spin_lock_irq(&phba->hbalock);
9544 if (phba->sli.mbox_active) {
9545 mboxq = phba->sli.mbox_active;
9546 cmd = mboxq->u.mb.mbxCommand;
9547 subsys = lpfc_sli_config_mbox_subsys_get(phba,
9549 opcode = lpfc_sli_config_mbox_opcode_get(phba,
9551 sli_flag = psli->sli_flag;
9552 spin_unlock_irq(&phba->hbalock);
9553 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9554 "2352 Mailbox command x%x "
9555 "(x%x/x%x) sli_flag x%x could "
9557 cmd, subsys, opcode,
9560 spin_unlock_irq(&phba->hbalock);
9568 /* Cannot cleanly block async mailbox command, fail it */
9570 spin_lock_irq(&phba->hbalock);
9571 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9572 spin_unlock_irq(&phba->hbalock);
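/*
 * Pairing sketch: a caller that must issue a synchronous command while
 * interrupts are enabled brackets it the way lpfc_sli_issue_mbox_s4() does:
 *
 *	rc = lpfc_sli4_async_mbox_block(phba);
 *	if (!rc) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 */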
9578 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
9579 * @phba: Pointer to HBA context object.
9581 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
9582 * commands from the driver internal pending mailbox queue. It makes sure
9583 * that there is no outstanding mailbox command before resuming posting
9584 * asynchronous mailbox commands. If, for any reason, there is an outstanding
9585 * mailbox command, it will try to wait it out before resuming asynchronous
9586 * mailbox command posting.
9589 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
9591 struct lpfc_sli *psli = &phba->sli;
9593 spin_lock_irq(&phba->hbalock);
9594 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9595 /* Asynchronous mailbox posting is not blocked, do nothing */
9596 spin_unlock_irq(&phba->hbalock);
9600 /* An outstanding synchronous mailbox command is guaranteed to finish,
9601 * either successfully or by timing out; after a timeout the outstanding
9602 * command is always removed, so just unblock posting of async
9603 * mailbox commands and resume.
9605 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9606 spin_unlock_irq(&phba->hbalock);
9608 /* wake up worker thread to post asynchronous mailbox command */
9609 lpfc_worker_wake_up(phba);
9613 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
9614 * @phba: Pointer to HBA context object.
9615 * @mboxq: Pointer to mailbox object.
9617 * The function waits for the bootstrap mailbox register ready bit from
9618 * the port for twice the regular mailbox command timeout value.
9620 * 0 - no timeout on waiting for bootstrap mailbox register ready.
9621 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
9624 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9627 unsigned long timeout;
9628 struct lpfc_register bmbx_reg;
9630 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
9634 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
9635 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
9639 if (time_after(jiffies, timeout))
9640 return MBXERR_ERROR;
9641 } while (!db_ready);
9647 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
9648 * @phba: Pointer to HBA context object.
9649 * @mboxq: Pointer to mailbox object.
9651 * The function posts a mailbox to the port. The mailbox is expected
9652 * to be completely filled in and ready for the port to operate on it.
9653 * This routine executes a synchronous completion operation on the
9654 * mailbox by polling for its completion.
9656 * The caller must not be holding any locks when calling this routine.
9659 * MBX_SUCCESS - mailbox posted successfully
9660 * Any of the MBX error values.
9663 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9665 int rc = MBX_SUCCESS;
9666 unsigned long iflag;
9667 uint32_t mcqe_status;
uint32_t mbx_cmnd;
9669 struct lpfc_sli *psli = &phba->sli;
9670 struct lpfc_mqe *mb = &mboxq->u.mqe;
9671 struct lpfc_bmbx_create *mbox_rgn;
9672 struct dma_address *dma_address;
9675 * Only one mailbox can be active to the bootstrap mailbox region
9676 * at a time and there is no queueing provided.
9678 spin_lock_irqsave(&phba->hbalock, iflag);
9679 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9680 spin_unlock_irqrestore(&phba->hbalock, iflag);
9681 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9682 "(%d):2532 Mailbox command x%x (x%x/x%x) "
9683 "cannot issue Data: x%x x%x\n",
9684 mboxq->vport ? mboxq->vport->vpi : 0,
9685 mboxq->u.mb.mbxCommand,
9686 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9687 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9688 psli->sli_flag, MBX_POLL);
9689 return MBXERR_ERROR;
9691 /* The server grabs the token and owns it until release */
9692 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9693 phba->sli.mbox_active = mboxq;
9694 spin_unlock_irqrestore(&phba->hbalock, iflag);
9696 /* wait for the bootstrap mbox register to be ready */
9697 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9701 * Initialize the bootstrap memory region to avoid stale data areas
9702 * in the mailbox post. Then copy the caller's mailbox contents to
9703 * the bmbx mailbox region.
9705 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
9706 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
9707 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
9708 sizeof(struct lpfc_mqe));
9710 /* Post the high mailbox dma address to the port and wait for ready. */
9711 dma_address = &phba->sli4_hba.bmbx.dma_address;
9712 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
9714 /* wait for bootstrap mbox register for hi-address write done */
9715 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9719 /* Post the low mailbox dma address to the port. */
9720 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
9722 /* wait for bootstrap mbox register for low address write done */
9723 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9728 * Read the CQ to ensure the mailbox has completed.
9729 * If so, update the mailbox status so that the upper layers
9730 * can complete the request normally.
9732 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
9733 sizeof(struct lpfc_mqe));
9734 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
9735 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
9736 sizeof(struct lpfc_mcqe));
9737 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
9739 * When the CQE status indicates a failure and the mailbox status
9740 * indicates success then copy the CQE status into the mailbox status
9741 * (and prefix it with x4000).
9743 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
9744 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
9745 bf_set(lpfc_mqe_status, mb,
9746 (LPFC_MBX_ERROR_RANGE | mcqe_status));
9749 lpfc_sli4_swap_str(phba, mboxq);
9751 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9752 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
9753 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
9754 " x%x x%x CQ: x%x x%x x%x x%x\n",
9755 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9756 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9757 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9758 bf_get(lpfc_mqe_status, mb),
9759 mb->un.mb_words[0], mb->un.mb_words[1],
9760 mb->un.mb_words[2], mb->un.mb_words[3],
9761 mb->un.mb_words[4], mb->un.mb_words[5],
9762 mb->un.mb_words[6], mb->un.mb_words[7],
9763 mb->un.mb_words[8], mb->un.mb_words[9],
9764 mb->un.mb_words[10], mb->un.mb_words[11],
9765 mb->un.mb_words[12], mboxq->mcqe.word0,
9766 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
9767 mboxq->mcqe.trailer);
9769 /* We are holding the token; no lock needed for the release */
9770 spin_lock_irqsave(&phba->hbalock, iflag);
9771 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9772 phba->sli.mbox_active = NULL;
9773 spin_unlock_irqrestore(&phba->hbalock, iflag);
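/*
 * Summary sketch of the bootstrap protocol above: the 64-bit DMA address
 * of the bmbx region is posted as two doorbell writes, each followed by a
 * ready-bit poll, before the MQE and MCQE are copied back out of the same
 * region:
 *
 *	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
 *	lpfc_sli4_wait_bmbx_ready(phba, mboxq);
 *	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
 *	lpfc_sli4_wait_bmbx_ready(phba, mboxq);
 */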
9778 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
9779 * @phba: Pointer to HBA context object.
9780 * @mboxq: Pointer to mailbox object.
9781 * @flag: Flag indicating how the mailbox need to be processed.
9783 * This function is called by discovery code and HBA management code to submit
9784 * a mailbox command to firmware with SLI-4 interface spec.
9786 * Return codes: the caller owns the mailbox command after the function returns.
9790 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
9793 struct lpfc_sli *psli = &phba->sli;
9794 unsigned long iflags;
9797 /* dump from issue mailbox command if setup */
9798 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
9800 rc = lpfc_mbox_dev_check(phba);
9802 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9803 "(%d):2544 Mailbox command x%x (x%x/x%x) "
9804 "cannot issue Data: x%x x%x\n",
9805 mboxq->vport ? mboxq->vport->vpi : 0,
9806 mboxq->u.mb.mbxCommand,
9807 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9808 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9809 psli->sli_flag, flag);
9810 goto out_not_finished;
9813 /* Detect polling mode and jump to a handler */
9814 if (!phba->sli4_hba.intr_enable) {
9815 if (flag == MBX_POLL)
9816 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9819 if (rc != MBX_SUCCESS)
9820 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9821 "(%d):2541 Mailbox command x%x "
9822 "(x%x/x%x) failure: "
9823 "mqe_sta: x%x mcqe_sta: x%x/x%x "
9825 mboxq->vport ? mboxq->vport->vpi : 0,
9826 mboxq->u.mb.mbxCommand,
9827 lpfc_sli_config_mbox_subsys_get(phba,
9829 lpfc_sli_config_mbox_opcode_get(phba,
9831 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9832 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9833 bf_get(lpfc_mcqe_ext_status,
9835 psli->sli_flag, flag);
9837 } else if (flag == MBX_POLL) {
9838 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9839 "(%d):2542 Try to issue mailbox command "
9840 "x%x (x%x/x%x) synchronously ahead of async "
9841 "mailbox command queue: x%x x%x\n",
9842 mboxq->vport ? mboxq->vport->vpi : 0,
9843 mboxq->u.mb.mbxCommand,
9844 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9845 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9846 psli->sli_flag, flag);
9847 /* Try to block the asynchronous mailbox posting */
9848 rc = lpfc_sli4_async_mbox_block(phba);
9850 /* Successfully blocked, now issue sync mbox cmd */
9851 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9852 if (rc != MBX_SUCCESS)
9853 lpfc_printf_log(phba, KERN_WARNING,
9855 "(%d):2597 Sync Mailbox command "
9856 "x%x (x%x/x%x) failure: "
9857 "mqe_sta: x%x mcqe_sta: x%x/x%x "
9859 mboxq->vport ? mboxq->vport->vpi : 0,
9860 mboxq->u.mb.mbxCommand,
9861 lpfc_sli_config_mbox_subsys_get(phba,
9863 lpfc_sli_config_mbox_opcode_get(phba,
9865 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9866 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9867 bf_get(lpfc_mcqe_ext_status,
9869 psli->sli_flag, flag);
9870 /* Unblock the async mailbox posting afterward */
9871 lpfc_sli4_async_mbox_unblock(phba);
9876 /* Now, interrupt mode asynchronous mailbox command */
9877 rc = lpfc_mbox_cmd_check(phba, mboxq);
9879 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9880 "(%d):2543 Mailbox command x%x (x%x/x%x) "
9881 "cannot issue Data: x%x x%x\n",
9882 mboxq->vport ? mboxq->vport->vpi : 0,
9883 mboxq->u.mb.mbxCommand,
9884 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9885 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9886 psli->sli_flag, flag);
9887 goto out_not_finished;
9890 /* Put the mailbox command to the driver internal FIFO */
9891 psli->slistat.mbox_busy++;
9892 spin_lock_irqsave(&phba->hbalock, iflags);
9893 lpfc_mbox_put(phba, mboxq);
9894 spin_unlock_irqrestore(&phba->hbalock, iflags);
9895 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9896 "(%d):0354 Mbox cmd issue - Enqueue Data: "
9897 "x%x (x%x/x%x) x%x x%x x%x\n",
9898 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
9899 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
9900 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9901 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9902 phba->pport->port_state,
9903 psli->sli_flag, MBX_NOWAIT);
9904 /* Wake up worker thread to transport mailbox command from head */
9905 lpfc_worker_wake_up(phba);
9910 return MBX_NOT_FINISHED;
9914 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
9915 * @phba: Pointer to HBA context object.
9917 * This function is called by the worker thread to send a mailbox command to
9918 * SLI4 HBA firmware.
9922 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
9924 struct lpfc_sli *psli = &phba->sli;
9925 LPFC_MBOXQ_t *mboxq;
9926 int rc = MBX_SUCCESS;
9927 unsigned long iflags;
9928 struct lpfc_mqe *mqe;
uint32_t mbx_cmnd;
9931 /* Check interrupt mode before posting an async mailbox command */
9932 if (unlikely(!phba->sli4_hba.intr_enable))
9933 return MBX_NOT_FINISHED;
9935 /* Check for mailbox command service token */
9936 spin_lock_irqsave(&phba->hbalock, iflags);
9937 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9938 spin_unlock_irqrestore(&phba->hbalock, iflags);
9939 return MBX_NOT_FINISHED;
9941 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9942 spin_unlock_irqrestore(&phba->hbalock, iflags);
9943 return MBX_NOT_FINISHED;
9945 if (unlikely(phba->sli.mbox_active)) {
9946 spin_unlock_irqrestore(&phba->hbalock, iflags);
9947 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9948 "0384 There is pending active mailbox cmd\n");
9949 return MBX_NOT_FINISHED;
9951 /* Take the mailbox command service token */
9952 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9954 /* Get the next mailbox command from head of queue */
9955 mboxq = lpfc_mbox_get(phba);
9957 /* If no more mailbox commands are waiting to post, we're done */
9959 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9960 spin_unlock_irqrestore(&phba->hbalock, iflags);
9963 phba->sli.mbox_active = mboxq;
9964 spin_unlock_irqrestore(&phba->hbalock, iflags);
9966 /* Check device readiness for posting mailbox command */
9967 rc = lpfc_mbox_dev_check(phba);
9969 /* Driver clean routine will clean up pending mailbox */
9970 goto out_not_finished;
9972 /* Prepare the mbox command to be posted */
9973 mqe = &mboxq->u.mqe;
9974 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
9976 /* Start timer for the mbox_tmo and log some mailbox post messages */
9977 mod_timer(&psli->mbox_tmo, (jiffies +
9978 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
9980 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9981 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
9983 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9984 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9985 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9986 phba->pport->port_state, psli->sli_flag);
9988 if (mbx_cmnd != MBX_HEARTBEAT) {
9990 lpfc_debugfs_disc_trc(mboxq->vport,
9991 LPFC_DISC_TRC_MBOX_VPORT,
9992 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9993 mbx_cmnd, mqe->un.mb_words[0],
9994 mqe->un.mb_words[1]);
9996 lpfc_debugfs_disc_trc(phba->pport,
9998 "MBOX Send: cmd:x%x mb:x%x x%x",
9999 mbx_cmnd, mqe->un.mb_words[0],
10000 mqe->un.mb_words[1]);
10003 psli->slistat.mbox_cmd++;
10005 /* Post the mailbox command to the port */
10006 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
10007 if (rc != MBX_SUCCESS) {
10008 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10009 "(%d):2533 Mailbox command x%x (x%x/x%x) "
10010 "cannot issue Data: x%x x%x\n",
10011 mboxq->vport ? mboxq->vport->vpi : 0,
10012 mboxq->u.mb.mbxCommand,
10013 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10014 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10015 psli->sli_flag, MBX_NOWAIT);
10016 goto out_not_finished;
10022 spin_lock_irqsave(&phba->hbalock, iflags);
10023 if (phba->sli.mbox_active) {
10024 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
10025 __lpfc_mbox_cmpl_put(phba, mboxq);
10026 /* Release the token */
10027 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10028 phba->sli.mbox_active = NULL;
10030 spin_unlock_irqrestore(&phba->hbalock, iflags);
10032 return MBX_NOT_FINISHED;
10036 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
10037 * @phba: Pointer to HBA context object.
10038 * @pmbox: Pointer to mailbox object.
10039 * @flag: Flag indicating how the mailbox need to be processed.
10041  * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine through
10042  * the API jump table function pointer in the lpfc_hba struct.
10044  * Return codes: the caller owns the mailbox command after the function returns.
10048 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
10050 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
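/*
 * Illustrative sketch (not part of the driver): a typical caller
 * allocates the mailbox object from the driver's mailbox mempool,
 * builds the command, and posts it through the wrapper above.  The
 * completion handler name here is hypothetical.
 *
 *	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *
 *	if (!pmb)
 *		return -ENOMEM;
 *	lpfc_heart_beat(phba, pmb);		// build an MBX_HEARTBEAT cmd
 *	pmb->mbox_cmpl = my_hb_cmpl;		// hypothetical completion fn
 *	pmb->vport = phba->pport;
 *	if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) == MBX_NOT_FINISHED)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 */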
10054 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
10055 * @phba: The hba struct for which this call is being executed.
10056 * @dev_grp: The HBA PCI-Device group number.
10058  * This routine sets up the mbox interface API function jump table in the @phba struct.
10060 * Returns: 0 - success, -ENODEV - failure.
10063 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10067 case LPFC_PCI_DEV_LP:
10068 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
10069 phba->lpfc_sli_handle_slow_ring_event =
10070 lpfc_sli_handle_slow_ring_event_s3;
10071 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
10072 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
10073 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
10075 case LPFC_PCI_DEV_OC:
10076 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
10077 phba->lpfc_sli_handle_slow_ring_event =
10078 lpfc_sli_handle_slow_ring_event_s4;
10079 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
10080 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
10081 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
10084 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10085 "1420 Invalid HBA PCI-device group: 0x%x\n",
10093 * __lpfc_sli_ringtx_put - Add an iocb to the txq
10094 * @phba: Pointer to HBA context object.
10095 * @pring: Pointer to driver SLI ring object.
10096 * @piocb: Pointer to address of newly added command iocb.
10098 * This function is called with hbalock held for SLI3 ports or
10099 * the ring lock held for SLI4 ports to add a command
10100 * iocb to the txq when SLI layer cannot submit the command iocb
10104 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10105 struct lpfc_iocbq *piocb)
10107 if (phba->sli_rev == LPFC_SLI_REV4)
10108 lockdep_assert_held(&pring->ring_lock);
10110 lockdep_assert_held(&phba->hbalock);
10111 /* Insert the caller's iocb in the txq tail for later processing. */
10112 list_add_tail(&piocb->list, &pring->txq);
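/*
 * Locking sketch: which lock protects the txq depends on the SLI
 * revision, matching the lockdep assertions above.  A hypothetical
 * SLI4 caller would do:
 *
 *	spin_lock_irqsave(&pring->ring_lock, iflags);
 *	__lpfc_sli_ringtx_put(phba, pring, piocb);
 *	spin_unlock_irqrestore(&pring->ring_lock, iflags);
 *
 * while an SLI3 caller holds phba->hbalock over the same call instead.
 */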
10116 * lpfc_sli_next_iocb - Get the next iocb in the txq
10117 * @phba: Pointer to HBA context object.
10118 * @pring: Pointer to driver SLI ring object.
10119 * @piocb: Pointer to address of newly added command iocb.
10121 * This function is called with hbalock held before a new
10122  * iocb is submitted to the firmware. It flushes the iocbs pending in the
10123  * txq to the firmware before submitting new iocbs.
10125 * If there are iocbs in the txq which need to be submitted
10126 * to firmware, lpfc_sli_next_iocb returns the first element
10127 * of the txq after dequeuing it from txq.
10128  * If the txq is empty, the function returns the original *piocb and
10129  * sets *piocb to NULL. The caller checks *piocb to tell whether more
10130  * commands remain in the txq.
10132 static struct lpfc_iocbq *
10133 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10134 struct lpfc_iocbq **piocb)
10136 struct lpfc_iocbq * nextiocb;
10138 lockdep_assert_held(&phba->hbalock);
10140 nextiocb = lpfc_sli_ringtx_get(phba, pring);
10150 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
10151 * @phba: Pointer to HBA context object.
10152 * @ring_number: SLI ring number to issue iocb on.
10153 * @piocb: Pointer to command iocb.
10154 * @flag: Flag indicating if this command can be put into txq.
10156 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
10157 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
10158 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
10159 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
10160 * this function allows only iocbs for posting buffers. This function finds
10161 * next available slot in the command ring and posts the command to the
10162 * available slot and writes the port attention register to request HBA start
10163  * processing new iocb. If there is no slot available in the ring and
10164  * flag & SLI_IOCB_RET_IOCB is not set, the new iocb is queued to the txq;
10165  * otherwise the function returns IOCB_BUSY.
10167  * This function is called with hbalock held. The function returns success
10168  * after it successfully submits the iocb to firmware or adds it to the txq.
10172 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
10173 struct lpfc_iocbq *piocb, uint32_t flag)
10175 struct lpfc_iocbq *nextiocb;
10177 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
10179 lockdep_assert_held(&phba->hbalock);
10181 if (piocb->cmd_cmpl && (!piocb->vport) &&
10182 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
10183 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
10184 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10185 "1807 IOCB x%x failed. No vport\n",
10186 piocb->iocb.ulpCommand);
10192 /* If the PCI channel is in offline state, do not post iocbs. */
10193 if (unlikely(pci_channel_offline(phba->pcidev)))
10196 /* If HBA has a deferred error attention, fail the iocb. */
10197 if (unlikely(phba->hba_flag & DEFER_ERATT))
10201 * We should never get an IOCB if we are in a < LINK_DOWN state
10203 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10207 	 * Check to see if we are blocking IOCB processing because of an
10208 	 * outstanding event.
10210 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
10213 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
10215 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
10216 * can be issued if the link is not up.
10218 switch (piocb->iocb.ulpCommand) {
10219 case CMD_GEN_REQUEST64_CR:
10220 case CMD_GEN_REQUEST64_CX:
10221 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
10222 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
10223 FC_RCTL_DD_UNSOL_CMD) ||
10224 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
10225 MENLO_TRANSPORT_TYPE))
10229 case CMD_QUE_RING_BUF_CN:
10230 case CMD_QUE_RING_BUF64_CN:
10232 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
10233 * completion, cmd_cmpl MUST be 0.
10235 if (piocb->cmd_cmpl)
10236 piocb->cmd_cmpl = NULL;
10238 case CMD_CREATE_XRI_CR:
10239 case CMD_CLOSE_XRI_CN:
10240 case CMD_CLOSE_XRI_CX:
10247 * For FCP commands, we must be in a state where we can process link
10248 * attention events.
10250 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
10251 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
10255 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
10256 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
10257 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
10260 lpfc_sli_update_ring(phba, pring);
10262 lpfc_sli_update_full_ring(phba, pring);
10265 return IOCB_SUCCESS;
10270 pring->stats.iocb_cmd_delay++;
10274 if (!(flag & SLI_IOCB_RET_IOCB)) {
10275 __lpfc_sli_ringtx_put(phba, pring, piocb);
10276 return IOCB_SUCCESS;
10283 * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
10284 * @phba: Pointer to HBA context object.
10285 * @ring_number: SLI ring number to issue wqe on.
10286 * @piocb: Pointer to command iocb.
10287 * @flag: Flag indicating if this command can be put into txq.
10289  * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
10290  * function to send an iocb command to an HBA with SLI-3 interface spec.
10292  * This function takes the hbalock before invoking the lockless version.
10293  * The function returns success after it successfully submits the iocb to
10294  * firmware or after adding it to the txq.
10297 __lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
10298 struct lpfc_iocbq *piocb, uint32_t flag)
10300 unsigned long iflags;
10303 spin_lock_irqsave(&phba->hbalock, iflags);
10304 rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
10305 spin_unlock_irqrestore(&phba->hbalock, iflags);
10311 * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
10312 * @phba: Pointer to HBA context object.
10313 * @ring_number: SLI ring number to issue wqe on.
10314 * @piocb: Pointer to command iocb.
10315 * @flag: Flag indicating if this command can be put into txq.
10317  * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
10318  * a wqe command to an HBA with SLI-4 interface spec.
10320  * This function is a lockless version. The function returns success
10321  * after it successfully submits the wqe to firmware or adds it to the txq.
10325 __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
10326 struct lpfc_iocbq *piocb, uint32_t flag)
10329 struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;
10331 lpfc_prep_embed_io(phba, lpfc_cmd);
10332 rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
10337 lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
10339 struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
10340 union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
10341 struct sli4_sge *sgl;
10343 /* 128 byte wqe support here */
10344 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10346 if (phba->fcp_embed_io) {
10347 struct fcp_cmnd *fcp_cmnd;
10350 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10352 /* Word 0-2 - FCP_CMND */
10353 wqe->generic.bde.tus.f.bdeFlags =
10354 BUFF_TYPE_BDE_IMMED;
10355 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10356 wqe->generic.bde.addrHigh = 0;
10357 wqe->generic.bde.addrLow = 88; /* Word 22 */
10359 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10360 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10362 /* Word 22-29 FCP CMND Payload */
10363 ptr = &wqe->words[22];
10364 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10366 /* Word 0-2 - Inline BDE */
10367 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
10368 wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
10369 wqe->generic.bde.addrHigh = sgl->addr_hi;
10370 wqe->generic.bde.addrLow = sgl->addr_lo;
10373 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10374 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
10377 /* add the VMID tags as per switch response */
10378 if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
10379 if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
10380 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
10381 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10382 (piocb->vmid_tag.cs_ctl_vmid));
10383 } else if (phba->cfg_vmid_app_header) {
10384 bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
10385 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10386 wqe->words[31] = piocb->vmid_tag.app_id;
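/*
 * Layout note for the two encodings above (assuming a 128-byte WQE):
 * with fcp_embed_io the FCP_CMND payload is copied inline, so words 0-2
 * carry an immediate BDE whose addrLow (88) is the byte offset of word
 * 22, words 22-29 hold the struct fcp_cmnd itself, and WQES is set.
 * Without embedding, words 0-2 carry a 64-bit BDE pointing at the first
 * SGE (the DMA-mapped fcp_cmnd) and DBDE is set instead.
 */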
10392 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
10393 * @phba: Pointer to HBA context object.
10394 * @ring_number: SLI ring number to issue iocb on.
10395 * @piocb: Pointer to command iocb.
10396 * @flag: Flag indicating if this command can be put into txq.
10398 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
10399 * an iocb command to an HBA with SLI-4 interface spec.
10401  * This function is called with ringlock held. The function returns success
10402  * after it successfully submits the iocb to firmware or adds it to the txq.
10406 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10407 struct lpfc_iocbq *piocb, uint32_t flag)
10409 struct lpfc_sglq *sglq;
10410 union lpfc_wqe128 *wqe;
10411 struct lpfc_queue *wq;
10412 struct lpfc_sli_ring *pring;
10413 u32 ulp_command = get_job_cmnd(phba, piocb);
10416 if ((piocb->cmd_flag & LPFC_IO_FCP) ||
10417 (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
10418 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10420 wq = phba->sli4_hba.els_wq;
10423 /* Get corresponding ring */
10427 * The WQE can be either 64 or 128 bytes,
10430 lockdep_assert_held(&pring->ring_lock);
10432 if (piocb->sli4_xritag == NO_XRI) {
10433 if (ulp_command == CMD_ABORT_XRI_CX)
10436 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10438 if (!(flag & SLI_IOCB_RET_IOCB)) {
10439 __lpfc_sli_ringtx_put(phba,
10442 return IOCB_SUCCESS;
10448 } else if (piocb->cmd_flag & LPFC_IO_FCP) {
10449 		/* These IOs already have an XRI and a mapped sgl. */
10454 		 * This is a continuation of a command (CX), so this
10455 		 * sglq is on the active list
10457 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10463 piocb->sli4_lxritag = sglq->sli4_lxritag;
10464 piocb->sli4_xritag = sglq->sli4_xritag;
10466 /* ABTS sent by initiator to CT exchange, the
10467 * RX_ID field will be filled with the newly
10468 * allocated responder XRI.
10470 if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
10471 piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
10472 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10473 piocb->sli4_xritag);
10475 bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
10476 piocb->sli4_xritag);
10478 if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
10482 if (lpfc_sli4_wq_put(wq, wqe))
10485 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10491 * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
10493  * This routine wraps the actual FCP I/O routine, issuing a WQE for SLI-4
10494  * or an IOCB for SLI-3, via the function
10495  * pointer from the lpfc_hba struct.
10498 * IOCB_ERROR - Error
10499 * IOCB_SUCCESS - Success
10503 lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
10504 struct lpfc_iocbq *piocb, uint32_t flag)
10506 return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
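/*
 * Illustrative sketch: the SCSI I/O path issues through this wrapper so
 * the SLI3/SLI4 split stays behind the jump table; the values shown
 * mirror typical usage but are an example only.
 *
 *	rc = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING,
 *				   &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
 *	// on failure the caller still owns the iocb and must unwind it
 */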
10510 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10512  * This routine wraps the actual lockless IOCB-issuing routine via the function
10513  * pointer from the lpfc_hba struct.
10516 * IOCB_ERROR - Error
10517 * IOCB_SUCCESS - Success
10521 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10522 struct lpfc_iocbq *piocb, uint32_t flag)
10524 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10528 __lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
10529 struct lpfc_vport *vport,
10530 struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10531 u32 elscmd, u8 tmo, u8 expect_rsp)
10533 struct lpfc_hba *phba = vport->phba;
10536 cmd = &cmdiocbq->iocb;
10537 memset(cmd, 0, sizeof(*cmd));
10539 cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10540 cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10541 cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10544 cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
10545 cmd->un.elsreq64.remoteID = did; /* DID */
10546 cmd->ulpCommand = CMD_ELS_REQUEST64_CR;
10547 cmd->ulpTimeout = tmo;
10549 cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
10550 cmd->un.genreq64.xmit_els_remoteID = did; /* DID */
10551 cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
10553 cmd->ulpBdeCount = 1;
10555 cmd->ulpClass = CLASS3;
10557 /* If we have NPIV enabled, we want to send ELS traffic by VPI. */
10558 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
10560 cmd->un.elsreq64.myID = vport->fc_myDID;
10562 /* For ELS_REQUEST64_CR, use the VPI by default */
10563 cmd->ulpContext = phba->vpi_ids[vport->vpi];
10567 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
10568 if (elscmd == ELS_CMD_ECHO)
10569 cmd->ulpCt_l = 0; /* context = invalid RPI */
10571 cmd->ulpCt_l = 1; /* context = VPI */
10576 __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
10577 struct lpfc_vport *vport,
10578 struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10579 u32 elscmd, u8 tmo, u8 expect_rsp)
10581 struct lpfc_hba *phba = vport->phba;
10582 union lpfc_wqe128 *wqe;
10583 struct ulp_bde64_le *bde;
10586 wqe = &cmdiocbq->wqe;
10587 memset(wqe, 0, sizeof(*wqe));
10589 /* Word 0 - 2 BDE */
10590 bde = (struct ulp_bde64_le *)&wqe->generic.bde;
10591 bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
10592 bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
10593 bde->type_size = cpu_to_le32(cmd_size);
10594 bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10597 bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);
10599 /* Transfer length */
10600 wqe->els_req.payload_len = cmd_size;
10601 wqe->els_req.max_response_payload_len = FCELSSIZE;
10604 bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
10606 /* Word 11 - ELS_ID */
10608 case ELS_CMD_PLOGI:
10609 els_id = LPFC_ELS_ID_PLOGI;
10611 case ELS_CMD_FLOGI:
10612 els_id = LPFC_ELS_ID_FLOGI;
10615 els_id = LPFC_ELS_ID_LOGO;
10617 case ELS_CMD_FDISC:
10618 if (!vport->fc_myDID) {
10619 els_id = LPFC_ELS_ID_FDISC;
10624 els_id = LPFC_ELS_ID_DEFAULT;
10628 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
10631 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);
10633 /* Transfer length */
10634 wqe->xmit_els_rsp.response_payload_len = cmd_size;
10636 bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
10637 CMD_XMIT_ELS_RSP64_WQE);
10640 bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
10641 bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag);
10642 bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
10644 /* If we have NPIV enabled, we want to send ELS traffic by VPI.
10645 * For SLI4, since the driver controls VPIs we also want to include
10646 * all ELS pt2pt protocol traffic as well.
10648 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
10649 (vport->fc_flag & FC_PT2PT)) {
10651 bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
10653 /* For ELS_REQUEST64_WQE, use the VPI by default */
10654 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10655 phba->vpi_ids[vport->vpi]);
10658 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
10659 if (elscmd == ELS_CMD_ECHO)
10660 bf_set(wqe_ct, &wqe->generic.wqe_com, 0);
10662 bf_set(wqe_ct, &wqe->generic.wqe_com, 1);
10667 lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10668 struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
10669 u16 cmd_size, u32 did, u32 elscmd, u8 tmo,
10672 phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did,
10673 elscmd, tmo, expect_rsp);
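/*
 * Illustrative sketch: an ELS originator prepares the request via the
 * jump table and then issues it on the ELS ring; expect_rsp = 1 selects
 * the ELS_REQUEST64 form built above, 0 the XMIT_ELS_RSP64 form.
 *
 *	lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size,
 *				  did, ELS_CMD_PLOGI, tmo, 1);
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 */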
10677 __lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10678 u16 rpi, u32 num_entry, u8 tmo)
10682 cmd = &cmdiocbq->iocb;
10683 memset(cmd, 0, sizeof(*cmd));
10685 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10686 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10687 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10688 cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64);
10690 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
10691 cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
10692 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
10694 cmd->ulpContext = rpi;
10695 cmd->ulpClass = CLASS3;
10696 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
10697 cmd->ulpBdeCount = 1;
10699 cmd->ulpOwner = OWN_CHIP;
10700 cmd->ulpTimeout = tmo;
10704 __lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10705 u16 rpi, u32 num_entry, u8 tmo)
10707 union lpfc_wqe128 *cmdwqe;
10708 struct ulp_bde64_le *bde, *bpl;
10709 u32 xmit_len = 0, total_len = 0, size, type, i;
10711 cmdwqe = &cmdiocbq->wqe;
10712 memset(cmdwqe, 0, sizeof(*cmdwqe));
10714 /* Calculate total_len and xmit_len */
10715 bpl = (struct ulp_bde64_le *)bmp->virt;
10716 for (i = 0; i < num_entry; i++) {
10717 size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
10720 for (i = 0; i < num_entry; i++) {
10721 size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
10722 type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK;
10723 if (type != ULP_BDE64_TYPE_BDE_64)
10729 bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
10730 bde->addr_low = bpl->addr_low;
10731 bde->addr_high = bpl->addr_high;
10732 bde->type_size = cpu_to_le32(xmit_len);
10733 bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10736 cmdwqe->gen_req.request_payload_len = xmit_len;
10739 bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT);
10740 bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
10741 bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1);
10742 bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1);
10745 bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi);
10748 bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo);
10749 bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3);
10750 bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR);
10751 bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI);
10754 cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len;
10758 lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10759 struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo)
10761 phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
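/*
 * Illustrative sketch: a CT pass-through sender builds its BPL in bmp,
 * prepares the GEN_REQUEST64 via the jump table, and issues it on the
 * ELS ring; the completion handler name is hypothetical.
 *
 *	lpfc_sli_prep_gen_req(phba, geniocb, bmp, rpi, num_entry, tmo);
 *	geniocb->cmd_cmpl = my_ct_cmpl;
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
 */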
10765 __lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq,
10766 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
10767 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
10771 icmd = &cmdiocbq->iocb;
10772 memset(icmd, 0, sizeof(*icmd));
10774 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10775 icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
10776 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10777 icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
10778 icmd->un.xseq64.w5.hcsw.Fctl = LA;
10780 icmd->un.xseq64.w5.hcsw.Fctl |= LS;
10781 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
10782 icmd->un.xseq64.w5.hcsw.Rctl = rctl;
10783 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
10785 icmd->ulpBdeCount = 1;
10787 icmd->ulpClass = CLASS3;
10789 switch (cr_cx_cmd) {
10790 case CMD_XMIT_SEQUENCE64_CR:
10791 icmd->ulpContext = rpi;
10792 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
10794 case CMD_XMIT_SEQUENCE64_CX:
10795 icmd->ulpContext = ox_id;
10796 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
10804 __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
10805 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
10806 u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
10808 union lpfc_wqe128 *wqe;
10809 struct ulp_bde64 *bpl;
10811 wqe = &cmdiocbq->wqe;
10812 memset(wqe, 0, sizeof(*wqe));
10815 bpl = (struct ulp_bde64 *)bmp->virt;
10816 wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
10817 wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
10818 wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
10821 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
10822 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1);
10823 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
10824 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl);
10825 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT);
10828 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi);
10830 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
10831 CMD_XMIT_SEQUENCE64_WQE);
10834 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
10837 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
10840 if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK))
10841 wqe->xmit_sequence.xmit_len = full_size;
10843 wqe->xmit_sequence.xmit_len =
10844 wqe->xmit_sequence.bde.tus.f.bdeSize;
10848 lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10849 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
10850 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
10852 phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry,
10853 rctl, last_seq, cr_cx_cmd);
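/*
 * Illustrative sketch: transmitting a CT response as a sequence reuses
 * the received exchange, so the caller passes the ox_id from the
 * unsolicited frame and selects the _CX form:
 *
 *	lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, rpi, ox_id, num_entry,
 *				 FC_RCTL_DD_SOL_CTL, 1,
 *				 CMD_XMIT_SEQUENCE64_CX);
 */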
10857 __lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
10858 u16 iotag, u8 ulp_class, u16 cqid, bool ia)
10860 IOCB_t *icmd = NULL;
10862 icmd = &cmdiocbq->iocb;
10863 memset(icmd, 0, sizeof(*icmd));
10866 icmd->un.acxri.abortContextTag = ulp_context;
10867 icmd->un.acxri.abortIoTag = iotag;
10871 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
10874 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
10877 icmd->ulpClass = ulp_class;
10878 icmd->ulpCommand = CMD_ABORT_XRI_CN;
10886 __lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
10887 u16 iotag, u8 ulp_class, u16 cqid, bool ia)
10889 union lpfc_wqe128 *wqe;
10891 wqe = &cmdiocbq->wqe;
10892 memset(wqe, 0, sizeof(*wqe));
10895 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
10897 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
10899 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
10902 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE);
10905 wqe->abort_cmd.wqe_com.abort_tag = ulp_context;
10908 bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag);
10911 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
10914 bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
10915 bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
10919 lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10920 u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
10923 phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
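/*
 * Illustrative sketch: an abort is built against the original command's
 * context and iotag; ia selects the "no ABTS on the wire" close form
 * (CLOSE_XRI on SLI3, the IA bit on SLI4), as the two prep routines
 * above show.
 *
 *	lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
 *				ulp_class, cqid, ia);
 */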
10928 * lpfc_sli_api_table_setup - Set up sli api function jump table
10929 * @phba: The hba struct for which this call is being executed.
10930 * @dev_grp: The HBA PCI-Device group number.
10932  * This routine sets up the SLI interface API function jump table in the @phba struct.
10934 * Returns: 0 - success, -ENODEV - failure.
10937 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10941 case LPFC_PCI_DEV_LP:
10942 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10943 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10944 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
10945 phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
10946 phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
10947 phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
10948 phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
10950 case LPFC_PCI_DEV_OC:
10951 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10952 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10953 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
10954 phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
10955 phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
10956 phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
10957 phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
10960 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10961 "1419 Invalid HBA PCI-device group: 0x%x\n",
10969 * lpfc_sli4_calc_ring - Calculates which ring to use
10970 * @phba: Pointer to HBA context object.
10971 * @piocb: Pointer to command iocb.
10973  * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
10974  * hba_wqidx, so we need to calculate the corresponding ring.
10975  * Since ABORTS must go on the same WQ as the command they are
10976  * aborting, we use the command's hba_wqidx.
10978 struct lpfc_sli_ring *
10979 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10981 struct lpfc_io_buf *lpfc_cmd;
10983 if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10984 if (unlikely(!phba->sli4_hba.hdwq))
10987 		 * For an abort iocb, hba_wqidx should already
10988 		 * be set up based on what work queue we used.
10990 if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
10991 lpfc_cmd = piocb->io_buf;
10992 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10994 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10996 if (unlikely(!phba->sli4_hba.els_wq))
10998 piocb->hba_wqidx = 0;
10999 return phba->sli4_hba.els_wq->pring;
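/*
 * Illustrative sketch: because an abort must ride the same WQ as the
 * command it aborts, the abort iocb typically inherits the original's
 * hba_wqidx and sets LPFC_USE_FCPWQIDX before the ring is resolved:
 *
 *	abtsiocbq->hba_wqidx = cmdiocbq->hba_wqidx;
 *	abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
 *	pring = lpfc_sli4_calc_ring(phba, abtsiocbq);
 */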
11004 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
11005 * @phba: Pointer to HBA context object.
11006 * @ring_number: Ring number
11007 * @piocb: Pointer to command iocb.
11008 * @flag: Flag indicating if this command can be put into txq.
11010 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
11011  * function. This function takes the appropriate lock (the ring_lock for
11012  * SLI4, the hbalock for SLI3), calls __lpfc_sli_issue_iocb, and returns
11013  * whatever error __lpfc_sli_issue_iocb returns. This wrapper is used by
11014  * functions which do not already hold the lock.
11017 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
11018 struct lpfc_iocbq *piocb, uint32_t flag)
11020 struct lpfc_sli_ring *pring;
11021 struct lpfc_queue *eq;
11022 unsigned long iflags;
11025 /* If the PCI channel is in offline state, do not post iocbs. */
11026 if (unlikely(pci_channel_offline(phba->pcidev)))
11029 if (phba->sli_rev == LPFC_SLI_REV4) {
11030 lpfc_sli_prep_wqe(phba, piocb);
11032 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
11034 pring = lpfc_sli4_calc_ring(phba, piocb);
11035 if (unlikely(pring == NULL))
11038 spin_lock_irqsave(&pring->ring_lock, iflags);
11039 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11040 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11042 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
11044 /* For now, SLI2/3 will still use hbalock */
11045 spin_lock_irqsave(&phba->hbalock, iflags);
11046 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11047 spin_unlock_irqrestore(&phba->hbalock, iflags);
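/*
 * Illustrative sketch: callers that hold no ring lock go through this
 * wrapper; on error the iocb is still owned by the caller, shown here
 * for a hypothetical ELS command.
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *	if (rc == IOCB_ERROR)
 *		lpfc_els_free_iocb(phba, elsiocb);
 */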
11053 * lpfc_extra_ring_setup - Extra ring setup function
11054 * @phba: Pointer to HBA context object.
11056 * This function is called while driver attaches with the
11057 * HBA to setup the extra ring. The extra ring is used
11058 * only when driver needs to support target mode functionality
11059 * or IP over FC functionalities.
11061 * This function is called with no lock held. SLI3 only.
11064 lpfc_extra_ring_setup( struct lpfc_hba *phba)
11066 struct lpfc_sli *psli;
11067 struct lpfc_sli_ring *pring;
11071 /* Adjust cmd/rsp ring iocb entries more evenly */
11073 /* Take some away from the FCP ring */
11074 pring = &psli->sli3_ring[LPFC_FCP_RING];
11075 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11076 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11077 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11078 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11080 /* and give them to the extra ring */
11081 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
11083 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11084 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11085 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11086 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11088 /* Setup default profile for this ring */
11089 pring->iotag_max = 4096;
11090 pring->num_mask = 1;
11091 pring->prt[0].profile = 0; /* Mask 0 */
11092 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
11093 pring->prt[0].type = phba->cfg_multi_ring_type;
11094 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
11099 lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
11100 struct lpfc_nodelist *ndlp)
11102 unsigned long iflags;
11103 struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
11105 spin_lock_irqsave(&phba->hbalock, iflags);
11106 if (!list_empty(&evtp->evt_listp)) {
11107 spin_unlock_irqrestore(&phba->hbalock, iflags);
11111 /* Incrementing the reference count until the queued work is done. */
11112 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
11113 if (!evtp->evt_arg1) {
11114 spin_unlock_irqrestore(&phba->hbalock, iflags);
11117 evtp->evt = LPFC_EVT_RECOVER_PORT;
11118 list_add_tail(&evtp->evt_listp, &phba->work_list);
11119 spin_unlock_irqrestore(&phba->hbalock, iflags);
11121 lpfc_worker_wake_up(phba);
11124 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
11125 * @phba: Pointer to HBA context object.
11126 * @iocbq: Pointer to iocb object.
11128 * The async_event handler calls this routine when it receives
11129 * an ASYNC_STATUS_CN event from the port. The port generates
11130 * this event when an Abort Sequence request to an rport fails
11131 * twice in succession. The abort could be originated by the
11132 * driver or by the port. The ABTS could have been for an ELS
11133 * or FCP IO. The port only generates this event when an ABTS
11134 * fails to complete after one retry.
11137 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
11138 struct lpfc_iocbq *iocbq)
11140 struct lpfc_nodelist *ndlp = NULL;
11141 uint16_t rpi = 0, vpi = 0;
11142 struct lpfc_vport *vport = NULL;
11144 /* The rpi in the ulpContext is vport-sensitive. */
11145 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
11146 rpi = iocbq->iocb.ulpContext;
11148 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11149 "3092 Port generated ABTS async event "
11150 "on vpi %d rpi %d status 0x%x\n",
11151 vpi, rpi, iocbq->iocb.ulpStatus);
11153 vport = lpfc_find_vport_by_vpid(phba, vpi);
11156 ndlp = lpfc_findnode_rpi(vport, rpi);
11160 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
11161 lpfc_sli_abts_recover_port(vport, ndlp);
11165 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11166 "3095 Event Context not found, no "
11167 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
11168 vpi, rpi, iocbq->iocb.ulpStatus,
11169 iocbq->iocb.ulpContext);
11172 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
11173 * @phba: pointer to HBA context object.
11174 * @ndlp: nodelist pointer for the impacted rport.
11175 * @axri: pointer to the wcqe containing the failed exchange.
11177 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
11178 * port. The port generates this event when an abort exchange request to an
11179 * rport fails twice in succession with no reply. The abort could be originated
11180 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
11183 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
11184 struct lpfc_nodelist *ndlp,
11185 struct sli4_wcqe_xri_aborted *axri)
11187 uint32_t ext_status = 0;
11190 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11191 "3115 Node Context not found, driver "
11192 "ignoring abts err event\n");
11196 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11197 "3116 Port generated FCP XRI ABORT event on "
11198 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
11199 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
11200 bf_get(lpfc_wcqe_xa_xri, axri),
11201 bf_get(lpfc_wcqe_xa_status, axri),
11205 * Catch the ABTS protocol failure case. Older OCe FW releases returned
11206 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
11207 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
11209 ext_status = axri->parameter & IOERR_PARAM_MASK;
11210 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
11211 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
11212 lpfc_sli_post_recovery_event(phba, ndlp);
11216 * lpfc_sli_async_event_handler - ASYNC iocb handler function
11217 * @phba: Pointer to HBA context object.
11218 * @pring: Pointer to driver SLI ring object.
11219 * @iocbq: Pointer to iocb object.
11221 * This function is called by the slow ring event handler
11222 * function when there is an ASYNC event iocb in the ring.
11223 * This function is called with no lock held.
11224 * Currently this function handles only temperature related
11225 * ASYNC events. The function decodes the temperature sensor
11226 * event message and posts events for the management applications.
11229 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
11230 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
11234 struct temp_event temp_event_data;
11235 struct Scsi_Host *shost;
11238 icmd = &iocbq->iocb;
11239 evt_code = icmd->un.asyncstat.evt_code;
11241 switch (evt_code) {
11242 case ASYNC_TEMP_WARN:
11243 case ASYNC_TEMP_SAFE:
11244 temp_event_data.data = (uint32_t) icmd->ulpContext;
11245 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
11246 if (evt_code == ASYNC_TEMP_WARN) {
11247 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
11248 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11249 "0347 Adapter is very hot, please take "
11250 "corrective action. temperature : %d Celsius\n",
11251 (uint32_t) icmd->ulpContext);
11253 temp_event_data.event_code = LPFC_NORMAL_TEMP;
11254 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11255 "0340 Adapter temperature is OK now. "
11256 "temperature : %d Celsius\n",
11257 (uint32_t) icmd->ulpContext);
11260 /* Send temperature change event to applications */
11261 shost = lpfc_shost_from_vport(phba->pport);
11262 fc_host_post_vendor_event(shost, fc_get_event_number(),
11263 sizeof(temp_event_data), (char *) &temp_event_data,
11264 LPFC_NL_VENDOR_ID);
11266 case ASYNC_STATUS_CN:
11267 lpfc_sli_abts_err_handler(phba, iocbq);
11270 iocb_w = (uint32_t *) icmd;
11271 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11272 "0346 Ring %d handler: unexpected ASYNC_STATUS"
11274 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
11275 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
11276 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
11277 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
11278 pring->ringno, icmd->un.asyncstat.evt_code,
11279 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
11280 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
11281 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
11282 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
11290 * lpfc_sli4_setup - SLI ring setup function
11291 * @phba: Pointer to HBA context object.
11293  * lpfc_sli4_setup sets up the ELS ring of the SLI-4 interface with its
11294  * unsolicited-event handler masks. This function is
11295  * called while the driver attaches to the HBA and before the
11296  * interrupts are enabled, so there is no need for locking.
11298 * This function always returns 0.
11301 lpfc_sli4_setup(struct lpfc_hba *phba)
11303 struct lpfc_sli_ring *pring;
11305 pring = phba->sli4_hba.els_wq->pring;
11306 pring->num_mask = LPFC_MAX_RING_MASK;
11307 pring->prt[0].profile = 0; /* Mask 0 */
11308 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11309 pring->prt[0].type = FC_TYPE_ELS;
11310 pring->prt[0].lpfc_sli_rcv_unsol_event =
11311 lpfc_els_unsol_event;
11312 pring->prt[1].profile = 0; /* Mask 1 */
11313 pring->prt[1].rctl = FC_RCTL_ELS_REP;
11314 pring->prt[1].type = FC_TYPE_ELS;
11315 pring->prt[1].lpfc_sli_rcv_unsol_event =
11316 lpfc_els_unsol_event;
11317 pring->prt[2].profile = 0; /* Mask 2 */
11318 /* NameServer Inquiry */
11319 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11321 pring->prt[2].type = FC_TYPE_CT;
11322 pring->prt[2].lpfc_sli_rcv_unsol_event =
11323 lpfc_ct_unsol_event;
11324 pring->prt[3].profile = 0; /* Mask 3 */
11325 /* NameServer response */
11326 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11328 pring->prt[3].type = FC_TYPE_CT;
11329 pring->prt[3].lpfc_sli_rcv_unsol_event =
11330 lpfc_ct_unsol_event;
11335 * lpfc_sli_setup - SLI ring setup function
11336 * @phba: Pointer to HBA context object.
11338  * lpfc_sli_setup sets up rings of the SLI interface with
11339  * the number of iocbs per ring and iotags. This function is
11340  * called while the driver attaches to the HBA and before the
11341  * interrupts are enabled, so there is no need for locking.
11343 * This function always returns 0. SLI3 only.
11346 lpfc_sli_setup(struct lpfc_hba *phba)
11348 int i, totiocbsize = 0;
11349 struct lpfc_sli *psli = &phba->sli;
11350 struct lpfc_sli_ring *pring;
11352 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
11353 psli->sli_flag = 0;
11355 psli->iocbq_lookup = NULL;
11356 psli->iocbq_lookup_len = 0;
11357 psli->last_iotag = 0;
11359 for (i = 0; i < psli->num_rings; i++) {
11360 pring = &psli->sli3_ring[i];
11362 case LPFC_FCP_RING: /* ring 0 - FCP */
11363 /* numCiocb and numRiocb are used in config_port */
11364 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
11365 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
11366 pring->sli.sli3.numCiocb +=
11367 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11368 pring->sli.sli3.numRiocb +=
11369 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11370 pring->sli.sli3.numCiocb +=
11371 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11372 pring->sli.sli3.numRiocb +=
11373 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11374 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11375 SLI3_IOCB_CMD_SIZE :
11376 SLI2_IOCB_CMD_SIZE;
11377 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11378 SLI3_IOCB_RSP_SIZE :
11379 SLI2_IOCB_RSP_SIZE;
11380 pring->iotag_ctr = 0;
11382 (phba->cfg_hba_queue_depth * 2);
11383 pring->fast_iotag = pring->iotag_max;
11384 pring->num_mask = 0;
11386 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
11387 /* numCiocb and numRiocb are used in config_port */
11388 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
11389 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
11390 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11391 SLI3_IOCB_CMD_SIZE :
11392 SLI2_IOCB_CMD_SIZE;
11393 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11394 SLI3_IOCB_RSP_SIZE :
11395 SLI2_IOCB_RSP_SIZE;
11396 pring->iotag_max = phba->cfg_hba_queue_depth;
11397 pring->num_mask = 0;
11399 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
11400 /* numCiocb and numRiocb are used in config_port */
11401 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
11402 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
11403 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11404 SLI3_IOCB_CMD_SIZE :
11405 SLI2_IOCB_CMD_SIZE;
11406 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11407 SLI3_IOCB_RSP_SIZE :
11408 SLI2_IOCB_RSP_SIZE;
11409 pring->fast_iotag = 0;
11410 pring->iotag_ctr = 0;
11411 pring->iotag_max = 4096;
11412 pring->lpfc_sli_rcv_async_status =
11413 lpfc_sli_async_event_handler;
11414 pring->num_mask = LPFC_MAX_RING_MASK;
11415 pring->prt[0].profile = 0; /* Mask 0 */
11416 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11417 pring->prt[0].type = FC_TYPE_ELS;
11418 pring->prt[0].lpfc_sli_rcv_unsol_event =
11419 lpfc_els_unsol_event;
11420 pring->prt[1].profile = 0; /* Mask 1 */
11421 pring->prt[1].rctl = FC_RCTL_ELS_REP;
11422 pring->prt[1].type = FC_TYPE_ELS;
11423 pring->prt[1].lpfc_sli_rcv_unsol_event =
11424 lpfc_els_unsol_event;
11425 pring->prt[2].profile = 0; /* Mask 2 */
11426 /* NameServer Inquiry */
11427 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11429 pring->prt[2].type = FC_TYPE_CT;
11430 pring->prt[2].lpfc_sli_rcv_unsol_event =
11431 lpfc_ct_unsol_event;
11432 pring->prt[3].profile = 0; /* Mask 3 */
11433 /* NameServer response */
11434 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11436 pring->prt[3].type = FC_TYPE_CT;
11437 pring->prt[3].lpfc_sli_rcv_unsol_event =
11438 lpfc_ct_unsol_event;
11441 totiocbsize += (pring->sli.sli3.numCiocb *
11442 pring->sli.sli3.sizeCiocb) +
11443 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
11445 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
11446 /* Too many cmd / rsp ring entries in SLI2 SLIM */
11447 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
11448 "SLI2 SLIM Data: x%x x%lx\n",
11449 phba->brd_no, totiocbsize,
11450 (unsigned long) MAX_SLIM_IOCB_SIZE);
11452 if (phba->cfg_multi_ring_support == 2)
11453 lpfc_extra_ring_setup(phba);
11459 * lpfc_sli4_queue_init - Queue initialization function
11460 * @phba: Pointer to HBA context object.
11462 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
11463 * ring. This function also initializes ring indices of each ring.
11464 * This function is called during the initialization of the SLI
11465 * interface of an HBA.
11466 * This function is called with no lock held and always returns
11470 lpfc_sli4_queue_init(struct lpfc_hba *phba)
11472 struct lpfc_sli *psli;
11473 struct lpfc_sli_ring *pring;
11477 spin_lock_irq(&phba->hbalock);
11478 INIT_LIST_HEAD(&psli->mboxq);
11479 INIT_LIST_HEAD(&psli->mboxq_cmpl);
11480 /* Initialize list headers for txq and txcmplq as double linked lists */
11481 for (i = 0; i < phba->cfg_hdw_queue; i++) {
11482 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
11484 pring->ringno = LPFC_FCP_RING;
11485 pring->txcmplq_cnt = 0;
11486 INIT_LIST_HEAD(&pring->txq);
11487 INIT_LIST_HEAD(&pring->txcmplq);
11488 INIT_LIST_HEAD(&pring->iocb_continueq);
11489 spin_lock_init(&pring->ring_lock);
11491 pring = phba->sli4_hba.els_wq->pring;
11493 pring->ringno = LPFC_ELS_RING;
11494 pring->txcmplq_cnt = 0;
11495 INIT_LIST_HEAD(&pring->txq);
11496 INIT_LIST_HEAD(&pring->txcmplq);
11497 INIT_LIST_HEAD(&pring->iocb_continueq);
11498 spin_lock_init(&pring->ring_lock);
11500 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11501 pring = phba->sli4_hba.nvmels_wq->pring;
11503 pring->ringno = LPFC_ELS_RING;
11504 pring->txcmplq_cnt = 0;
11505 INIT_LIST_HEAD(&pring->txq);
11506 INIT_LIST_HEAD(&pring->txcmplq);
11507 INIT_LIST_HEAD(&pring->iocb_continueq);
11508 spin_lock_init(&pring->ring_lock);
11511 spin_unlock_irq(&phba->hbalock);
11515 * lpfc_sli_queue_init - Queue initialization function
11516 * @phba: Pointer to HBA context object.
11518 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
11519 * ring. This function also initializes ring indices of each ring.
11520 * This function is called during the initialization of the SLI
11521 * interface of an HBA.
11522 * This function is called with no lock held and always returns
11526 lpfc_sli_queue_init(struct lpfc_hba *phba)
11528 struct lpfc_sli *psli;
11529 struct lpfc_sli_ring *pring;
11533 spin_lock_irq(&phba->hbalock);
11534 INIT_LIST_HEAD(&psli->mboxq);
11535 INIT_LIST_HEAD(&psli->mboxq_cmpl);
11536 /* Initialize list headers for txq and txcmplq as double linked lists */
11537 for (i = 0; i < psli->num_rings; i++) {
11538 pring = &psli->sli3_ring[i];
11540 pring->sli.sli3.next_cmdidx = 0;
11541 pring->sli.sli3.local_getidx = 0;
11542 pring->sli.sli3.cmdidx = 0;
11543 INIT_LIST_HEAD(&pring->iocb_continueq);
11544 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
11545 INIT_LIST_HEAD(&pring->postbufq);
11547 INIT_LIST_HEAD(&pring->txq);
11548 INIT_LIST_HEAD(&pring->txcmplq);
11549 spin_lock_init(&pring->ring_lock);
11551 spin_unlock_irq(&phba->hbalock);
11555 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
11556 * @phba: Pointer to HBA context object.
11558 * This routine flushes the mailbox command subsystem. It will unconditionally
11559 * flush all the mailbox commands in the three possible stages in the mailbox
11560 * command sub-system: pending mailbox command queue; the outstanding mailbox
11561  * command; and the completed mailbox command queue. It is the caller's responsibility
11562 * to make sure that the driver is in the proper state to flush the mailbox
11563 * command sub-system. Namely, the posting of mailbox commands into the
11564 * pending mailbox command queue from the various clients must be stopped;
11565  * either the HBA is in a state in which it will never work on the outstanding
11566 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
11567 * mailbox command has been completed.
11570 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
11572 LIST_HEAD(completions);
11573 struct lpfc_sli *psli = &phba->sli;
11575 unsigned long iflag;
11577 /* Disable softirqs, including timers from obtaining phba->hbalock */
11578 local_bh_disable();
11580 /* Flush all the mailbox commands in the mbox system */
11581 spin_lock_irqsave(&phba->hbalock, iflag);
11583 /* The pending mailbox command queue */
11584 list_splice_init(&phba->sli.mboxq, &completions);
11585 /* The outstanding active mailbox command */
11586 if (psli->mbox_active) {
11587 list_add_tail(&psli->mbox_active->list, &completions);
11588 psli->mbox_active = NULL;
11589 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11591 /* The completed mailbox command queue */
11592 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11593 spin_unlock_irqrestore(&phba->hbalock, iflag);
11595 /* Enable softirqs again, done with phba->hbalock */
11598 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
11599 while (!list_empty(&completions)) {
11600 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11601 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11602 if (pmb->mbox_cmpl)
11603 pmb->mbox_cmpl(phba, pmb);
11608 * lpfc_sli_host_down - Vport cleanup function
11609 * @vport: Pointer to virtual port object.
11611 * lpfc_sli_host_down is called to clean up the resources
11612 * associated with a vport before destroying virtual
11613 * port data structures.
11614  * This function does the following operations:
11615 * - Free discovery resources associated with this virtual
11617 * - Free iocbs associated with this virtual port in
11619 * - Send abort for all iocb commands associated with this
11620 * vport in txcmplq.
11622 * This function is called with no lock held and always returns 1.
11625 lpfc_sli_host_down(struct lpfc_vport *vport)
11627 LIST_HEAD(completions);
11628 struct lpfc_hba *phba = vport->phba;
11629 struct lpfc_sli *psli = &phba->sli;
11630 struct lpfc_queue *qp = NULL;
11631 struct lpfc_sli_ring *pring;
11632 struct lpfc_iocbq *iocb, *next_iocb;
11634 unsigned long flags = 0;
11635 uint16_t prev_pring_flag;
11637 lpfc_cleanup_discovery_resources(vport);
11639 spin_lock_irqsave(&phba->hbalock, flags);
11642 * Error everything on the txq since these iocbs
11643 * have not been given to the FW yet.
11644 * Also issue ABTS for everything on the txcmplq
11646 if (phba->sli_rev != LPFC_SLI_REV4) {
11647 for (i = 0; i < psli->num_rings; i++) {
11648 pring = &psli->sli3_ring[i];
11649 prev_pring_flag = pring->flag;
11650 /* Only slow rings */
11651 if (pring->ringno == LPFC_ELS_RING) {
11652 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11653 /* Set the lpfc data pending flag */
11654 set_bit(LPFC_DATA_READY, &phba->data_flags);
11656 list_for_each_entry_safe(iocb, next_iocb,
11657 &pring->txq, list) {
11658 if (iocb->vport != vport)
11660 list_move_tail(&iocb->list, &completions);
11662 list_for_each_entry_safe(iocb, next_iocb,
11663 &pring->txcmplq, list) {
11664 if (iocb->vport != vport)
11666 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11669 pring->flag = prev_pring_flag;
11672 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11676 if (pring == phba->sli4_hba.els_wq->pring) {
11677 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11678 /* Set the lpfc data pending flag */
11679 set_bit(LPFC_DATA_READY, &phba->data_flags);
11681 prev_pring_flag = pring->flag;
11682 spin_lock(&pring->ring_lock);
11683 list_for_each_entry_safe(iocb, next_iocb,
11684 &pring->txq, list) {
11685 if (iocb->vport != vport)
11687 list_move_tail(&iocb->list, &completions);
11689 spin_unlock(&pring->ring_lock);
11690 list_for_each_entry_safe(iocb, next_iocb,
11691 &pring->txcmplq, list) {
11692 if (iocb->vport != vport)
11694 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11697 pring->flag = prev_pring_flag;
11700 spin_unlock_irqrestore(&phba->hbalock, flags);
11702 /* Make sure HBA is alive */
11703 lpfc_issue_hb_tmo(phba);
11705 /* Cancel all the IOCBs from the completions list */
11706 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11712 * lpfc_sli_hba_down - Resource cleanup function for the HBA
11713 * @phba: Pointer to HBA context object.
11715 * This function cleans up all iocb, buffers, mailbox commands
11716 * while shutting down the HBA. This function is called with no
11717 * lock held and always returns 1.
11718 * This function does the following to cleanup driver resources:
11719 * - Free discovery resources for each virtual port
11720 * - Cleanup any pending fabric iocbs
11721 * - Iterate through the iocb txq and free each entry
11723 * - Free up any buffer posted to the HBA
11724 * - Free mailbox commands in the mailbox queue.
11727 lpfc_sli_hba_down(struct lpfc_hba *phba)
11729 LIST_HEAD(completions);
11730 struct lpfc_sli *psli = &phba->sli;
11731 struct lpfc_queue *qp = NULL;
11732 struct lpfc_sli_ring *pring;
11733 struct lpfc_dmabuf *buf_ptr;
11734 unsigned long flags = 0;
11737 /* Shutdown the mailbox command sub-system */
11738 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
11740 lpfc_hba_down_prep(phba);
11742 /* Disable softirqs, including timers from obtaining phba->hbalock */
11743 local_bh_disable();
11745 lpfc_fabric_abort_hba(phba);
11747 spin_lock_irqsave(&phba->hbalock, flags);
11750 * Error everything on the txq since these iocbs
11751 * have not been given to the FW yet.
11753 if (phba->sli_rev != LPFC_SLI_REV4) {
11754 for (i = 0; i < psli->num_rings; i++) {
11755 pring = &psli->sli3_ring[i];
11756 /* Only slow rings */
11757 if (pring->ringno == LPFC_ELS_RING) {
11758 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11759 /* Set the lpfc data pending flag */
11760 set_bit(LPFC_DATA_READY, &phba->data_flags);
11762 list_splice_init(&pring->txq, &completions);
11765 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11769 spin_lock(&pring->ring_lock);
11770 list_splice_init(&pring->txq, &completions);
11771 spin_unlock(&pring->ring_lock);
11772 if (pring == phba->sli4_hba.els_wq->pring) {
11773 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11774 /* Set the lpfc data pending flag */
11775 set_bit(LPFC_DATA_READY, &phba->data_flags);
11779 spin_unlock_irqrestore(&phba->hbalock, flags);
11781 /* Cancel all the IOCBs from the completions list */
11782 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11785 spin_lock_irqsave(&phba->hbalock, flags);
11786 list_splice_init(&phba->elsbuf, &completions);
11787 phba->elsbuf_cnt = 0;
11788 phba->elsbuf_prev_cnt = 0;
11789 spin_unlock_irqrestore(&phba->hbalock, flags);
11791 while (!list_empty(&completions)) {
11792 list_remove_head(&completions, buf_ptr,
11793 struct lpfc_dmabuf, list);
11794 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
11798 /* Enable softirqs again, done with phba->hbalock */
11801 /* Return any active mbox cmds */
11802 del_timer_sync(&psli->mbox_tmo);
11804 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
11805 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11806 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
11812 * lpfc_sli_pcimem_bcopy - SLI memory copy function
11813 * @srcp: Source memory pointer.
11814 * @destp: Destination memory pointer.
11815  * @cnt: Number of bytes to copy (processed one 32-bit word at a time).
11817 * This function is used for copying data between driver memory
11818 * and the SLI memory. This function also changes the endianness
11819 * of each word if native endianness is different from SLI
11820  * endianness. This function can be called with or without a lock.
11824 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
11826 uint32_t *src = srcp;
11827 uint32_t *dest = destp;
11831 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
11833 ldata = le32_to_cpu(ldata);
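/*
 * Illustrative sketch: note that cnt is a byte count even though the
 * copy above proceeds a 32-bit word at a time, so a mailbox image copy
 * looks like (MAILBOX_CMD_SIZE is the mailbox size in bytes):
 *
 *	lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
 */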
11842 * lpfc_sli_bemem_bcopy - SLI memory copy function
11843 * @srcp: Source memory pointer.
11844 * @destp: Destination memory pointer.
11845  * @cnt: Number of bytes to copy (processed one 32-bit word at a time).
11847  * This function is used to copy data from a data structure
11848  * with big-endian representation to local endianness.
11849  * This function can be called with or without a lock.
11852 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
11854 uint32_t *src = srcp;
11855 uint32_t *dest = destp;
11859 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
11861 ldata = be32_to_cpu(ldata);
11869 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
11870 * @phba: Pointer to HBA context object.
11871 * @pring: Pointer to driver SLI ring object.
11872 * @mp: Pointer to driver buffer object.
11874 * This function is called with no lock held.
11875  * It always returns zero after adding the buffer to the postbufq.
11879 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11880 struct lpfc_dmabuf *mp)
11882 	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up later */
11884 spin_lock_irq(&phba->hbalock);
11885 list_add_tail(&mp->list, &pring->postbufq);
11886 pring->postbufq_cnt++;
11887 spin_unlock_irq(&phba->hbalock);
11892 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
11893 * @phba: Pointer to HBA context object.
11895 * When HBQ is enabled, buffers are searched based on tags. This function
11896  * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
11897  * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
11898  * does not conflict with tags of buffers posted for unsolicited events.
11899  * The function returns the allocated tag. The function is called with no lock held.
11903 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
11905 spin_lock_irq(&phba->hbalock);
11906 phba->buffer_tag_count++;
11908 	 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
11909 	 * a tag assigned by HBQ.
11911 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
11912 spin_unlock_irq(&phba->hbalock);
11913 return phba->buffer_tag_count;
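/*
 * Illustrative sketch: the tag allocated here travels with the posted
 * buffer and is used later to reclaim it from postbufq when the
 * CMD_IOCB_RET_XRI64_CX completion names it:
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	// ...post buffer with CMD_QUE_XRI64_CX carrying mp->buffer_tag...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
 */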
11917 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
11918 * @phba: Pointer to HBA context object.
11919 * @pring: Pointer to driver SLI ring object.
11920 * @tag: Buffer tag.
11922 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
11923 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
11924 * iocb is posted to the response ring with the tag of the buffer.
11925 * This function searches the pring->postbufq list using the tag
11926 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
11927 * iocb. If the buffer is found then lpfc_dmabuf object of the
11928 * buffer is returned to the caller else NULL is returned.
11929 * This function is called with no lock held.
11931 struct lpfc_dmabuf *
11932 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11935 struct lpfc_dmabuf *mp, *next_mp;
11936 struct list_head *slp = &pring->postbufq;
11938 /* Search postbufq, from the beginning, looking for a match on tag */
11939 spin_lock_irq(&phba->hbalock);
11940 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11941 if (mp->buffer_tag == tag) {
11942 list_del_init(&mp->list);
11943 pring->postbufq_cnt--;
11944 spin_unlock_irq(&phba->hbalock);
11949 spin_unlock_irq(&phba->hbalock);
11950 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11951 "0402 Cannot find virtual addr for buffer tag on "
11952 "ring %d Data x%lx x%px x%px x%x\n",
11953 pring->ringno, (unsigned long) tag,
11954 slp->next, slp->prev, pring->postbufq_cnt);
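/*
 * Illustrative retrieval path (hypothetical, not driver code): when a
 * CMD_IOCB_RET_XRI64_CX response carries the tag, the matching buffer can
 * be pulled back off the postbufq and released.
 */
static inline void example_reclaim_tagged_buf(struct lpfc_hba *phba,
					      struct lpfc_sli_ring *pring,
					      uint32_t tag)
{
	struct lpfc_dmabuf *mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
}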
11960 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
11961 * @phba: Pointer to HBA context object.
11962 * @pring: Pointer to driver SLI ring object.
11963 * @phys: DMA address of the buffer.
11965 * This function searches the buffer list using the dma_address
11966 * of unsolicited event to find the driver's lpfc_dmabuf object
11967 * corresponding to the dma_address. The function returns the
11968 * lpfc_dmabuf object if a buffer is found else it returns NULL.
11969 * This function is called by the ct and els unsolicited event
11970 * handlers to get the buffer associated with the unsolicited event.
11973 * This function is called with no lock held.
11975 struct lpfc_dmabuf *
11976 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11979 struct lpfc_dmabuf *mp, *next_mp;
11980 struct list_head *slp = &pring->postbufq;
11982 /* Search postbufq, from the beginning, looking for a match on phys */
11983 spin_lock_irq(&phba->hbalock);
11984 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11985 if (mp->phys == phys) {
11986 list_del_init(&mp->list);
11987 pring->postbufq_cnt--;
11988 spin_unlock_irq(&phba->hbalock);
11993 spin_unlock_irq(&phba->hbalock);
11994 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11995 "0410 Cannot find virtual addr for mapped buf on "
11996 "ring %d Data x%llx x%px x%px x%x\n",
11997 pring->ringno, (unsigned long long)phys,
11998 slp->next, slp->prev, pring->postbufq_cnt);
12003 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
12004 * @phba: Pointer to HBA context object.
12005 * @cmdiocb: Pointer to driver command iocb object.
12006 * @rspiocb: Pointer to driver response iocb object.
12008 * This function is the completion handler for the abort iocbs for
12009 * ELS commands. This function is called from the ELS ring event
12010 * handler with no lock held. This function frees memory resources
12011 * associated with the abort iocb.
12014 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12015 struct lpfc_iocbq *rspiocb)
12017 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
12018 u32 ulp_word4 = get_job_word4(phba, rspiocb);
12019 u8 cmnd = get_job_cmnd(phba, cmdiocb);
12023 * Assume that the port already completed and returned, or
12024 * will return the iocb. Just log the message.
12026 if (phba->sli_rev < LPFC_SLI_REV4) {
12027 if (cmnd == CMD_ABORT_XRI_CX &&
12028 ulp_status == IOSTAT_LOCAL_REJECT &&
12029 ulp_word4 == IOERR_ABORT_REQUESTED) {
12034 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
12035 "0327 Cannot abort els iocb x%px "
12036 "with io cmd xri %x abort tag : x%x, "
12037 "abort status %x abort code %x\n",
12038 cmdiocb, get_job_abtsiotag(phba, cmdiocb),
12039 (phba->sli_rev == LPFC_SLI_REV4) ?
12040 get_wqe_reqtag(cmdiocb) :
12041 cmdiocb->iocb.un.acxri.abortContextTag,
12042 ulp_status, ulp_word4);
12046 lpfc_sli_release_iocbq(phba, cmdiocb);
12051 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
12052 * @phba: Pointer to HBA context object.
12053 * @cmdiocb: Pointer to driver command iocb object.
12054 * @rspiocb: Pointer to driver response iocb object.
12056 * The function is called from SLI ring event handler with no
12057 * lock held. This function is the completion handler for ELS commands
12058 * which are aborted. The function frees memory resources used for
12059 * the aborted ELS commands.
12062 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12063 struct lpfc_iocbq *rspiocb)
12065 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
12067 LPFC_MBOXQ_t *mbox;
12068 u32 ulp_command, ulp_status, ulp_word4, iotag;
12070 ulp_command = get_job_cmnd(phba, cmdiocb);
12071 ulp_status = get_job_ulpstatus(phba, rspiocb);
12072 ulp_word4 = get_job_word4(phba, rspiocb);
12074 if (phba->sli_rev == LPFC_SLI_REV4) {
12075 iotag = get_wqe_reqtag(cmdiocb);
12077 irsp = &rspiocb->iocb;
12078 iotag = irsp->ulpIoTag;
12080 /* It is possible for a PLOGI_RJT for NPIV ports to get aborted.
12081 * The MBX_REG_LOGIN64 mbox command is freed back to the
12082 * mbox_mem_pool here.
12084 if (cmdiocb->context_un.mbox) {
12085 mbox = cmdiocb->context_un.mbox;
12086 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
12087 cmdiocb->context_un.mbox = NULL;
12091 /* ELS cmd tag <ulpIoTag> completes */
12092 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
12093 "0139 Ignoring ELS cmd code x%x completion Data: "
12094 "x%x x%x x%x x%px\n",
12095 ulp_command, ulp_status, ulp_word4, iotag,
12098 * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
12099 * if exchange is busy.
12101 if (ulp_command == CMD_GEN_REQUEST64_CR)
12102 lpfc_ct_free_iocb(phba, cmdiocb);
12104 lpfc_els_free_iocb(phba, cmdiocb);
12106 lpfc_nlp_put(ndlp);
12110 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
12111 * @phba: Pointer to HBA context object.
12112 * @pring: Pointer to driver SLI ring object.
12113 * @cmdiocb: Pointer to driver command iocb object.
12114 * @cmpl: completion function.
12116 * This function issues an abort iocb for the provided command iocb. In case
12117 * of unloading, the abort iocb will not be issued to commands on the ELS
12118 * ring. Instead, the completion callback of those commands is changed
12119 * so that nothing happens when they finish. This function is called with
12120 * hbalock held and no ring_lock held (SLI4). The function returns IOCB_SUCCESS
12121 * when the command iocb is an abort request.
12125 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12126 struct lpfc_iocbq *cmdiocb, void *cmpl)
12128 struct lpfc_vport *vport = cmdiocb->vport;
12129 struct lpfc_iocbq *abtsiocbp;
12130 int retval = IOCB_ERROR;
12131 unsigned long iflags;
12132 struct lpfc_nodelist *ndlp = NULL;
12133 u32 ulp_command = get_job_cmnd(phba, cmdiocb);
12134 u16 ulp_context, iotag;
12138 * There are certain command types we don't want to abort. And we
12139 * don't want to abort commands that are already in the process of being aborted.
12142 if (ulp_command == CMD_ABORT_XRI_WQE ||
12143 ulp_command == CMD_ABORT_XRI_CN ||
12144 ulp_command == CMD_CLOSE_XRI_CN ||
12145 cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
12146 return IOCB_ABORTING;
12149 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12150 cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12152 cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12157 * If we're unloading, don't abort iocb on the ELS ring, but change
12158 * the callback so that nothing happens when it finishes.
12160 if ((vport->load_flag & FC_UNLOADING) &&
12161 pring->ringno == LPFC_ELS_RING) {
12162 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12163 cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12165 cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12169 /* issue ABTS for this IOCB based on iotag */
12170 abtsiocbp = __lpfc_sli_get_iocbq(phba);
12171 if (abtsiocbp == NULL)
12172 return IOCB_NORESOURCE;
12174 /* This signals the response to set the correct status
12175 * before calling the completion handler
12177 cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
12179 if (phba->sli_rev == LPFC_SLI_REV4) {
12180 ulp_context = cmdiocb->sli4_xritag;
12181 iotag = abtsiocbp->iotag;
12183 iotag = cmdiocb->iocb.ulpIoTag;
12184 if (pring->ringno == LPFC_ELS_RING) {
12185 ndlp = cmdiocb->ndlp;
12186 ulp_context = ndlp->nlp_rpi;
12188 ulp_context = cmdiocb->iocb.ulpContext;
12192 if (phba->link_state < LPFC_LINK_UP ||
12193 (phba->sli_rev == LPFC_SLI_REV4 &&
12194 phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
12195 (phba->link_flag & LS_EXTERNAL_LOOPBACK))
12200 lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
12201 cmdiocb->iocb.ulpClass,
12202 LPFC_WQE_CQ_ID_DEFAULT, ia);
12204 abtsiocbp->vport = vport;
12206 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12207 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
12208 if (cmdiocb->cmd_flag & LPFC_IO_FCP)
12209 abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
12211 if (cmdiocb->cmd_flag & LPFC_IO_FOF)
12212 abtsiocbp->cmd_flag |= LPFC_IO_FOF;
12215 abtsiocbp->cmd_cmpl = cmpl;
12217 abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;
12218 abtsiocbp->vport = vport;
12220 if (phba->sli_rev == LPFC_SLI_REV4) {
12221 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
12222 if (unlikely(pring == NULL))
12223 goto abort_iotag_exit;
12224 /* Note: both hbalock and ring_lock need to be set here */
12225 spin_lock_irqsave(&pring->ring_lock, iflags);
12226 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12228 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12230 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12236 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
12237 "0339 Abort IO XRI x%x, Original iotag x%x, "
12238 "abort tag x%x Cmdjob : x%px Abortjob : x%px "
12240 ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
12241 cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
12244 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
12245 __lpfc_sli_release_iocbq(phba, abtsiocbp);
12249 * The caller of this routine should check for IOCB_ERROR
12250 * and handle it properly. This routine no longer removes the
12251 * iocb from the txcmplq nor calls the completion handler on IOCB_ERROR.
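/*
 * Minimal caller sketch (hypothetical) of the return-code handling asked
 * for above: IOCB_ABORTING means an abort is already in flight and counts
 * as progress; only IOCB_ERROR and IOCB_NORESOURCE are failures. The
 * caller is assumed to hold hbalock, per the function header.
 */
static inline bool example_abort_in_progress(struct lpfc_hba *phba,
					     struct lpfc_sli_ring *pring,
					     struct lpfc_iocbq *cmdiocb)
{
	int rc = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);

	return rc == IOCB_SUCCESS || rc == IOCB_ABORTING;
}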
12257 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
12258 * @phba: pointer to lpfc HBA data structure.
12260 * This routine will abort all pending and outstanding iocbs to an HBA.
12263 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
12265 struct lpfc_sli *psli = &phba->sli;
12266 struct lpfc_sli_ring *pring;
12267 struct lpfc_queue *qp = NULL;
12270 if (phba->sli_rev != LPFC_SLI_REV4) {
12271 for (i = 0; i < psli->num_rings; i++) {
12272 pring = &psli->sli3_ring[i];
12273 lpfc_sli_abort_iocb_ring(phba, pring);
12277 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12281 lpfc_sli_abort_iocb_ring(phba, pring);
12286 * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
12287 * @iocbq: Pointer to iocb object.
12288 * @vport: Pointer to driver virtual port object.
12290 * This function acts as an iocb filter for functions which abort FCP iocbs.
12293 * -ENODEV, if a null iocb or vport ptr is encountered
12294 * -EINVAL, if the iocb is not an FCP I/O, not on the TX cmpl queue, premarked as
12295 * driver already started the abort process, or is an abort iocb itself
12296 * 0, passes criteria for aborting the FCP I/O iocb
12299 lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
12300 struct lpfc_vport *vport)
12304 /* No null ptr vports */
12305 if (!iocbq || iocbq->vport != vport)
12306 return -ENODEV;
12308 /* iocb must be for FCP IO, already exists on the TX cmpl queue,
12309 * can't be premarked as driver aborted, nor be an ABORT iocb itself
12311 ulp_command = get_job_cmnd(vport->phba, iocbq);
12312 if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12313 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
12314 (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12315 (ulp_command == CMD_ABORT_XRI_CN ||
12316 ulp_command == CMD_CLOSE_XRI_CN ||
12317 ulp_command == CMD_ABORT_XRI_WQE))
12318 return -EINVAL;
12320 return 0;
12324 * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
12325 * @iocbq: Pointer to driver iocb object.
12326 * @vport: Pointer to driver virtual port object.
12327 * @tgt_id: SCSI ID of the target.
12328 * @lun_id: LUN ID of the scsi device.
12329 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
12331 * This function acts as an iocb filter for validating a lun/SCSI target/SCSI
12332 * host. This function returns
12335 * 0 if the filtering criteria are met for the given iocb and
12336 * 1 if the filtering criteria are not met.
12337 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
12338 * given iocb is for the SCSI device specified by vport, tgt_id and
12339 * lun_id parameter.
12340 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
12341 * given iocb is for the SCSI target specified by vport and tgt_id
12343 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
12344 * given iocb is for the SCSI host associated with the given vport.
12345 * This function is called with no locks held.
12348 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
12349 uint16_t tgt_id, uint64_t lun_id,
12350 lpfc_ctx_cmd ctx_cmd)
12352 struct lpfc_io_buf *lpfc_cmd;
12355 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12357 if (lpfc_cmd->pCmd == NULL)
12362 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12363 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
12364 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
12368 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12369 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
12372 case LPFC_CTX_HOST:
12376 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
12377 __func__, ctx_cmd);
12385 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
12386 * @vport: Pointer to virtual port.
12387 * @tgt_id: SCSI ID of the target.
12388 * @lun_id: LUN ID of the scsi device.
12389 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12391 * This function returns number of FCP commands pending for the vport.
12392 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
12393 * commands pending on the vport associated with SCSI device specified
12394 * by tgt_id and lun_id parameters.
12395 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
12396 * commands pending on the vport associated with SCSI target specified
12397 * by tgt_id parameter.
12398 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
12399 * commands pending on the vport.
12400 * This function returns the number of iocbs which satisfy the filter.
12401 * This function is called without any lock held.
12404 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
12405 lpfc_ctx_cmd ctx_cmd)
12407 struct lpfc_hba *phba = vport->phba;
12408 struct lpfc_iocbq *iocbq;
12410 unsigned long iflags;
12413 spin_lock_irqsave(&phba->hbalock, iflags);
12414 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
12415 iocbq = phba->sli.iocbq_lookup[i];
12417 if (!iocbq || iocbq->vport != vport)
12419 if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12420 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
12423 /* Include counting outstanding aborts */
12424 ulp_command = get_job_cmnd(phba, iocbq);
12425 if (ulp_command == CMD_ABORT_XRI_CN ||
12426 ulp_command == CMD_CLOSE_XRI_CN ||
12427 ulp_command == CMD_ABORT_XRI_WQE) {
12432 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12436 spin_unlock_irqrestore(&phba->hbalock, iflags);
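/*
 * Illustrative only: a reset path might poll lpfc_sli_sum_iocb() to decide
 * when all FCP I/O for one LUN has drained. The helper name is made up;
 * LPFC_CTX_LUN restricts the count to the given tgt_id/lun_id.
 */
static inline bool example_lun_quiesced(struct lpfc_vport *vport,
					uint16_t tgt_id, uint64_t lun_id)
{
	return lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) == 0;
}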
12442 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
12443 * @phba: Pointer to HBA context object
12444 * @cmdiocb: Pointer to command iocb object.
12445 * @rspiocb: Pointer to response iocb object.
12447 * This function is called when an aborted FCP iocb completes. This
12448 * function is called by the ring event handler with no lock held.
12449 * This function frees the iocb.
12452 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12453 struct lpfc_iocbq *rspiocb)
12455 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12456 "3096 ABORT_XRI_CX completing on rpi x%x "
12457 "original iotag x%x, abort cmd iotag x%x "
12458 "status 0x%x, reason 0x%x\n",
12459 (phba->sli_rev == LPFC_SLI_REV4) ?
12460 cmdiocb->sli4_xritag :
12461 cmdiocb->iocb.un.acxri.abortContextTag,
12462 get_job_abtsiotag(phba, cmdiocb),
12463 cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb),
12464 get_job_word4(phba, rspiocb));
12465 lpfc_sli_release_iocbq(phba, cmdiocb);
12470 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
12471 * @vport: Pointer to virtual port.
12472 * @tgt_id: SCSI ID of the target.
12473 * @lun_id: LUN ID of the scsi device.
12474 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12476 * This function sends an abort command for every SCSI command
12477 * associated with the given virtual port pending on the ring
12478 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
12479 * lpfc_sli_validate_fcp_iocb function. The ordering for validation before
12480 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12481 * followed by lpfc_sli_validate_fcp_iocb.
12483 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
12484 * FCP iocbs associated with lun specified by tgt_id and lun_id
12486 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
12487 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12488 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
12489 * FCP iocbs associated with virtual port.
12490 * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
12491 * lpfc_sli4_calc_ring is used.
12492 * This function returns number of iocbs it failed to abort.
12493 * This function is called with no locks held.
12496 lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
12497 lpfc_ctx_cmd abort_cmd)
12499 struct lpfc_hba *phba = vport->phba;
12500 struct lpfc_sli_ring *pring = NULL;
12501 struct lpfc_iocbq *iocbq;
12502 int errcnt = 0, ret_val = 0;
12503 unsigned long iflags;
12506 /* all I/Os are in process of being flushed */
12507 if (phba->hba_flag & HBA_IOQ_FLUSH)
12510 for (i = 1; i <= phba->sli.last_iotag; i++) {
12511 iocbq = phba->sli.iocbq_lookup[i];
12513 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12516 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12520 spin_lock_irqsave(&phba->hbalock, iflags);
12521 if (phba->sli_rev == LPFC_SLI_REV3) {
12522 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12523 } else if (phba->sli_rev == LPFC_SLI_REV4) {
12524 pring = lpfc_sli4_calc_ring(phba, iocbq);
12526 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
12527 lpfc_sli_abort_fcp_cmpl);
12528 spin_unlock_irqrestore(&phba->hbalock, iflags);
12529 if (ret_val != IOCB_SUCCESS)
12537 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
12538 * @vport: Pointer to virtual port.
12539 * @pring: Pointer to driver SLI ring object.
12540 * @tgt_id: SCSI ID of the target.
12541 * @lun_id: LUN ID of the scsi device.
12542 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12544 * This function sends an abort command for every SCSI command
12545 * associated with the given virtual port pending on the ring
12546 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
12547 * lpfc_sli_validate_fcp_iocb function. The ordering for validation before
12548 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12549 * followed by lpfc_sli_validate_fcp_iocb.
12551 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
12552 * FCP iocbs associated with lun specified by tgt_id and lun_id
12554 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
12555 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12556 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
12557 * FCP iocbs associated with virtual port.
12558 * This function returns the number of iocbs it aborted.
12559 * This function is called with no locks held right after a taskmgmt
12560 * command is sent.
12563 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
12564 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
12566 struct lpfc_hba *phba = vport->phba;
12567 struct lpfc_io_buf *lpfc_cmd;
12568 struct lpfc_iocbq *abtsiocbq;
12569 struct lpfc_nodelist *ndlp = NULL;
12570 struct lpfc_iocbq *iocbq;
12571 int sum, i, ret_val;
12572 unsigned long iflags;
12573 struct lpfc_sli_ring *pring_s4 = NULL;
12574 u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
12577 spin_lock_irqsave(&phba->hbalock, iflags);
12579 /* all I/Os are in process of being flushed */
12580 if (phba->hba_flag & HBA_IOQ_FLUSH) {
12581 spin_unlock_irqrestore(&phba->hbalock, iflags);
12586 for (i = 1; i <= phba->sli.last_iotag; i++) {
12587 iocbq = phba->sli.iocbq_lookup[i];
12589 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12592 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12596 /* Guard against IO completion being called at same time */
12597 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12598 spin_lock(&lpfc_cmd->buf_lock);
12600 if (!lpfc_cmd->pCmd) {
12601 spin_unlock(&lpfc_cmd->buf_lock);
12605 if (phba->sli_rev == LPFC_SLI_REV4) {
12606 pring_s4 =
12607 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
12609 spin_unlock(&lpfc_cmd->buf_lock);
12612 /* Note: both hbalock and ring_lock must be set here */
12613 spin_lock(&pring_s4->ring_lock);
12617 * If the iocbq is already being aborted, don't take a second
12618 * action, but do count it.
12620 if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12621 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
12622 if (phba->sli_rev == LPFC_SLI_REV4)
12623 spin_unlock(&pring_s4->ring_lock);
12624 spin_unlock(&lpfc_cmd->buf_lock);
12628 /* issue ABTS for this IOCB based on iotag */
12629 abtsiocbq = __lpfc_sli_get_iocbq(phba);
12631 if (phba->sli_rev == LPFC_SLI_REV4)
12632 spin_unlock(&pring_s4->ring_lock);
12633 spin_unlock(&lpfc_cmd->buf_lock);
12637 if (phba->sli_rev == LPFC_SLI_REV4) {
12638 iotag = abtsiocbq->iotag;
12639 ulp_context = iocbq->sli4_xritag;
12640 cqid = lpfc_cmd->hdwq->io_cq_map;
12642 iotag = iocbq->iocb.ulpIoTag;
12643 if (pring->ringno == LPFC_ELS_RING) {
12644 ndlp = iocbq->ndlp;
12645 ulp_context = ndlp->nlp_rpi;
12647 ulp_context = iocbq->iocb.ulpContext;
12651 ndlp = lpfc_cmd->rdata->pnode;
12653 if (lpfc_is_link_up(phba) &&
12654 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) &&
12655 !(phba->link_flag & LS_EXTERNAL_LOOPBACK))
12660 lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
12661 iocbq->iocb.ulpClass, cqid,
12664 abtsiocbq->vport = vport;
12666 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12667 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
12668 if (iocbq->cmd_flag & LPFC_IO_FCP)
12669 abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
12670 if (iocbq->cmd_flag & LPFC_IO_FOF)
12671 abtsiocbq->cmd_flag |= LPFC_IO_FOF;
12673 /* Setup callback routine and issue the command. */
12674 abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
12677 * Indicate the IO is being aborted by the driver and set
12678 * the caller's flag into the aborted IO.
12680 iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
12682 if (phba->sli_rev == LPFC_SLI_REV4) {
12683 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
12685 spin_unlock(&pring_s4->ring_lock);
12687 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
12691 spin_unlock(&lpfc_cmd->buf_lock);
12693 if (ret_val == IOCB_ERROR)
12694 __lpfc_sli_release_iocbq(phba, abtsiocbq);
12698 spin_unlock_irqrestore(&phba->hbalock, iflags);
12703 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
12704 * @phba: Pointer to HBA context object.
12705 * @cmdiocbq: Pointer to command iocb.
12706 * @rspiocbq: Pointer to response iocb.
12708 * This function is the completion handler for iocbs issued using
12709 * lpfc_sli_issue_iocb_wait function. This function is called by the
12710 * ring event handler function without any lock held. This function
12711 * can be called from both worker thread context and interrupt
12712 * context. This function can also be called from another thread which
12713 * cleans up the SLI layer objects.
12714 * This function copies the contents of the response iocb to the
12715 * response iocb memory object provided by the caller of
12716 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
12717 * sleeps for the iocb completion.
12720 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
12721 struct lpfc_iocbq *cmdiocbq,
12722 struct lpfc_iocbq *rspiocbq)
12724 wait_queue_head_t *pdone_q;
12725 unsigned long iflags;
12726 struct lpfc_io_buf *lpfc_cmd;
12727 size_t offset = offsetof(struct lpfc_iocbq, wqe);
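/* Starting the copy at the wqe member skips the driver-private
 * bookkeeping fields at the front of struct lpfc_iocbq, so only the
 * command/response payload is copied back to the caller's buffer.
 */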
12729 spin_lock_irqsave(&phba->hbalock, iflags);
12730 if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
12733 * A time out has occurred for the iocb. If a time out
12734 * completion handler has been supplied, call it. Otherwise,
12735 * just free the iocbq.
12738 spin_unlock_irqrestore(&phba->hbalock, iflags);
12739 cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
12740 cmdiocbq->wait_cmd_cmpl = NULL;
12741 if (cmdiocbq->cmd_cmpl)
12742 cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
12744 lpfc_sli_release_iocbq(phba, cmdiocbq);
12748 /* Copy the contents of the local rspiocb into the caller's buffer. */
12749 cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
12750 if (cmdiocbq->rsp_iocb && rspiocbq)
12751 memcpy((char *)cmdiocbq->rsp_iocb + offset,
12752 (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
12754 /* Set the exchange busy flag for task management commands */
12755 if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
12756 !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
12757 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
12759 if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
12760 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
12762 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
12765 pdone_q = cmdiocbq->context_un.wait_queue;
12768 spin_unlock_irqrestore(&phba->hbalock, iflags);
12773 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
12774 * @phba: Pointer to HBA context object.
12775 * @piocbq: Pointer to command iocb.
12776 * @flag: Flag to test.
12778 * This routine grabs the hbalock and then tests the cmd_flag to
12779 * see if the passed in flag is set.
12781 * 1 if flag is set.
12782 * 0 if flag is not set.
12785 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
12786 struct lpfc_iocbq *piocbq, uint32_t flag)
12788 unsigned long iflags;
12791 spin_lock_irqsave(&phba->hbalock, iflags);
12792 ret = piocbq->cmd_flag & flag;
12793 spin_unlock_irqrestore(&phba->hbalock, iflags);
12799 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
12800 * @phba: Pointer to HBA context object.
12801 * @ring_number: Ring number
12802 * @piocb: Pointer to command iocb.
12803 * @prspiocbq: Pointer to response iocb.
12804 * @timeout: Timeout in number of seconds.
12806 * This function issues the iocb to firmware and waits for the
12807 * iocb to complete. The cmd_cmpl field of the iocb shall be used
12808 * to handle iocbs which time out. If the field is NULL, the
12809 * function shall free the iocbq structure. If more clean up is
12810 * needed, the caller is expected to provide a completion function
12811 * that will provide the needed clean up. If the iocb command is
12812 * not completed within timeout seconds, the function will either
12813 * free the iocbq structure (if cmd_cmpl == NULL) or execute the
12814 * completion function set in the cmd_cmpl field and then return
12815 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
12816 * resources if this function returns IOCB_TIMEDOUT.
12817 * The function waits for the iocb completion using a
12818 * non-interruptible wait.
12819 * This function will sleep while waiting for iocb completion.
12820 * So, this function should not be called from any context which
12821 * does not allow sleeping. Due to the same reason, this function
12822 * cannot be called with interrupt disabled.
12823 * This function assumes that the iocb completions occur while
12824 * this function sleeps. So, this function cannot be called from
12825 * the thread which processes iocb completion for this ring.
12826 * This function clears the cmd_flag of the iocb object before
12827 * issuing the iocb and the iocb completion handler sets this
12828 * flag and wakes this thread when the iocb completes.
12829 * The contents of the response iocb will be copied to prspiocbq
12830 * by the completion handler when the command completes.
12831 * This function returns IOCB_SUCCESS when success.
12832 * This function is called with no lock held.
12835 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
12836 uint32_t ring_number,
12837 struct lpfc_iocbq *piocb,
12838 struct lpfc_iocbq *prspiocbq,
12841 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
12842 long timeleft, timeout_req = 0;
12843 int retval = IOCB_SUCCESS;
12845 struct lpfc_iocbq *iocb;
12847 int txcmplq_cnt = 0;
12848 struct lpfc_sli_ring *pring;
12849 unsigned long iflags;
12850 bool iocb_completed = true;
12852 if (phba->sli_rev >= LPFC_SLI_REV4) {
12853 lpfc_sli_prep_wqe(phba, piocb);
12855 pring = lpfc_sli4_calc_ring(phba, piocb);
12857 pring = &phba->sli.sli3_ring[ring_number];
12859 * If the caller has provided a response iocbq buffer, then rsp_iocb
12860 * is NULL or it's an error.
12861 */
12862 if (prspiocbq) {
12863 if (piocb->rsp_iocb)
12864 return IOCB_ERROR;
12865 piocb->rsp_iocb = prspiocbq;
12866 }
12868 piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
12869 piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
12870 piocb->context_un.wait_queue = &done_q;
12871 piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
12873 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12874 if (lpfc_readl(phba->HCregaddr, &creg_val))
12875 return IOCB_ERROR;
12876 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
12877 writel(creg_val, phba->HCregaddr);
12878 readl(phba->HCregaddr); /* flush */
12881 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
12882 SLI_IOCB_RET_IOCB);
12883 if (retval == IOCB_SUCCESS) {
12884 timeout_req = msecs_to_jiffies(timeout * 1000);
12885 timeleft = wait_event_timeout(done_q,
12886 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
12888 spin_lock_irqsave(&phba->hbalock, iflags);
12889 if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
12892 * IOCB timed out. Inform the wake iocb wait
12893 * completion function and set local status
12896 iocb_completed = false;
12897 piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
12899 spin_unlock_irqrestore(&phba->hbalock, iflags);
12900 if (iocb_completed) {
12901 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12902 "0331 IOCB wake signaled\n");
12903 /* Note: we are not indicating if the IOCB has a success
12904 * status or not - that's for the caller to check.
12905 * IOCB_SUCCESS means just that the command was sent and
12906 * completed. Not that it completed successfully.
12908 } else if (timeleft == 0) {
12909 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12910 "0338 IOCB wait timeout error - no "
12911 "wake response Data x%x\n", timeout);
12912 retval = IOCB_TIMEDOUT;
12914 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12915 "0330 IOCB wake NOT set, "
12917 timeout, (timeleft / jiffies));
12918 retval = IOCB_TIMEDOUT;
12920 } else if (retval == IOCB_BUSY) {
12921 if (phba->cfg_log_verbose & LOG_SLI) {
12922 list_for_each_entry(iocb, &pring->txq, list) {
12925 list_for_each_entry(iocb, &pring->txcmplq, list) {
12928 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12929 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
12930 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12934 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12935 "0332 IOCB wait issue failed, Data x%x\n",
12937 retval = IOCB_ERROR;
12940 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12941 if (lpfc_readl(phba->HCregaddr, &creg_val))
12943 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12944 writel(creg_val, phba->HCregaddr);
12945 readl(phba->HCregaddr); /* flush */
12949 piocb->rsp_iocb = NULL;
12951 piocb->context_un.wait_queue = NULL;
12952 piocb->cmd_cmpl = NULL;
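/*
 * Hedged usage sketch (not from the driver): issue an ELS-ring iocb
 * synchronously and single out the timeout case, where the iocb now
 * belongs to the timeout path and must not be freed by the caller. The
 * helper name and the 30 second timeout are assumptions.
 */
static inline int example_issue_and_wait(struct lpfc_hba *phba,
					 struct lpfc_iocbq *piocb,
					 struct lpfc_iocbq *prsp)
{
	int rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb, prsp, 30);

	if (rc == IOCB_TIMEDOUT)
		return -ETIMEDOUT;	/* iocb freed by the timeout path */
	return (rc == IOCB_SUCCESS) ? 0 : -EIO;
}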
12957 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
12958 * @phba: Pointer to HBA context object.
12959 * @pmboxq: Pointer to driver mailbox object.
12960 * @timeout: Timeout in number of seconds.
12962 * This function issues the mailbox to firmware and waits for the
12963 * mailbox command to complete. If the mailbox command is not
12964 * completed within timeout seconds, it returns MBX_TIMEOUT.
12965 * The function waits for the mailbox completion using an
12966 * interruptible wait. If the thread is woken up due to a
12967 * signal, MBX_TIMEOUT error is returned to the caller. Caller
12968 * should not free the mailbox resources if this function returns MBX_TIMEOUT.
12970 * This function will sleep while waiting for mailbox completion.
12971 * So, this function should not be called from any context which
12972 * does not allow sleeping. Due to the same reason, this function
12973 * cannot be called with interrupt disabled.
12974 * This function assumes that the mailbox completion occurs while
12975 * this function sleeps. So, this function cannot be called from
12976 * the worker thread which processes mailbox completion.
12977 * This function is called in the context of HBA management applications.
12979 * This function returns MBX_SUCCESS when successful.
12980 * This function is called with no lock held.
12983 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
12986 struct completion mbox_done;
12987 int retval;
12988 unsigned long flag;
12990 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12991 /* setup wake call as IOCB callback */
12992 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12994 /* setup context3 field to pass wait_queue pointer to wake function */
12995 init_completion(&mbox_done);
12996 pmboxq->context3 = &mbox_done;
12997 /* now issue the command */
12998 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12999 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
13000 wait_for_completion_timeout(&mbox_done,
13001 msecs_to_jiffies(timeout * 1000));
13003 spin_lock_irqsave(&phba->hbalock, flag);
13004 pmboxq->context3 = NULL;
13006 * if LPFC_MBX_WAKE flag is set the mailbox is completed
13007 * else do not free the resources.
13009 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
13010 retval = MBX_SUCCESS;
13012 retval = MBX_TIMEOUT;
13013 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13015 spin_unlock_irqrestore(&phba->hbalock, flag);
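/*
 * Illustrative sketch of the synchronous mailbox path (hypothetical
 * helper): on MBX_TIMEOUT the mailbox is still owned by the completion
 * path and its resources must not be freed here, per the header above.
 */
static inline int example_sync_mbox(struct lpfc_hba *phba,
				    LPFC_MBOXQ_t *pmboxq)
{
	int rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if (rc == MBX_TIMEOUT)
		return -ETIMEDOUT;	/* resources still in flight */
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}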
13021 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
13022 * @phba: Pointer to HBA context.
13023 * @mbx_action: Mailbox shutdown options.
13025 * This function is called to shutdown the driver's mailbox sub-system.
13026 * It first marks the mailbox sub-system as blocked to prevent
13027 * asynchronous mailbox commands from being issued off the pending mailbox
13028 * command queue. If the mailbox command sub-system shutdown is due to
13029 * HBA error conditions such as EEH or ERATT, this routine shall invoke
13030 * the mailbox sub-system flush routine to forcefully bring down the
13031 * mailbox sub-system. Otherwise, if it is due to normal condition (such
13032 * as with offline or HBA function reset), this routine will wait for the
13033 * outstanding mailbox command to complete before invoking the mailbox
13034 * sub-system flush routine to gracefully bring down mailbox sub-system.
13037 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
13039 struct lpfc_sli *psli = &phba->sli;
13040 unsigned long timeout;
13042 if (mbx_action == LPFC_MBX_NO_WAIT) {
13043 /* delay 100ms for port state */
13044 msleep(100);
13045 lpfc_sli_mbox_sys_flush(phba);
13046 return;
13047 }
13048 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
13050 /* Disable softirqs, including timers from obtaining phba->hbalock */
13051 local_bh_disable();
13053 spin_lock_irq(&phba->hbalock);
13054 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13056 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
13057 /* Determine how long we might wait for the active mailbox
13058 * command to be gracefully completed by firmware.
13060 if (phba->sli.mbox_active)
13061 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
13062 phba->sli.mbox_active) *
13064 spin_unlock_irq(&phba->hbalock);
13066 /* Enable softirqs again, done with phba->hbalock */
13067 local_bh_enable();
13069 while (phba->sli.mbox_active) {
13070 /* Check active mailbox complete status every 2ms */
13071 msleep(2);
13072 if (time_after(jiffies, timeout))
13073 /* Timeout, let the mailbox flush routine
13074 * forcefully release the active mailbox command
13075 */
13076 break;
13077 }
13079 spin_unlock_irq(&phba->hbalock);
13081 /* Enable softirqs again, done with phba->hbalock */
13082 local_bh_enable();
13085 lpfc_sli_mbox_sys_flush(phba);
13089 * lpfc_sli_eratt_read - read sli-3 error attention events
13090 * @phba: Pointer to HBA context.
13092 * This function is called to read the SLI3 device error attention registers
13093 * for possible error attention events. The caller must hold the hostlock
13094 * with spin_lock_irq().
13096 * This function returns 1 when there is Error Attention in the Host Attention
13097 * Register and returns 0 otherwise.
13100 lpfc_sli_eratt_read(struct lpfc_hba *phba)
13104 /* Read chip Host Attention (HA) register */
13105 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13108 if (ha_copy & HA_ERATT) {
13109 /* Read host status register to retrieve error event */
13110 if (lpfc_sli_read_hs(phba))
13113 /* Check if a deferred error condition is active */
13114 if ((HS_FFER1 & phba->work_hs) &&
13115 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13116 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
13117 phba->hba_flag |= DEFER_ERATT;
13118 /* Clear all interrupt enable conditions */
13119 writel(0, phba->HCregaddr);
13120 readl(phba->HCregaddr);
13123 /* Set the driver HA work bitmap */
13124 phba->work_ha |= HA_ERATT;
13125 /* Indicate polling handles this ERATT */
13126 phba->hba_flag |= HBA_ERATT_HANDLED;
13132 /* Set the driver HS work bitmap */
13133 phba->work_hs |= UNPLUG_ERR;
13134 /* Set the driver HA work bitmap */
13135 phba->work_ha |= HA_ERATT;
13136 /* Indicate polling handles this ERATT */
13137 phba->hba_flag |= HBA_ERATT_HANDLED;
13142 * lpfc_sli4_eratt_read - read sli-4 error attention events
13143 * @phba: Pointer to HBA context.
13145 * This function is called to read the SLI4 device error attention registers
13146 * for possible error attention events. The caller must hold the hostlock
13147 * with spin_lock_irq().
13149 * This function returns 1 when there is Error Attention in the Host Attention
13150 * Register and returns 0 otherwise.
13153 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
13155 uint32_t uerr_sta_hi, uerr_sta_lo;
13156 uint32_t if_type, portsmphr;
13157 struct lpfc_register portstat_reg;
13161 * For now, use the SLI4 device internal unrecoverable error
13162 * registers for error attention. This can be changed later.
13164 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
13166 case LPFC_SLI_INTF_IF_TYPE_0:
13167 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
13169 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
13171 phba->work_hs |= UNPLUG_ERR;
13172 phba->work_ha |= HA_ERATT;
13173 phba->hba_flag |= HBA_ERATT_HANDLED;
13176 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
13177 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
13178 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13179 "1423 HBA Unrecoverable error: "
13180 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
13181 "ue_mask_lo_reg=0x%x, "
13182 "ue_mask_hi_reg=0x%x\n",
13183 uerr_sta_lo, uerr_sta_hi,
13184 phba->sli4_hba.ue_mask_lo,
13185 phba->sli4_hba.ue_mask_hi);
13186 phba->work_status[0] = uerr_sta_lo;
13187 phba->work_status[1] = uerr_sta_hi;
13188 phba->work_ha |= HA_ERATT;
13189 phba->hba_flag |= HBA_ERATT_HANDLED;
13193 case LPFC_SLI_INTF_IF_TYPE_2:
13194 case LPFC_SLI_INTF_IF_TYPE_6:
13195 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
13196 &portstat_reg.word0) ||
13197 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
13199 phba->work_hs |= UNPLUG_ERR;
13200 phba->work_ha |= HA_ERATT;
13201 phba->hba_flag |= HBA_ERATT_HANDLED;
13204 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
13205 phba->work_status[0] =
13206 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
13207 phba->work_status[1] =
13208 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
13209 logmask = LOG_TRACE_EVENT;
13210 if (phba->work_status[0] ==
13211 SLIPORT_ERR1_REG_ERR_CODE_2 &&
13212 phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
13214 lpfc_printf_log(phba, KERN_ERR, logmask,
13215 "2885 Port Status Event: "
13216 "port status reg 0x%x, "
13217 "port smphr reg 0x%x, "
13218 "error 1=0x%x, error 2=0x%x\n",
13219 portstat_reg.word0,
13221 phba->work_status[0],
13222 phba->work_status[1]);
13223 phba->work_ha |= HA_ERATT;
13224 phba->hba_flag |= HBA_ERATT_HANDLED;
13228 case LPFC_SLI_INTF_IF_TYPE_1:
13230 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13231 "2886 HBA Error Attention on unsupported "
13232 "if type %d.", if_type);
13240 * lpfc_sli_check_eratt - check error attention events
13241 * @phba: Pointer to HBA context.
13243 * This function is called from timer soft interrupt context to check HBA's
13244 * error attention register bit for error attention events.
13246 * This function returns 1 when there is Error Attention in the Host Attention
13247 * Register and returns 0 otherwise.
13250 lpfc_sli_check_eratt(struct lpfc_hba *phba)
13254 /* If somebody is waiting to handle an eratt, don't process it
13255 * here. The brdkill function will do this.
13257 if (phba->link_flag & LS_IGNORE_ERATT)
13260 /* Check if interrupt handler handles this ERATT */
13261 spin_lock_irq(&phba->hbalock);
13262 if (phba->hba_flag & HBA_ERATT_HANDLED) {
13263 /* Interrupt handler has handled ERATT */
13264 spin_unlock_irq(&phba->hbalock);
13269 * If there is deferred error attention, do not check for error
13272 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13273 spin_unlock_irq(&phba->hbalock);
13277 /* If PCI channel is offline, don't process it */
13278 if (unlikely(pci_channel_offline(phba->pcidev))) {
13279 spin_unlock_irq(&phba->hbalock);
13283 switch (phba->sli_rev) {
13284 case LPFC_SLI_REV2:
13285 case LPFC_SLI_REV3:
13286 /* Read chip Host Attention (HA) register */
13287 ha_copy = lpfc_sli_eratt_read(phba);
13289 case LPFC_SLI_REV4:
13290 /* Read device Unrecoverable Error (UERR) registers */
13291 ha_copy = lpfc_sli4_eratt_read(phba);
13294 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13295 "0299 Invalid SLI revision (%d)\n",
13300 spin_unlock_irq(&phba->hbalock);
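/*
 * Hedged sketch (not the driver's actual timer code) of how a timer path
 * might consume lpfc_sli_check_eratt(): when error attention is latched,
 * wake the worker thread to run the error handler.
 */
static inline void example_eratt_poll(struct lpfc_hba *phba)
{
	if (lpfc_sli_check_eratt(phba))
		lpfc_worker_wake_up(phba);
}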
13306 * lpfc_intr_state_check - Check device state for interrupt handling
13307 * @phba: Pointer to HBA context.
13309 * This inline routine checks whether a device or its PCI slot is in a
13310 * state in which the interrupt should be handled.
13312 * This function returns 0 if the device or the PCI slot is in a state in
13313 * which the interrupt should be handled, otherwise -EIO.
13316 lpfc_intr_state_check(struct lpfc_hba *phba)
13318 /* If the pci channel is offline, ignore all the interrupts */
13319 if (unlikely(pci_channel_offline(phba->pcidev)))
13322 /* Update device level interrupt statistics */
13323 phba->sli.slistat.sli_intr++;
13325 /* Ignore all interrupts during initialization. */
13326 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
13333 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
13334 * @irq: Interrupt number.
13335 * @dev_id: The device context pointer.
13337 * This function is directly called from the PCI layer as an interrupt
13338 * service routine when device with SLI-3 interface spec is enabled with
13339 * MSI-X multi-message interrupt mode and there are slow-path events in
13340 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
13341 * interrupt mode, this function is called as part of the device-level
13342 * interrupt handler. When the PCI slot is in error recovery or the HBA
13343 * is undergoing initialization, the interrupt handler will not process
13344 * the interrupt. The link attention and ELS ring attention events are
13345 * handled by the worker thread. The interrupt handler signals the worker
13346 * thread and returns for these events. This function is called without
13347 * any lock held. It gets the hbalock to access and update SLI data
13350 * This function returns IRQ_HANDLED when interrupt is handled else it
13351 * returns IRQ_NONE.
13354 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
13356 struct lpfc_hba *phba;
13357 uint32_t ha_copy, hc_copy;
13358 uint32_t work_ha_copy;
13359 unsigned long status;
13360 unsigned long iflag;
13363 MAILBOX_t *mbox, *pmbox;
13364 struct lpfc_vport *vport;
13365 struct lpfc_nodelist *ndlp;
13366 struct lpfc_dmabuf *mp;
13371 * Get the driver's phba structure from the dev_id and
13372 * assume the HBA is not interrupting.
13374 phba = (struct lpfc_hba *)dev_id;
13376 if (unlikely(!phba))
13380 * Stuff needs to be attended to when this function is invoked as an
13381 * individual interrupt handler in MSI-X multi-message interrupt mode
13383 if (phba->intr_type == MSIX) {
13384 /* Check device state for handling interrupt */
13385 if (lpfc_intr_state_check(phba))
13387 /* Need to read HA REG for slow-path events */
13388 spin_lock_irqsave(&phba->hbalock, iflag);
13389 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13391 /* If somebody is waiting to handle an eratt don't process it
13392 * here. The brdkill function will do this.
13394 if (phba->link_flag & LS_IGNORE_ERATT)
13395 ha_copy &= ~HA_ERATT;
13396 /* Check the need for handling ERATT in interrupt handler */
13397 if (ha_copy & HA_ERATT) {
13398 if (phba->hba_flag & HBA_ERATT_HANDLED)
13399 /* ERATT polling has handled ERATT */
13400 ha_copy &= ~HA_ERATT;
13402 /* Indicate interrupt handler handles ERATT */
13403 phba->hba_flag |= HBA_ERATT_HANDLED;
13407 * If there is deferred error attention, do not check for any
13410 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13411 spin_unlock_irqrestore(&phba->hbalock, iflag);
13415 /* Clear up only attention source related to slow-path */
13416 if (lpfc_readl(phba->HCregaddr, &hc_copy))
13419 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
13420 HC_LAINT_ENA | HC_ERINT_ENA),
13422 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
13424 writel(hc_copy, phba->HCregaddr);
13425 readl(phba->HAregaddr); /* flush */
13426 spin_unlock_irqrestore(&phba->hbalock, iflag);
13428 ha_copy = phba->ha_copy;
13430 work_ha_copy = ha_copy & phba->work_ha_mask;
13432 if (work_ha_copy) {
13433 if (work_ha_copy & HA_LATT) {
13434 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
13436 * Turn off Link Attention interrupts
13437 * until CLEAR_LA done
13439 spin_lock_irqsave(&phba->hbalock, iflag);
13440 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
13441 if (lpfc_readl(phba->HCregaddr, &control))
13443 control &= ~HC_LAINT_ENA;
13444 writel(control, phba->HCregaddr);
13445 readl(phba->HCregaddr); /* flush */
13446 spin_unlock_irqrestore(&phba->hbalock, iflag);
13449 work_ha_copy &= ~HA_LATT;
13452 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
13454 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
13455 * the only slow ring.
13457 status = (work_ha_copy &
13458 (HA_RXMASK << (4*LPFC_ELS_RING)));
13459 status >>= (4*LPFC_ELS_RING);
13460 if (status & HA_RXMASK) {
13461 spin_lock_irqsave(&phba->hbalock, iflag);
13462 if (lpfc_readl(phba->HCregaddr, &control))
13465 lpfc_debugfs_slow_ring_trc(phba,
13466 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
13468 (uint32_t)phba->sli.slistat.sli_intr);
13470 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
13471 lpfc_debugfs_slow_ring_trc(phba,
13472 "ISR Disable ring:"
13473 "pwork:x%x hawork:x%x wait:x%x",
13474 phba->work_ha, work_ha_copy,
13475 (uint32_t)((unsigned long)
13476 &phba->work_waitq));
13479 ~(HC_R0INT_ENA << LPFC_ELS_RING);
13480 writel(control, phba->HCregaddr);
13481 readl(phba->HCregaddr); /* flush */
13484 lpfc_debugfs_slow_ring_trc(phba,
13485 "ISR slow ring: pwork:"
13486 "x%x hawork:x%x wait:x%x",
13487 phba->work_ha, work_ha_copy,
13488 (uint32_t)((unsigned long)
13489 &phba->work_waitq));
13491 spin_unlock_irqrestore(&phba->hbalock, iflag);
13494 spin_lock_irqsave(&phba->hbalock, iflag);
13495 if (work_ha_copy & HA_ERATT) {
13496 if (lpfc_sli_read_hs(phba))
13499 * Check if there is a deferred error condition
13502 if ((HS_FFER1 & phba->work_hs) &&
13503 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13504 HS_FFER6 | HS_FFER7 | HS_FFER8) &
13506 phba->hba_flag |= DEFER_ERATT;
13507 /* Clear all interrupt enable conditions */
13508 writel(0, phba->HCregaddr);
13509 readl(phba->HCregaddr);
13513 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
13514 pmb = phba->sli.mbox_active;
13515 pmbox = &pmb->u.mb;
13516 mbox = phba->mbox;
13517 vport = pmb->vport;
13519 /* First check out the status word */
13520 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
13521 if (pmbox->mbxOwner != OWN_HOST) {
13522 spin_unlock_irqrestore(&phba->hbalock, iflag);
13524 * Stray Mailbox Interrupt, mbxCommand <cmd>
13525 * mbxStatus <status>
13527 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13528 "(%d):0304 Stray Mailbox "
13529 "Interrupt mbxCommand x%x "
13531 (vport ? vport->vpi : 0),
13534 /* clear mailbox attention bit */
13535 work_ha_copy &= ~HA_MBATT;
13537 phba->sli.mbox_active = NULL;
13538 spin_unlock_irqrestore(&phba->hbalock, iflag);
13539 phba->last_completion_time = jiffies;
13540 del_timer(&phba->sli.mbox_tmo);
13541 if (pmb->mbox_cmpl) {
13542 lpfc_sli_pcimem_bcopy(mbox, pmbox,
13544 if (pmb->out_ext_byte_len &&
13546 lpfc_sli_pcimem_bcopy(
13549 pmb->out_ext_byte_len);
13551 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13552 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13554 lpfc_debugfs_disc_trc(vport,
13555 LPFC_DISC_TRC_MBOX_VPORT,
13556 "MBOX dflt rpi: : "
13557 "status:x%x rpi:x%x",
13558 (uint32_t)pmbox->mbxStatus,
13559 pmbox->un.varWords[0], 0);
13561 if (!pmbox->mbxStatus) {
13562 mp = (struct lpfc_dmabuf *)
13564 ndlp = (struct lpfc_nodelist *)
13567 /* Reg_LOGIN of dflt RPI was
13568 * successful. Now let's get
13569 * rid of the RPI using the
13570 * same mbox buffer.
13572 lpfc_unreg_login(phba,
13574 pmbox->un.varWords[0],
13577 lpfc_mbx_cmpl_dflt_rpi;
13579 pmb->ctx_ndlp = ndlp;
13580 pmb->vport = vport;
13581 rc = lpfc_sli_issue_mbox(phba,
13584 if (rc != MBX_BUSY)
13585 lpfc_printf_log(phba,
13588 "0350 rc should have"
13589 "been MBX_BUSY\n");
13590 if (rc != MBX_NOT_FINISHED)
13591 goto send_current_mbox;
13595 &phba->pport->work_port_lock,
13597 phba->pport->work_port_events &=
13599 spin_unlock_irqrestore(
13600 &phba->pport->work_port_lock,
13603 /* Do NOT queue MBX_HEARTBEAT to the worker
13604 * thread for processing.
13606 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13607 /* Process mbox now */
13608 phba->sli.mbox_active = NULL;
13609 phba->sli.sli_flag &=
13610 ~LPFC_SLI_MBOX_ACTIVE;
13611 if (pmb->mbox_cmpl)
13612 pmb->mbox_cmpl(phba, pmb);
13614 /* Queue to worker thread to process */
13615 lpfc_mbox_cmpl_put(phba, pmb);
13619 spin_unlock_irqrestore(&phba->hbalock, iflag);
13621 if ((work_ha_copy & HA_MBATT) &&
13622 (phba->sli.mbox_active == NULL)) {
13624 /* Process next mailbox command if there is one */
13626 rc = lpfc_sli_issue_mbox(phba, NULL,
13628 } while (rc == MBX_NOT_FINISHED);
13629 if (rc != MBX_SUCCESS)
13630 lpfc_printf_log(phba, KERN_ERR,
13632 "0349 rc should be "
13636 spin_lock_irqsave(&phba->hbalock, iflag);
13637 phba->work_ha |= work_ha_copy;
13638 spin_unlock_irqrestore(&phba->hbalock, iflag);
13639 lpfc_worker_wake_up(phba);
13641 return IRQ_HANDLED;
13643 spin_unlock_irqrestore(&phba->hbalock, iflag);
13644 return IRQ_HANDLED;
13646 } /* lpfc_sli_sp_intr_handler */
13649 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
13650 * @irq: Interrupt number.
13651 * @dev_id: The device context pointer.
13653 * This function is directly called from the PCI layer as an interrupt
13654 * service routine when device with SLI-3 interface spec is enabled with
13655 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13656 * ring event in the HBA. However, when the device is enabled with either
13657 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13658 * device-level interrupt handler. When the PCI slot is in error recovery
13659 * or the HBA is undergoing initialization, the interrupt handler will not
13660 * process the interrupt. The SCSI FCP fast-path ring events are handled in
13661 * the interrupt context. This function is called without any lock held.
13662 * It gets the hbalock to access and update SLI data structures.
13664 * This function returns IRQ_HANDLED when interrupt is handled else it
13665 * returns IRQ_NONE.
13668 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
13670 struct lpfc_hba *phba;
13672 unsigned long status;
13673 unsigned long iflag;
13674 struct lpfc_sli_ring *pring;
13676 /* Get the driver's phba structure from the dev_id and
13677 * assume the HBA is not interrupting.
13679 phba = (struct lpfc_hba *) dev_id;
13681 if (unlikely(!phba))
13685 * Stuff needs to be attended to when this function is invoked as an
13686 * individual interrupt handler in MSI-X multi-message interrupt mode
13688 if (phba->intr_type == MSIX) {
13689 /* Check device state for handling interrupt */
13690 if (lpfc_intr_state_check(phba))
13692 /* Need to read HA REG for FCP ring and other ring events */
13693 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13694 return IRQ_HANDLED;
13695 /* Clear up only attention source related to fast-path */
13696 spin_lock_irqsave(&phba->hbalock, iflag);
13698 * If there is deferred error attention, do not check for
13701 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13702 spin_unlock_irqrestore(&phba->hbalock, iflag);
13705 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
13707 readl(phba->HAregaddr); /* flush */
13708 spin_unlock_irqrestore(&phba->hbalock, iflag);
13710 ha_copy = phba->ha_copy;
13713 * Process all events on FCP ring. Take the optimized path for FCP IO.
13715 ha_copy &= ~(phba->work_ha_mask);
13717 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13718 status >>= (4*LPFC_FCP_RING);
13719 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
13720 if (status & HA_RXMASK)
13721 lpfc_sli_handle_fast_ring_event(phba, pring, status);
13723 if (phba->cfg_multi_ring_support == 2) {
13725 * Process all events on extra ring. Take the optimized path
13726 * for extra ring IO.
13728 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13729 status >>= (4*LPFC_EXTRA_RING);
13730 if (status & HA_RXMASK) {
13731 lpfc_sli_handle_fast_ring_event(phba,
13732 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
13736 return IRQ_HANDLED;
13737 } /* lpfc_sli_fp_intr_handler */
/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the HBA device-level interrupt handler to device with
 * SLI-3 interface spec, called from the PCI layer when either MSI or
 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
 * requires driver attention. This function invokes the slow-path interrupt
 * attention handling function and fast-path interrupt attention handling
 * function in turn to process the relevant HBA attention events. This
 * function is called without any lock held. It gets the hbalock to access
 * and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	unsigned long status1, status2;
	uint32_t hc_copy;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	spin_lock(&phba->hbalock);
	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}

	if (unlikely(!phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	} else if (phba->ha_copy & HA_ERATT) {
		if (phba->hba_flag & HBA_ERATT_HANDLED)
			/* ERATT polling has handled ERATT */
			phba->ha_copy &= ~HA_ERATT;
		else
			/* Indicate interrupt handler handles ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
	}

	/*
	 * If there is deferred error attention, do not check for any interrupt.
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	}

	/* Clear attention sources except link and error attentions */
	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}
	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
		phba->HCregaddr);
	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	/*
	 * Invoke slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
	status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status2 >>= (4*LPFC_ELS_RING);

	if (status1 || (status2 & HA_RXMASK))
		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
	else
		sp_irq_rc = IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
	if (phba->cfg_multi_ring_support == 2) {
		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status2 >>= (4*LPFC_EXTRA_RING);
	} else
		status2 = 0;

	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
	else
		fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
} /* lpfc_sli_intr_handler */

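/*
 * Illustrative sketch (not part of the driver source): a device-level
 * handler such as lpfc_sli_intr_handler() is bound to the device by the
 * interrupt-enable path with request_irq(), roughly:
 *
 *	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
 *			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 *
 * The real registration lives in the driver's interrupt setup code in
 * lpfc_init.c; the exact flags shown here are an assumption for
 * illustration only.
 */
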
/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 els abort xri events.
 **/
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* First, declare the els xri abort event has been handled */
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Now, handle all the els xri abort events */
	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
				       iflags);
		/* Notify aborted XRI for ELS work queue */
		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);

		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
		spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
				  iflags);
	}
	spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
}

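/*
 * Illustrative sketch (assumed, generic names): the loop above follows a
 * common drain pattern that drops the list lock around each callout so the
 * per-event work may take other locks or sleep:
 *
 *	spin_lock_irqsave(&list_lock, flags);
 *	while (!list_empty(&work_list)) {
 *		list_remove_head(&work_list, evt, struct lpfc_cq_event, list);
 *		spin_unlock_irqrestore(&list_lock, flags);
 *		handle(evt);		// may take other locks
 *		spin_lock_irqsave(&list_lock, flags);
 *	}
 *	spin_unlock_irqrestore(&list_lock, flags);
 *
 * "list_lock", "work_list" and "handle" are placeholder names.
 */
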
/**
 * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event and constructs
 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
 * discovery engine to handle.
 *
 * Return: Pointer to the receive IOCBQ, NULL otherwise.
 **/
static struct lpfc_iocbq *
lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
				  struct lpfc_iocbq *irspiocbq)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_wcqe_complete *wcqe;
	unsigned long iflags;

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return NULL;

	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	/* Look up the ELS command IOCB and create pseudo response IOCB */
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	if (unlikely(!cmdiocbq)) {
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0386 ELS complete with no corresponding "
				"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
		lpfc_sli_release_iocbq(phba, irspiocbq);
		return NULL;
	}

	memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
	memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));

	/* Put the iocb back on the txcmplq */
	lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	return irspiocbq;
}

inline struct lpfc_cq_event *
lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
{
	struct lpfc_cq_event *cq_event;

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0602 Failed to alloc CQ_EVENT entry\n");
		return NULL;
	}

	/* Move the CQE into the event */
	memcpy(&cq_event->cqe, entry, size);
	return cq_event;
}

/**
 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with an
 * asynchronous event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0392 Async Event: word0:x%x, word1:x%x, "
			"word2:x%x, word3:x%x\n", mcqe->word0,
			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);

	cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
	if (!cq_event)
		return false;

	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);

	/* Set the async event flag */
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag |= ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}

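/*
 * Illustrative sketch (not driver code): the handler above is the producer
 * half of a worker-thread handoff. The consumer half, run from the worker
 * thread, reverses it along the lines of:
 *
 *	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
 *	list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
 *			 cq_event, struct lpfc_cq_event, list);
 *	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
 *	// ... process cq_event->cqe ...
 *	lpfc_sli4_cq_event_release(phba, cq_event);
 *
 * In this driver the drain loop lives in lpfc_sli4_async_event_proc()
 * (lpfc_init.c); the fragment here is a simplified assumption of it.
 */
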
/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with a mailbox
 * completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	uint32_t mcqe_status;
	MAILBOX_t *mbox, *pmbox;
	struct lpfc_mqe *mqe;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;
	LPFC_MBOXQ_t *pmb;
	bool workposted = false;
	int rc;

	/* If not a mailbox complete MCQE, out by checking mailbox consume */
	if (!bf_get(lpfc_trailer_completed, mcqe))
		goto out_no_mqe_complete;

	/* Get the reference to the active mbox command */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pmb = phba->sli.mbox_active;
	if (unlikely(!pmb)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1832 No pending MBOX command to handle\n");
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		goto out_no_mqe_complete;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	mqe = &pmb->u.mqe;
	pmbox = (MAILBOX_t *)&pmb->u.mqe;
	mbox = phba->mbox;
	vport = pmb->vport;

	/* Reset heartbeat timer */
	phba->last_completion_time = jiffies;
	del_timer(&phba->sli.mbox_tmo);

	/* Move mbox data to caller's mailbox region, do endian swapping */
	if (pmb->mbox_cmpl && mbox)
		lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));

	/*
	 * For mcqe errors, conditionally move a modified error code to
	 * the mbox so that the error will not be missed.
	 */
	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mqe,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
	}
	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
				      "MBOX dflt rpi: status:x%x rpi:x%x",
				      mcqe_status,
				      pmbox->un.varWords[0], 0);
		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
			mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
			ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;

			/* Reg_LOGIN of dflt RPI was successful. Mark the
			 * node as having an UNREG_LOGIN in progress to stop
			 * an unsolicited PLOGI from the same NPortId from
			 * starting another mailbox transaction.
			 */
			spin_lock_irqsave(&ndlp->lock, iflags);
			ndlp->nlp_flag |= NLP_UNREG_INP;
			spin_unlock_irqrestore(&ndlp->lock, iflags);
			lpfc_unreg_login(phba, vport->vpi,
					 pmbox->un.varWords[0], pmb);
			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			pmb->ctx_buf = mp;

			/* No reference taken here. This is a default
			 * RPI reg/immediate unreg cycle. The reference was
			 * taken in the reg rpi path and is released when
			 * this mailbox completes.
			 */
			pmb->ctx_ndlp = ndlp;
			pmb->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc != MBX_BUSY)
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"0385 rc should "
						"have been MBX_BUSY\n");
			if (rc != MBX_NOT_FINISHED)
				goto send_current_mbox;
		}
	}
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	/* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
	if (pmbox->mbxCommand == MBX_HEARTBEAT) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		/* Release the mailbox command posting token */
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		if (bf_get(lpfc_trailer_consumed, mcqe))
			lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
		spin_unlock_irqrestore(&phba->hbalock, iflags);

		/* Post the next mbox command, if there is one */
		lpfc_sli4_post_async_mbox(phba);

		/* Process cmpl now */
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
		return false;
	}

	/* There is mailbox completion work to queue to the worker thread */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbox_cmpl_put(phba, pmb);
	phba->work_ha |= HA_MBATT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	workposted = true;

send_current_mbox:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Release the mailbox command posting token */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting active mailbox pointer need to be in sync to flag clear */
	phba->sli.mbox_active = NULL;
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* Wake up worker thread to post the next pending mailbox command */
	lpfc_worker_wake_up(phba);
	return workposted;

out_no_mqe_complete:
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return workposted;
}

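/*
 * Illustrative note (assumption about intent): the status merge in the
 * handler above guarantees that a caller which only checks the MQE status
 * still sees a failure when the MCQE carried one:
 *
 *	if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
 *		bf_set(lpfc_mqe_status, mqe,
 *		       (LPFC_MBX_ERROR_RANGE | mcqe_status));
 *
 * i.e. a "successful" MQE status is overwritten with a ranged error code,
 * so the MCQE failure cannot be silently lost.
 */
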
/**
 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @cqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry. It invokes the
 * proper mailbox complete handling or asynchronous event handling routine
 * according to the MCQE's async bit.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			 struct lpfc_cqe *cqe)
{
	struct lpfc_mcqe mcqe;
	bool workposted;

	cq->CQ_mbox++;

	/* Copy the mailbox MCQE and convert endian order as needed */
	lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));

	/* Invoke the proper event handling routine */
	if (!bf_get(lpfc_trailer_async, &mcqe))
		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
	else
		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);

	return workposted;
}

/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_iocbq *irspiocbq;
	unsigned long iflags;
	struct lpfc_sli_ring *pring = cq->pring;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* Log the error status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0357 ELS CQE error: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}

	/* Get an irspiocbq for later ELS response processing use */
	irspiocbq = lpfc_sli_get_iocbq(phba);
	if (!irspiocbq) {
		if (!list_empty(&pring->txq))
			txq_cnt++;
		if (!list_empty(&pring->txcmplq))
			txcmplq_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
				"els_txcmplq_cnt=%d\n",
				txq_cnt, phba->iocb_cnt,
				txcmplq_cnt);
		return false;
	}

	/* Save off the slow-path queue event for work thread to process */
	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&irspiocbq->cq_event.list,
		      &phba->sli4_hba.sp_queue_event);
	phba->hba_flag |= HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}

/**
 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a slow-path WQ entry consumed event by invoking the
 * proper WQ release routine on the slow-path WQ.
 **/
static void
lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
			     struct lpfc_wcqe_release *wcqe)
{
	/* sanity check on queue memory */
	if (unlikely(!phba->sli4_hba.els_wq))
		return;
	/* Check for the slow-path ELS work queue */
	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
	else
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2579 Slow-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
				phba->sli4_hba.els_wq->queue_id);
}

/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to a WQ completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an XRI abort event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
				   struct lpfc_queue *cq,
				   struct sli4_wcqe_xri_aborted *wcqe)
{
	bool workposted = false;
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	switch (cq->subtype) {
	case LPFC_IO:
		lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* Notify aborted XRI for NVME work queue */
			if (phba->nvmet_support)
				lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
		}
		workposted = false;
		break;
	case LPFC_NVME_LS: /* NVME LS uses ELS resources */
	case LPFC_ELS:
		cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
		if (!cq_event) {
			workposted = false;
			break;
		}
		cq_event->hdwq = cq->hdwq;
		spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
				  iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
		/* Set the els xri abort event flag */
		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
				       iflags);
		workposted = true;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0603 Invalid CQ subtype %d: "
				"%08x %08x %08x %08x\n",
				cq->subtype, wcqe->word0, wcqe->parameter,
				wcqe->word2, wcqe->word3);
		workposted = false;
		break;
	}
	return workposted;
}

#define FC_RCTL_MDS_DIAGS	0xF4

/**
 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct fc_frame_header *fc_hdr;
	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
	struct lpfc_nvmet_tgtport *tgtp;
	struct hbq_dmabuf *dma_buf;
	uint32_t status, rq_id;
	unsigned long iflags;

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
	if (rq_id != hrq->queue_id)
		goto out;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2537 Receive Frame Truncated!!\n");
		fallthrough;
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));

		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
		    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			/* Handle MDS Loopback frames */
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_sli4_handle_mds_loopback(phba->pport,
							      dma_buf);
			else
				lpfc_in_buf_free(phba, &dma_buf->dbuf);
			break;
		}

		/* save off the frame for the work thread to process */
		list_add_tail(&dma_buf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		/* Frame received */
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6402 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		fallthrough;
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		spin_lock_irqsave(&phba->hbalock, iflags);
		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	}
out:
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to a completion queue entry.
 *
 * This routine processes a slow-path work-queue or receive-queue completion
 * queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	struct lpfc_cqe cqevt;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_cqe_code, &cqevt)) {
	case CQE_CODE_COMPL_WQE:
		/* Process the WQ/RQ complete event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&cqevt);
		break;
	case CQE_CODE_RELEASE_WQE:
		/* Process the WQ release event */
		lpfc_sli4_sp_handle_rel_wcqe(phba,
				(struct lpfc_wcqe_release *)&cqevt);
		break;
	case CQE_CODE_XRI_ABORTED:
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&cqevt);
		break;
	case CQE_CODE_RECEIVE:
	case CQE_CODE_RECEIVE_V1:
		/* Process the RQ event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_rcqe(phba,
				(struct lpfc_rcqe *)&cqevt);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0388 Not a valid WCQE code: x%x\n",
				bf_get(lpfc_cqe_code, &cqevt));
		break;
	}
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 * @speq: Pointer to slow-path event queue.
 *
 * This routine processes an event queue entry from the slow-path event
 * queue. It checks the MajorCode and MinorCode to determine whether this is
 * a completion event on a completion queue; if not, an error is logged and
 * the routine just returns. Otherwise, it gets to the corresponding
 * completion queue and schedules processing of all the entries on that
 * completion queue, which will also rearm the completion queue.
 **/
static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
			struct lpfc_queue *speq)
{
	struct lpfc_queue *cq = NULL, *childq;
	uint16_t cqid;
	int ret = 0;

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	list_for_each_entry(childq, &speq->child_list, list) {
		if (childq->queue_id == cqid) {
			cq = childq;
			break;
		}
	}
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0365 Slow-path CQ identifier "
					"(%d) does not exist\n", cqid);
		return;
	}

	/* Save EQ associated with this CQ */
	cq->assoc_qp = speq;

	if (is_kdump_kernel())
		ret = queue_work(phba->wq, &cq->spwork);
	else
		ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);

	if (!ret)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0390 Cannot schedule queue work "
				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
				cqid, cq->queue_id, raw_smp_processor_id());
}

/**
 * __lpfc_sli4_process_cq - Process elements of a CQ
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to CQ to be processed
 * @handler: Routine to process each cqe
 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
 * @poll_mode: Polling mode we were called from
 *
 * This routine processes completion queue entries in a CQ. While a valid
 * queue element is found, the handler is called. During processing, checks
 * are made for periodic doorbell writes to let the hardware know of
 * element consumption.
 *
 * If the max limit on cqes to process is hit, or there are no more valid
 * entries, the loop stops. If we processed a sufficient number of elements,
 * meaning there is sufficient load, rather than rearming and generating
 * another interrupt, a cq rescheduling delay will be set. A delay of 0
 * indicates no rescheduling.
 *
 * Returns True if work scheduled, False otherwise.
 **/
static bool
__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
		       bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
				       struct lpfc_cqe *),
		       unsigned long *delay, enum lpfc_poll_mode poll_mode)
{
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int count = 0, consumed = 0;
	bool arm = true;

	/* default - no reschedule */
	*delay = 0;

	if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	/* Process all the entries to the CQ */
	cq->q_flag = 0;
	cqe = lpfc_sli4_cq_get(cq);
	while (cqe) {
		workposted |= handler(phba, cq, cqe);
		__lpfc_sli4_consume_cqe(phba, cq, cqe);

		consumed++;
		if (!(++count % cq->max_proc_limit))
			break;

		if (!(count % cq->notify_interval)) {
			phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
			cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
		}

		if (count == LPFC_NVMET_CQ_NOTIFY)
			cq->q_flag |= HBA_NVMET_CQ_NOTIFY;

		cqe = lpfc_sli4_cq_get(cq);
	}
	if (count >= phba->cfg_cq_poll_threshold) {
		*delay = 1;
		arm = false;
	}

	/* Note: complete the irq_poll softirq before rearming CQ */
	if (poll_mode == LPFC_IRQ_POLL)
		irq_poll_complete(&cq->iop);

	/* Track the max number of CQEs processed in 1 EQ */
	if (count > cq->CQ_max_cqe)
		cq->CQ_max_cqe = count;

	cq->assoc_qp->EQ_cqe_cnt += count;

	/* Catch the no cq entry condition */
	if (unlikely(count == 0))
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0369 No entry from completion queue "
				"qid=%d\n", cq->queue_id);

	xchg(&cq->queue_claimed, 0);

rearm_and_exit:
	phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
			arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);

	return workposted;
}

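/*
 * Illustrative sketch (not driver code): a @handler passed to
 * __lpfc_sli4_process_cq() must match
 *
 *	bool my_handler(struct lpfc_hba *phba, struct lpfc_queue *cq,
 *			struct lpfc_cqe *cqe);
 *
 * and return true only when it queued work for the worker thread, as
 * lpfc_sli4_sp_handle_cqe() above does.  "my_handler" is a hypothetical
 * name used only for this example.
 */
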
/**
 * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
 * @cq: pointer to CQ to process
 *
 * This routine calls the cq processing routine with a handler specific
 * to the type of queue bound to it.
 *
 * The CQ routine returns two values: the first is the calling status,
 * which indicates whether work was queued to the background discovery
 * thread. If true, the routine should wake up the discovery thread;
 * the second is the delay parameter. If non-zero, rather than rearming
 * the CQ and yet another interrupt, the CQ handler should be queued so
 * that it is processed in a subsequent polling action. The value of
 * the delay indicates when to reschedule it.
 **/
static void
__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;
	int ret = 0;

	/* Process and rearm the CQ */
	switch (cq->type) {
	case LPFC_MCQ:
		workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_sp_handle_mcqe,
						&delay, LPFC_QUEUE_WORK);
		break;
	case LPFC_WCQ:
		if (cq->subtype == LPFC_IO)
			workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_fp_handle_cqe,
						&delay, LPFC_QUEUE_WORK);
		else
			workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_sp_handle_cqe,
						&delay, LPFC_QUEUE_WORK);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0370 Invalid completion queue type (%d)\n",
				cq->type);
		return;
	}

	if (delay) {
		if (is_kdump_kernel())
			ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
						 delay);
		else
			ret = queue_delayed_work_on(cq->chann, phba->wq,
						    &cq->sched_spwork, delay);
		if (!ret)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0394 Cannot schedule queue work "
					"for cqid=%d on CPU %d\n",
					cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
 * workqueue
 * @work: pointer to work element
 *
 * Translates from the work handler and calls the slow-path handler.
 **/
static void
lpfc_sli4_sp_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);

	__lpfc_sli4_sp_process_cq(cq);
}

/**
 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
 * @work: pointer to work element
 *
 * Translates from the work handler and calls the slow-path handler.
 **/
static void
lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(to_delayed_work(work),
					struct lpfc_queue, sched_spwork);

	__lpfc_sli4_sp_process_cq(cq);
}

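/*
 * Illustrative note: these wrappers exist only to recover the lpfc_queue
 * from the work_struct embedded in it; they are bound to the queue in
 * lpfc_sli4_queue_alloc() later in this file, e.g.:
 *
 *	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
 *	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
 */
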
/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.
 **/
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = cq->pring;
	struct lpfc_iocbq *cmdiocbq;
	unsigned long iflags;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* If resource errors reported from HBA, reduce queue
		 * depth of the SCSI device.
		 */
		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
		      IOSTAT_LOCAL_REJECT)) &&
		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES))
			phba->lpfc_rampdown_queue_depth(phba);

		/* Log the cmpl status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0373 FCP CQE cmpl: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}

	/* Look up the FCP command IOCB and create pseudo response IOCB */
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	cmdiocbq->isr_timestamp = cq->isr_timestamp;
#endif
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	if (cmdiocbq->cmd_cmpl) {
		/* For FCP the flag is cleared in cmd_cmpl */
		if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
		    cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
			spin_lock_irqsave(&phba->hbalock, iflags);
			cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
		}

		/* Pass the cmd_iocb and the wcqe to the upper layer */
		memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
		       sizeof(struct lpfc_wcqe_complete));
		cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
	} else {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	}
}

/**
 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a fast-path WQ entry consumed event by invoking the
 * proper WQ release routine on the fast-path WQ.
 **/
static void
lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_release *wcqe)
{
	struct lpfc_queue *childwq;
	bool wqid_matched = false;
	uint16_t hba_wqid;

	/* Check for fast-path FCP work queue release */
	hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
	list_for_each_entry(childwq, &cq->child_list, list) {
		if (childwq->queue_id == hba_wqid) {
			lpfc_sli4_wq_release(childwq,
					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
			if (childwq->q_flag & HBA_NVMET_WQFULL)
				lpfc_nvmet_wqfull_process(phba, childwq);
			wqid_matched = true;
			break;
		}
	}
	/* Report warning log message if no match found */
	if (wqid_matched != true)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2580 Fast-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
}

/**
 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			    struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;
	struct rqb_dmabuf *dma_buf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, rq_id;
	unsigned long iflags;
	uint32_t fctl, idx;

	if ((phba->nvmet_support == 0) ||
	    (phba->sli4_hba.nvmet_cqset == NULL))
		return workposted;

	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
	drq = phba->sli4_hba.nvmet_mrq_data[idx];

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);

	if ((phba->nvmet_support == 0) ||
	    (rq_id != hrq->queue_id))
		return workposted;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6126 Receive Frame Truncated!!\n");
		fallthrough;
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		/* Just some basic sanity checks on FCP Command frame */
		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
			fc_hdr->fh_f_ctl[1] << 8 |
			fc_hdr->fh_f_ctl[2]);
		if (((fctl &
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
			goto drop;

		if (fc_hdr->fh_type == FC_TYPE_FCP) {
			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
			lpfc_nvmet_unsol_fcp_event(
				phba, idx, dma_buf, cq->isr_timestamp,
				cq->q_flag & HBA_NVMET_CQ_NOTIFY);
			return false;
		}
drop:
		lpfc_rq_buf_free(phba, &dma_buf->hbuf);
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6401 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		fallthrough;
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		break;
	}
out:
	return workposted;
}

/**
 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
 * @phba: adapter with cq
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	struct lpfc_wcqe_release wcqe;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
	case CQE_CODE_COMPL_WQE:
	case CQE_CODE_NVME_ERSP:
		cq->CQ_wq++;
		/* Process the WQ complete event */
		phba->last_completion_time = jiffies;
		if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
					(struct lpfc_wcqe_complete *)&wcqe);
		break;
	case CQE_CODE_RELEASE_WQE:
		cq->CQ_release_wqe++;
		/* Process the WQ release event */
		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
					(struct lpfc_wcqe_release *)&wcqe);
		break;
	case CQE_CODE_XRI_ABORTED:
		cq->CQ_xri_aborted++;
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
					(struct sli4_wcqe_xri_aborted *)&wcqe);
		break;
	case CQE_CODE_RECEIVE_V1:
	case CQE_CODE_RECEIVE:
		phba->last_completion_time = jiffies;
		if (cq->subtype == LPFC_NVMET) {
			workposted = lpfc_sli4_nvmet_handle_rcqe(
				phba, cq, (struct lpfc_rcqe *)&wcqe);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0144 Not a valid CQE code: x%x\n",
				bf_get(lpfc_wcqe_c_code, &wcqe));
		break;
	}
	return workposted;
}

/**
 * lpfc_sli4_sched_cq_work - Schedules cq work
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to CQ
 * @cqid: CQ ID
 *
 * This routine checks the poll mode of the CQ corresponding to
 * cq->chann, then either schedules a softirq or queue_work to complete
 * cq work.
 *
 * queue_work path is taken if in NVMET mode, or if poll_mode is in
 * LPFC_QUEUE_WORK mode. Otherwise, softirq path is taken.
 **/
static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, uint16_t cqid)
{
	int ret = 0;

	switch (cq->poll_mode) {
	case LPFC_IRQ_POLL:
		/* CGN mgmt is mutually exclusive from softirq processing */
		if (phba->cmf_active_mode == LPFC_CFG_OFF) {
			irq_poll_sched(&cq->iop);
			break;
		}
		fallthrough;
	case LPFC_QUEUE_WORK:
	default:
		if (is_kdump_kernel())
			ret = queue_work(phba->wq, &cq->irqwork);
		else
			ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
		if (!ret)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0383 Cannot schedule queue work "
					"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
					cqid, cq->queue_id,
					raw_smp_processor_id());
	}
}

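/*
 * Illustrative sketch: the LPFC_IRQ_POLL branch above assumes cq->iop was
 * initialized elsewhere with irq_poll_init(), along the lines of
 *
 *	irq_poll_init(&cq->iop, budget, poll_fn);
 *
 * where "budget" and "poll_fn" are placeholders; the driver performs the
 * real initialization during queue setup.
 */
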
/**
 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the queue structure.
 * @eqe: Pointer to fast-path event queue entry.
 *
 * This routine processes an event queue entry from the fast-path event
 * queue. It checks the MajorCode and MinorCode to determine whether this is
 * a completion event on a completion queue; if not, an error is logged and
 * the routine just returns. Otherwise, it gets to the corresponding
 * completion queue and schedules processing of all the entries on the
 * completion queue, which will also rearm the completion queue.
 **/
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			 struct lpfc_eqe *eqe)
{
	struct lpfc_queue *cq = NULL;
	uint32_t qidx = eq->hdwq;
	uint16_t cqid, id;

	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0366 Not a valid completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* Use the fast lookup method first */
	if (cqid <= phba->sli4_hba.cq_max) {
		cq = phba->sli4_hba.cq_lookup[cqid];
		if (cq)
			goto work_cq;
	}

	/* Next check for NVMET completion */
	if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
		id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
		if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
			/* Process NVMET unsol rcv */
			cq = phba->sli4_hba.nvmet_cqset[cqid - id];
			goto process_cq;
		}
	}

	if (phba->sli4_hba.nvmels_cq &&
	    (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
		/* Process NVME unsol rcv */
		cq = phba->sli4_hba.nvmels_cq;
	}

	/* Otherwise this is a Slow path event */
	if (cq == NULL) {
		lpfc_sli4_sp_handle_eqe(phba, eqe,
					phba->sli4_hba.hdwq[qidx].hba_eq);
		return;
	}

process_cq:
	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0368 Miss-matched fast-path completion "
				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

work_cq:
#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
	if (phba->ktime_on)
		cq->isr_timestamp = ktime_get_ns();
	else
		cq->isr_timestamp = 0;
#endif
	lpfc_sli4_sched_cq_work(phba, cq, cqid);
}

/**
 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
 * @cq: Pointer to CQ to be processed
 * @poll_mode: Enum lpfc_poll_state to determine poll mode
 *
 * This routine calls the cq processing routine with the handler for
 * fast path CQEs.
 *
 * The CQ routine returns two values: the first is the calling status,
 * which indicates whether work was queued to the background discovery
 * thread. If true, the routine should wake up the discovery thread;
 * the second is the delay parameter. If non-zero, rather than rearming
 * the CQ and yet another interrupt, the CQ handler should be queued so
 * that it is processed in a subsequent polling action. The value of
 * the delay indicates when to reschedule it.
 **/
static void
__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
			   enum lpfc_poll_mode poll_mode)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;
	int ret = 0;

	/* process and rearm the CQ */
	workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
					     &delay, poll_mode);

	if (delay) {
		if (is_kdump_kernel())
			ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
						 delay);
		else
			ret = queue_delayed_work_on(cq->chann, phba->wq,
						    &cq->sched_irqwork, delay);
		if (!ret)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0367 Cannot schedule queue work "
					"for cqid=%d on CPU %d\n",
					cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
 * workqueue
 * @work: pointer to work element
 *
 * Translates from the work handler and calls the fast-path handler.
 **/
static void
lpfc_sli4_hba_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);

	__lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
}

/**
 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
 * @work: pointer to work element
 *
 * Translates from the work handler and calls the fast-path handler.
 **/
static void
lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(to_delayed_work(work),
					struct lpfc_queue, sched_irqwork);

	__lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
}

/**
 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-4 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures. Note that
 * the FCP EQs map one-to-one to the FCP CQs, such that the FCP EQ index is
 * equal to that of the FCP CQ index.
 *
 * The link attention and ELS ring attention events are handled
 * by the worker thread. The interrupt handler signals the worker thread
 * and returns for these events. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_queue *fpeq;
	unsigned long iflag;
	int ecount = 0;
	int hba_eqidx;
	struct lpfc_eq_intr_info *eqi;

	/* Get the driver's phba structure from the dev_id */
	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
	phba = hba_eq_hdl->phba;
	hba_eqidx = hba_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (unlikely(!phba->sli4_hba.hdwq))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
	if (unlikely(!fpeq))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eqcq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
	eqi->icnt++;

	fpeq->last_cpu = raw_smp_processor_id();

	if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
	    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
	    phba->cfg_auto_imax &&
	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
	    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
		lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);

	/* process and rearm the EQ */
	ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);

	if (unlikely(ecount == 0)) {
		fpeq->EQ_no_entry++;
		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0358 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
} /* lpfc_sli4_hba_intr_handler */

/**
 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the device-level interrupt handler to device with SLI-4
 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
 * interrupt mode is enabled and there is an event in the HBA which requires
 * driver attention. This function invokes the slow-path interrupt attention
 * handling function and fast-path interrupt attention handling function in
 * turn to process the relevant HBA attention events. This function is called
 * without any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t hba_irq_rc;
	bool hba_handled = false;
	int qidx;

	/* Get the driver's phba structure from the dev_id */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */
	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
					&phba->sli4_hba.hba_eq_hdl[qidx]);
		if (hba_irq_rc == IRQ_HANDLED)
			hba_handled |= true;
	}

	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */

void lpfc_sli4_poll_hbtimer(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
	struct lpfc_queue *eq;
	int i = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
		i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
	if (!list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));

	rcu_read_unlock();
}

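/*
 * Illustrative sketch: a timer_list callback like the one above is bound
 * with timer_setup() during initialization, e.g.
 *
 *	timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
 *
 * after which mod_timer() re-arms it. The exact call site is an assumption
 * here; the driver performs this setup in its CPU-hotplug handling code.
 */
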
inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
{
	struct lpfc_hba *phba = eq->phba;
	int i = 0;

	/*
	 * Unlocking an irq is one of the entry points to check
	 * for re-schedule, but we are good for io submission
	 * path as midlayer does a get_cpu to glue us in. Flush
	 * out the invalidate queue so we can see the updated
	 * value for flag.
	 */
	smp_rmb();

	if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
		/* We will not likely get the completion for the caller
		 * during this iteration but I guess that's fine.
		 * Future io's coming on this eq should be able to
		 * pick it up. As for the case of single io's, they
		 * will be handled through a sched from polling timer
		 * function which is currently triggered every 1msec.
		 */
		i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);

	return i;
}

static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	/* kickstart slowpath processing if needed */
	if (list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));

	list_add_rcu(&eq->_poll_list, &phba->poll_list);
	synchronize_rcu();
}

static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	/* Disable slowpath processing for this eq. Kick start the eq
	 * by RE-ARMING it ASAP
	 */
	list_del_rcu(&eq->_poll_list);
	synchronize_rcu();

	if (list_empty(&phba->poll_list))
		del_timer_sync(&phba->cpuhp_poll_timer);
}

void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
{
	struct lpfc_queue *eq, *next;

	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
		list_del(&eq->_poll_list);

	INIT_LIST_HEAD(&phba->poll_list);
	synchronize_rcu();
}

static void
__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
{
	if (mode == eq->mode)
		return;
	/*
	 * currently this function is only called during a hotplug
	 * event and the cpu on which this function is executing
	 * is going offline. By now the hotplug has instructed
	 * the scheduler to remove this cpu from cpu active mask.
	 * So we don't need to worry about being put aside by the
	 * scheduler for a high priority process. Yes, the
	 * interrupts could come but they are known to retire ASAP.
	 */

	/* Disable polling in the fastpath */
	WRITE_ONCE(eq->mode, mode);
	/* flush out the store buffer */
	smp_wmb();

	/*
	 * Add this eq to the polling list and start polling. For
	 * a grace period both interrupt handler and poller will
	 * try to process the eq _but_ that's fine. We have a
	 * synchronization mechanism in place (queue_claimed) to
	 * deal with it. This is just a draining phase for the
	 * interrupt handler (not eq's) as we have guaranteed through
	 * barrier that all the CPUs have seen the new CQ_POLLED
	 * state, which will effectively disable the REARMING of
	 * the EQ. The whole idea is eq's die off eventually as
	 * we are not rearming EQ's anymore.
	 */
	mode ? lpfc_sli4_add_to_poll_list(eq) :
	       lpfc_sli4_remove_from_poll_list(eq);
}

void lpfc_sli4_start_polling(struct lpfc_queue *eq)
{
	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
}

void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);

	/* Kick start for the pending io's in h/w.
	 * Once we switch back to interrupt processing on an eq
	 * the io path completion will only arm eq's when it
	 * receives a completion. But since eq's are in disarmed
	 * state it doesn't receive a completion. This creates
	 * a deadlock scenario.
	 */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
}

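/*
 * Illustrative usage (hedged): the CPU-hotplug paths flip an EQ between
 * interrupt-driven and timer-driven polling with the helpers above, e.g.
 *
 *	lpfc_sli4_start_polling(eq);	// CPU going offline
 *	// ...
 *	lpfc_sli4_stop_polling(eq);	// CPU back online; rearm the EQ
 *
 * The pairing shown is an assumption for illustration; the actual call
 * sites are in the driver's hotplug callbacks.
 */
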
/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying the
 * queue on the HBA.
 **/
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
	struct lpfc_dmabuf *dmabuf;

	if (!queue)
		return;

	if (!list_empty(&queue->wq_list))
		list_del(&queue->wq_list);

	while (!list_empty(&queue->page_list)) {
		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
				 list);
		dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	if (queue->rqbp) {
		lpfc_free_rq_buffer(queue->phba, queue);
		kfree(queue->rqbp);
	}

	if (!list_empty(&queue->cpu_list))
		list_del(&queue->cpu_list);

	kfree(queue);
}

/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @page_size: The size of a queue page
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 * @cpu: The cpu that will primarily utilize this queue.
 *
 * This function allocates a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called before creating the
 * queue on the HBA.
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
		      uint32_t entry_size, uint32_t entry_count, int cpu)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	uint16_t x, pgcnt;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = page_size;

	pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;

	/* If needed, Adjust page count to match the max the adapter supports */
	if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
		pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;

	queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!queue)
		return NULL;

	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->_poll_list);
	INIT_LIST_HEAD(&queue->wq_list);
	INIT_LIST_HEAD(&queue->wqfull_list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);
	INIT_LIST_HEAD(&queue->cpu_list);

	/* Set queue parameters now. If the system cannot provide memory
	 * resources, the free routine needs to know what was allocated.
	 */
	queue->page_count = pgcnt;
	queue->q_pgs = (void **)&queue[1];
	queue->entry_cnt_per_pg = hw_page_size / entry_size;
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;
	queue->page_size = hw_page_size;
	queue->phba = phba;

	for (x = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
				      dev_to_node(&phba->pcidev->dev));
		if (!dmabuf)
			goto out_fail;
		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  hw_page_size, &dmabuf->phys,
						  GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);
		/* use lpfc_sli4_qe to index a particular entry in this page */
		queue->q_pgs[x] = dmabuf->virt;
	}
	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);

	/* notify_interval will be set during q creation */

	return queue;
out_fail:
	lpfc_sli4_queue_free(queue);
	return NULL;
}

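/*
 * Illustrative usage (hedged): callers size a queue from SLI4 parameters.
 * A hypothetical CQ with 4K-byte pages, 16-byte entries and 256 entries:
 *
 *	struct lpfc_queue *cq;
 *
 *	cq = lpfc_sli4_queue_alloc(phba, SZ_4K, 16, 256, cpu);
 *	if (!cq)
 *		return -ENOMEM;
 *
 * The sizes here are made-up examples; real callers use the entry size and
 * count derived from the port capabilities.
 */
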
/**
 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
 * @phba: HBA structure that indicates port to create a queue on.
 * @pci_barset: PCI BAR set flag.
 *
 * This function shall perform iomap of the specified PCI BAR address to host
 * memory address if not already done so and return it. The returned host
 * memory address can be NULL.
 **/
static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
	if (!phba->pcidev)
		return NULL;

	switch (pci_barset) {
	case WQ_PCI_BAR_0_AND_1:
		return phba->pci_bar0_memmap_p;
	case WQ_PCI_BAR_2_AND_3:
		return phba->pci_bar2_memmap_p;
	case WQ_PCI_BAR_4_AND_5:
		return phba->pci_bar4_memmap_p;
	default:
		break;
	}
	return NULL;
}

15644 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
15645 * @phba: HBA structure that EQs are on.
15646 * @startq: The starting EQ index to modify
15647 * @numq: The number of EQs (consecutive indexes) to modify
15648 * @usdelay: amount of delay
15650 * This function revises the EQ delay on one or more EQs. The EQ delay
15651 * is set either by writing to a register (if supported by the SLI Port)
15652 * or by mailbox command. The mailbox command allows several EQs to be
15655 * updated at one time. The @phba struct is used to send a mailbox command
15656 * to the HBA. The @startq value gives the starting EQ index to change. The
15657 * @numq value specifies how many consecutive EQ indexes, starting at
15658 * @startq, are to be changed. This function is synchronous and waits for
15659 * any mailbox commands to finish before returning.
15661 * On success this function will return a zero. If unable to allocate
15662 * enough memory this function will return -ENOMEM. If a mailbox command
15663 * fails this function will return -ENXIO. Note: on ENXIO, some EQs may
15664 * have had their delay multiplier changed.
15667 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
15668 uint32_t numq, uint32_t usdelay)
15670 struct lpfc_mbx_modify_eq_delay *eq_delay;
15671 LPFC_MBOXQ_t *mbox;
15672 struct lpfc_queue *eq;
15673 int cnt = 0, rc, length;
15674 uint32_t shdr_status, shdr_add_status;
15677 union lpfc_sli4_cfg_shdr *shdr;
15679 if (startq >= phba->cfg_irq_chann)
15682 if (usdelay > 0xFFFF) {
15683 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15684 "6429 usdelay %d too large. Scaled down to "
15685 "0xFFFF.\n", usdelay);
15689 /* set values by EQ_DELAY register if supported */
15690 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15691 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15692 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15696 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15704 /* Otherwise, set values by mailbox cmd */
15706 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15708 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15709 "6428 Failed allocating mailbox cmd buffer."
15710 " EQ delay was not set.\n");
15713 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
15714 sizeof(struct lpfc_sli4_cfg_mhdr));
15715 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15716 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
15717 length, LPFC_SLI4_MBX_EMBED);
15718 eq_delay = &mbox->u.mqe.un.eq_delay;
15720 /* Calculate delay multiplier from maximum interrupts per second */
15721 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
15724 if (dmult > LPFC_DMULT_MAX)
15725 dmult = LPFC_DMULT_MAX;
15727 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15728 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15731 eq->q_mode = usdelay;
15732 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
15733 eq_delay->u.request.eq[cnt].phase = 0;
15734 eq_delay->u.request.eq[cnt].delay_multi = dmult;
15739 eq_delay->u.request.num_eq = cnt;
15741 mbox->vport = phba->pport;
15742 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15743 mbox->ctx_ndlp = NULL;
15744 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15745 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
15746 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15747 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15748 if (shdr_status || shdr_add_status || rc) {
15749 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15750 "2512 MODIFY_EQ_DELAY mailbox failed with "
15751 "status x%x add_status x%x, mbx status x%x\n",
15752 shdr_status, shdr_add_status, rc);
15754 mempool_free(mbox, phba->mbox_mem_pool);
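/*
 * Worked instance (illustrative only) of the delay-multiplier conversion
 * used above, relying solely on the LPFC_DMULT_CONST, LPFC_SEC_TO_USEC and
 * LPFC_DMULT_MAX constants already referenced by lpfc_modify_hba_eq_delay().
 */
#if 0	/* sketch, not compiled */
static uint32_t example_usdelay_to_dmult(uint32_t usdelay)
{
	uint32_t dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;

	/* the port rejects larger multipliers, so clamp to LPFC_DMULT_MAX */
	return min_t(uint32_t, dmult, LPFC_DMULT_MAX);
}
#endif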
15759 * lpfc_eq_create - Create an Event Queue on the HBA
15760 * @phba: HBA structure that indicates port to create a queue on.
15761 * @eq: The queue structure to use to create the event queue.
15762 * @imax: The maximum interrupt per second limit.
15764 * This function creates an event queue, as detailed in @eq, on a port,
15765 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
15767 * The @phba struct is used to send a mailbox command to the HBA. The @eq
15768 * struct is used to get the entry count and entry size that are necessary to
15769 * determine the number of pages to allocate and use for this queue. This
15770 * function will send the EQ_CREATE mailbox command to the HBA to set up the
15771 * event queue. This function is synchronous and waits for the mailbox
15772 * command to finish before continuing.
15774 * On success this function will return a zero. If unable to allocate enough
15775 * memory this function will return -ENOMEM. If the queue create mailbox command
15776 * fails this function will return -ENXIO.
15779 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
15781 struct lpfc_mbx_eq_create *eq_create;
15782 LPFC_MBOXQ_t *mbox;
15783 int rc, length, status = 0;
15784 struct lpfc_dmabuf *dmabuf;
15785 uint32_t shdr_status, shdr_add_status;
15786 union lpfc_sli4_cfg_shdr *shdr;
15788 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15790 /* sanity check on queue memory */
15793 if (!phba->sli4_hba.pc_sli4_params.supported)
15794 hw_page_size = SLI4_PAGE_SIZE;
15796 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15799 length = (sizeof(struct lpfc_mbx_eq_create) -
15800 sizeof(struct lpfc_sli4_cfg_mhdr));
15801 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15802 LPFC_MBOX_OPCODE_EQ_CREATE,
15803 length, LPFC_SLI4_MBX_EMBED);
15804 eq_create = &mbox->u.mqe.un.eq_create;
15805 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
15806 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
15808 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
15810 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
15812 /* Use version 2 of CREATE_EQ if eqav is set */
15813 if (phba->sli4_hba.pc_sli4_params.eqav) {
15814 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15815 LPFC_Q_CREATE_VERSION_2);
15816 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
15817 phba->sli4_hba.pc_sli4_params.eqav);
15820 /* don't set up the delay multiplier using EQ_CREATE */
15822 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
15824 switch (eq->entry_count) {
15826 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15827 "0360 Unsupported EQ count. (%d)\n",
15829 if (eq->entry_count < 256) {
15833 fallthrough; /* otherwise default to smallest count */
15835 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15839 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15843 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15847 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15851 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15855 list_for_each_entry(dmabuf, &eq->page_list, list) {
15856 memset(dmabuf->virt, 0, hw_page_size);
15857 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15858 putPaddrLow(dmabuf->phys);
15859 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15860 putPaddrHigh(dmabuf->phys);
15862 mbox->vport = phba->pport;
15863 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15864 mbox->ctx_buf = NULL;
15865 mbox->ctx_ndlp = NULL;
15866 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15867 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15868 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15869 if (shdr_status || shdr_add_status || rc) {
15870 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15871 "2500 EQ_CREATE mailbox failed with "
15872 "status x%x add_status x%x, mbx status x%x\n",
15873 shdr_status, shdr_add_status, rc);
15876 eq->type = LPFC_EQ;
15877 eq->subtype = LPFC_NONE;
15878 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
15879 if (eq->queue_id == 0xFFFF)
15881 eq->host_index = 0;
15882 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
15883 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
15885 mempool_free(mbox, phba->mbox_mem_pool);
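/*
 * Allocate-then-create pairing for an EQ (illustrative sketch, not part of
 * the driver). It assumes the entry size/count recorded in phba->sli4_hba
 * during setup; error handling is reduced to the minimum.
 */
#if 0	/* sketch, not compiled */
static struct lpfc_queue *example_make_eq(struct lpfc_hba *phba, int cpu,
					  uint32_t imax)
{
	struct lpfc_queue *eq;

	eq = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE,
				   phba->sli4_hba.eq_esize,
				   phba->sli4_hba.eq_ecount, cpu);
	if (!eq)
		return NULL;

	if (lpfc_eq_create(phba, eq, imax)) {
		lpfc_sli4_queue_free(eq);
		return NULL;
	}
	return eq;
}
#endif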
15889 static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
15891 struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
15893 __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
15899 * lpfc_cq_create - Create a Completion Queue on the HBA
15900 * @phba: HBA structure that indicates port to create a queue on.
15901 * @cq: The queue structure to use to create the completion queue.
15902 * @eq: The event queue to bind this completion queue to.
15903 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
15904 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
15906 * This function creates a completion queue, as detailed in @cq, on a port,
15907 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
15909 * The @phba struct is used to send a mailbox command to the HBA. The @cq
15910 * struct is used to get the entry count and entry size that are necessary to
15911 * determine the number of pages to allocate and use for this queue. The @eq
15912 * is used to indicate which event queue to bind this completion queue to. This
15913 * function will send the CQ_CREATE mailbox command to the HBA to set up the
15914 * completion queue. This function is synchronous and waits for the mailbox
15915 * command to finish before continuing.
15917 * On success this function will return a zero. If unable to allocate enough
15918 * memory this function will return -ENOMEM. If the queue create mailbox command
15919 * fails this function will return -ENXIO.
15922 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
15923 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
15925 struct lpfc_mbx_cq_create *cq_create;
15926 struct lpfc_dmabuf *dmabuf;
15927 LPFC_MBOXQ_t *mbox;
15928 int rc, length, status = 0;
15929 uint32_t shdr_status, shdr_add_status;
15930 union lpfc_sli4_cfg_shdr *shdr;
15932 /* sanity check on queue memory */
15936 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15939 length = (sizeof(struct lpfc_mbx_cq_create) -
15940 sizeof(struct lpfc_sli4_cfg_mhdr));
15941 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15942 LPFC_MBOX_OPCODE_CQ_CREATE,
15943 length, LPFC_SLI4_MBX_EMBED);
15944 cq_create = &mbox->u.mqe.un.cq_create;
15945 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
15946 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
15948 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
15949 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
15950 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15951 phba->sli4_hba.pc_sli4_params.cqv);
15952 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
15953 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
15954 (cq->page_size / SLI4_PAGE_SIZE));
15955 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
15957 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
15958 phba->sli4_hba.pc_sli4_params.cqav);
15960 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
15963 switch (cq->entry_count) {
15966 if (phba->sli4_hba.pc_sli4_params.cqv ==
15967 LPFC_Q_CREATE_VERSION_2) {
15968 cq_create->u.request.context.lpfc_cq_context_count =
15970 bf_set(lpfc_cq_context_count,
15971 &cq_create->u.request.context,
15972 LPFC_CQ_CNT_WORD7);
15977 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15978 "0361 Unsupported CQ count: "
15979 "entry cnt %d sz %d pg cnt %d\n",
15980 cq->entry_count, cq->entry_size,
15982 if (cq->entry_count < 256) {
15986 fallthrough; /* otherwise default to smallest count */
15988 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15992 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15996 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16000 list_for_each_entry(dmabuf, &cq->page_list, list) {
16001 memset(dmabuf->virt, 0, cq->page_size);
16002 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16003 putPaddrLow(dmabuf->phys);
16004 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16005 putPaddrHigh(dmabuf->phys);
16007 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16009 /* The IOCTL status is embedded in the mailbox subheader. */
16010 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16011 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16012 if (shdr_status || shdr_add_status || rc) {
16013 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16014 "2501 CQ_CREATE mailbox failed with "
16015 "status x%x add_status x%x, mbx status x%x\n",
16016 shdr_status, shdr_add_status, rc);
16020 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16021 if (cq->queue_id == 0xFFFF) {
16025 /* link the cq onto the parent eq child list */
16026 list_add_tail(&cq->list, &eq->child_list);
16027 /* Set up completion queue's type and subtype */
16029 cq->subtype = subtype;
16030 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16031 cq->assoc_qid = eq->queue_id;
16033 cq->host_index = 0;
16034 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16035 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
16037 if (cq->queue_id > phba->sli4_hba.cq_max)
16038 phba->sli4_hba.cq_max = cq->queue_id;
16040 irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
16042 mempool_free(mbox, phba->mbox_mem_pool);
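/*
 * Illustrative sketch: binding a CQ to its parent EQ. The LPFC_WCQ type and
 * LPFC_ELS subtype match the driver's slow-path ELS CQ; any EQ previously
 * created with lpfc_eq_create() can serve as the parent.
 */
#if 0	/* sketch, not compiled */
static int example_make_els_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
			       struct lpfc_queue *eq)
{
	/* on success lpfc_cq_create() links cq onto eq->child_list */
	return lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_ELS);
}
#endif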
16047 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
16048 * @phba: HBA structure that indicates port to create a queue on.
16049 * @cqp: The queue structure array to use to create the completion queues.
16050 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
16051 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16052 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16054 * This function creates a set of completion queues to support MRQ,
16055 * as detailed in @cqp, on a port,
16056 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
16058 * The @phba struct is used to send a mailbox command to the HBA. The @cqp
16059 * array is used to get the entry count and entry size that are necessary to
16060 * determine the number of pages to allocate and use for these queues. The
16061 * @hdwq array indicates which event queues to bind the completion queues to.
16062 * This function will send the CREATE_CQ_SET mailbox command to the HBA to set
16063 * up the completion queues. This function is synchronous and waits for the
16064 * mailbox command to finish before continuing.
16066 * On success this function will return a zero. If unable to allocate enough
16067 * memory this function will return -ENOMEM. If the queue create mailbox command
16068 * fails this function will return -ENXIO.
16071 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
16072 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
16075 struct lpfc_queue *cq;
16076 struct lpfc_queue *eq;
16077 struct lpfc_mbx_cq_create_set *cq_set;
16078 struct lpfc_dmabuf *dmabuf;
16079 LPFC_MBOXQ_t *mbox;
16080 int rc, length, alloclen, status = 0;
16081 int cnt, idx, numcq, page_idx = 0;
16082 uint32_t shdr_status, shdr_add_status;
16083 union lpfc_sli4_cfg_shdr *shdr;
16084 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16086 /* sanity check on queue memory */
16087 numcq = phba->cfg_nvmet_mrq;
16088 if (!cqp || !hdwq || !numcq)
16091 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16095 length = sizeof(struct lpfc_mbx_cq_create_set);
16096 length += ((numcq * cqp[0]->page_count) *
16097 sizeof(struct dma_address));
16098 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16099 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
16100 LPFC_SLI4_MBX_NEMBED);
16101 if (alloclen < length) {
16102 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16103 "3098 Allocated DMA memory size (%d) is "
16104 "less than the requested DMA memory size "
16105 "(%d)\n", alloclen, length);
16109 cq_set = mbox->sge_array->addr[0];
16110 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
16111 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
16113 for (idx = 0; idx < numcq; idx++) {
16115 eq = hdwq[idx].hba_eq;
16120 if (!phba->sli4_hba.pc_sli4_params.supported)
16121 hw_page_size = cq->page_size;
16125 bf_set(lpfc_mbx_cq_create_set_page_size,
16126 &cq_set->u.request,
16127 (hw_page_size / SLI4_PAGE_SIZE));
16128 bf_set(lpfc_mbx_cq_create_set_num_pages,
16129 &cq_set->u.request, cq->page_count);
16130 bf_set(lpfc_mbx_cq_create_set_evt,
16131 &cq_set->u.request, 1);
16132 bf_set(lpfc_mbx_cq_create_set_valid,
16133 &cq_set->u.request, 1);
16134 bf_set(lpfc_mbx_cq_create_set_cqe_size,
16135 &cq_set->u.request, 0);
16136 bf_set(lpfc_mbx_cq_create_set_num_cq,
16137 &cq_set->u.request, numcq);
16138 bf_set(lpfc_mbx_cq_create_set_autovalid,
16139 &cq_set->u.request,
16140 phba->sli4_hba.pc_sli4_params.cqav);
16141 switch (cq->entry_count) {
16144 if (phba->sli4_hba.pc_sli4_params.cqv ==
16145 LPFC_Q_CREATE_VERSION_2) {
16146 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16147 &cq_set->u.request,
16149 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16150 &cq_set->u.request,
16151 LPFC_CQ_CNT_WORD7);
16156 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16157 "3118 Bad CQ count. (%d)\n",
16159 if (cq->entry_count < 256) {
16163 fallthrough; /* otherwise default to smallest */
16165 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16166 &cq_set->u.request, LPFC_CQ_CNT_256);
16169 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16170 &cq_set->u.request, LPFC_CQ_CNT_512);
16173 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16174 &cq_set->u.request, LPFC_CQ_CNT_1024);
16177 bf_set(lpfc_mbx_cq_create_set_eq_id0,
16178 &cq_set->u.request, eq->queue_id);
16181 bf_set(lpfc_mbx_cq_create_set_eq_id1,
16182 &cq_set->u.request, eq->queue_id);
16185 bf_set(lpfc_mbx_cq_create_set_eq_id2,
16186 &cq_set->u.request, eq->queue_id);
16189 bf_set(lpfc_mbx_cq_create_set_eq_id3,
16190 &cq_set->u.request, eq->queue_id);
16193 bf_set(lpfc_mbx_cq_create_set_eq_id4,
16194 &cq_set->u.request, eq->queue_id);
16197 bf_set(lpfc_mbx_cq_create_set_eq_id5,
16198 &cq_set->u.request, eq->queue_id);
16201 bf_set(lpfc_mbx_cq_create_set_eq_id6,
16202 &cq_set->u.request, eq->queue_id);
16205 bf_set(lpfc_mbx_cq_create_set_eq_id7,
16206 &cq_set->u.request, eq->queue_id);
16209 bf_set(lpfc_mbx_cq_create_set_eq_id8,
16210 &cq_set->u.request, eq->queue_id);
16213 bf_set(lpfc_mbx_cq_create_set_eq_id9,
16214 &cq_set->u.request, eq->queue_id);
16217 bf_set(lpfc_mbx_cq_create_set_eq_id10,
16218 &cq_set->u.request, eq->queue_id);
16221 bf_set(lpfc_mbx_cq_create_set_eq_id11,
16222 &cq_set->u.request, eq->queue_id);
16225 bf_set(lpfc_mbx_cq_create_set_eq_id12,
16226 &cq_set->u.request, eq->queue_id);
16229 bf_set(lpfc_mbx_cq_create_set_eq_id13,
16230 &cq_set->u.request, eq->queue_id);
16233 bf_set(lpfc_mbx_cq_create_set_eq_id14,
16234 &cq_set->u.request, eq->queue_id);
16237 bf_set(lpfc_mbx_cq_create_set_eq_id15,
16238 &cq_set->u.request, eq->queue_id);
16242 /* link the cq onto the parent eq child list */
16243 list_add_tail(&cq->list, &eq->child_list);
16244 /* Set up completion queue's type and subtype */
16246 cq->subtype = subtype;
16247 cq->assoc_qid = eq->queue_id;
16249 cq->host_index = 0;
16250 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16251 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
16256 list_for_each_entry(dmabuf, &cq->page_list, list) {
16257 memset(dmabuf->virt, 0, hw_page_size);
16258 cnt = page_idx + dmabuf->buffer_tag;
16259 cq_set->u.request.page[cnt].addr_lo =
16260 putPaddrLow(dmabuf->phys);
16261 cq_set->u.request.page[cnt].addr_hi =
16262 putPaddrHigh(dmabuf->phys);
16268 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16270 /* The IOCTL status is embedded in the mailbox subheader. */
16271 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16272 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16273 if (shdr_status || shdr_add_status || rc) {
16274 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16275 "3119 CQ_CREATE_SET mailbox failed with "
16276 "status x%x add_status x%x, mbx status x%x\n",
16277 shdr_status, shdr_add_status, rc);
16281 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
16282 if (rc == 0xFFFF) {
16287 for (idx = 0; idx < numcq; idx++) {
16289 cq->queue_id = rc + idx;
16290 if (cq->queue_id > phba->sli4_hba.cq_max)
16291 phba->sli4_hba.cq_max = cq->queue_id;
16295 lpfc_sli4_mbox_cmd_free(phba, mbox);
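/*
 * Restatement (illustrative only) of the id assignment done just above:
 * CREATE_CQ_SET returns a single base queue id and the set is numbered
 * consecutively from it.
 */
#if 0	/* sketch, not compiled */
static void example_cq_set_ids(struct lpfc_queue **cqp, int numcq,
			       uint16_t base_id)
{
	int idx;

	for (idx = 0; idx < numcq; idx++)
		cqp[idx]->queue_id = base_id + idx;
}
#endif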
16300 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
16301 * @phba: HBA structure that indicates port to create a queue on.
16302 * @mq: The queue structure to use to create the mailbox queue.
16303 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
16304 * @cq: The completion queue to associate with this mq.
16306 * This function provides fallback (fb) functionality when the
16307 * mq_create_ext fails on older FW generations. Its purpose is identical
16308 * to mq_create_ext otherwise.
16310 * This routine cannot fail as all attributes were previously accessed and
16311 * initialized in mq_create_ext.
16314 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
16315 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
16317 struct lpfc_mbx_mq_create *mq_create;
16318 struct lpfc_dmabuf *dmabuf;
16321 length = (sizeof(struct lpfc_mbx_mq_create) -
16322 sizeof(struct lpfc_sli4_cfg_mhdr));
16323 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16324 LPFC_MBOX_OPCODE_MQ_CREATE,
16325 length, LPFC_SLI4_MBX_EMBED);
16326 mq_create = &mbox->u.mqe.un.mq_create;
16327 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
16329 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
16331 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
16332 switch (mq->entry_count) {
16334 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16335 LPFC_MQ_RING_SIZE_16);
16338 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16339 LPFC_MQ_RING_SIZE_32);
16342 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16343 LPFC_MQ_RING_SIZE_64);
16346 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16347 LPFC_MQ_RING_SIZE_128);
16350 list_for_each_entry(dmabuf, &mq->page_list, list) {
16351 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16352 putPaddrLow(dmabuf->phys);
16353 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16354 putPaddrHigh(dmabuf->phys);
16359 * lpfc_mq_create - Create a mailbox Queue on the HBA
16360 * @phba: HBA structure that indicates port to create a queue on.
16361 * @mq: The queue structure to use to create the mailbox queue.
16362 * @cq: The completion queue to associate with this mq.
16363 * @subtype: The queue's subtype.
16365 * This function creates a mailbox queue, as detailed in @mq, on a port,
16366 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
16368 * The @phba struct is used to send a mailbox command to the HBA. The @mq
16369 * struct is used to get the entry count and entry size that are necessary to
16370 * determine the number of pages to allocate and use for this queue. This
16371 * function will send the MQ_CREATE mailbox command to the HBA to set up the
16372 * mailbox queue. This function is synchronous and waits for the mailbox
16373 * command to finish before continuing.
16375 * On success this function will return a zero. If unable to allocate enough
16376 * memory this function will return -ENOMEM. If the queue create mailbox command
16377 * fails this function will return -ENXIO.
16380 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
16381 struct lpfc_queue *cq, uint32_t subtype)
16383 struct lpfc_mbx_mq_create *mq_create;
16384 struct lpfc_mbx_mq_create_ext *mq_create_ext;
16385 struct lpfc_dmabuf *dmabuf;
16386 LPFC_MBOXQ_t *mbox;
16387 int rc, length, status = 0;
16388 uint32_t shdr_status, shdr_add_status;
16389 union lpfc_sli4_cfg_shdr *shdr;
16390 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16392 /* sanity check on queue memory */
16395 if (!phba->sli4_hba.pc_sli4_params.supported)
16396 hw_page_size = SLI4_PAGE_SIZE;
16398 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16401 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
16402 sizeof(struct lpfc_sli4_cfg_mhdr));
16403 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16404 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
16405 length, LPFC_SLI4_MBX_EMBED);
16407 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
16408 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
16409 bf_set(lpfc_mbx_mq_create_ext_num_pages,
16410 &mq_create_ext->u.request, mq->page_count);
16411 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
16412 &mq_create_ext->u.request, 1);
16413 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
16414 &mq_create_ext->u.request, 1);
16415 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
16416 &mq_create_ext->u.request, 1);
16417 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
16418 &mq_create_ext->u.request, 1);
16419 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
16420 &mq_create_ext->u.request, 1);
16421 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
16422 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16423 phba->sli4_hba.pc_sli4_params.mqv);
16424 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
16425 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
16428 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
16430 switch (mq->entry_count) {
16432 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16433 "0362 Unsupported MQ count. (%d)\n",
16435 if (mq->entry_count < 16) {
16439 fallthrough; /* otherwise default to smallest count */
16441 bf_set(lpfc_mq_context_ring_size,
16442 &mq_create_ext->u.request.context,
16443 LPFC_MQ_RING_SIZE_16);
16446 bf_set(lpfc_mq_context_ring_size,
16447 &mq_create_ext->u.request.context,
16448 LPFC_MQ_RING_SIZE_32);
16451 bf_set(lpfc_mq_context_ring_size,
16452 &mq_create_ext->u.request.context,
16453 LPFC_MQ_RING_SIZE_64);
16456 bf_set(lpfc_mq_context_ring_size,
16457 &mq_create_ext->u.request.context,
16458 LPFC_MQ_RING_SIZE_128);
16461 list_for_each_entry(dmabuf, &mq->page_list, list) {
16462 memset(dmabuf->virt, 0, hw_page_size);
16463 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
16464 putPaddrLow(dmabuf->phys);
16465 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
16466 putPaddrHigh(dmabuf->phys);
16468 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16469 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16470 &mq_create_ext->u.response);
16471 if (rc != MBX_SUCCESS) {
16472 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16473 "2795 MQ_CREATE_EXT failed with "
16474 "status x%x. Failback to MQ_CREATE.\n",
16476 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
16477 mq_create = &mbox->u.mqe.un.mq_create;
16478 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16479 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
16480 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16481 &mq_create->u.response);
16484 /* The IOCTL status is embedded in the mailbox subheader. */
16485 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16486 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16487 if (shdr_status || shdr_add_status || rc) {
16488 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16489 "2502 MQ_CREATE mailbox failed with "
16490 "status x%x add_status x%x, mbx status x%x\n",
16491 shdr_status, shdr_add_status, rc);
16495 if (mq->queue_id == 0xFFFF) {
16499 mq->type = LPFC_MQ;
16500 mq->assoc_qid = cq->queue_id;
16501 mq->subtype = subtype;
16502 mq->host_index = 0;
16505 /* link the mq onto the parent cq child list */
16506 list_add_tail(&mq->list, &cq->child_list);
16508 mempool_free(mbox, phba->mbox_mem_pool);
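/*
 * Illustrative sketch of the call the driver's setup path makes for its
 * single mailbox queue; mbx_wq and mbx_cq are the slow-path MQ and its CQ.
 */
#if 0	/* sketch, not compiled */
static int example_make_mbox_queue(struct lpfc_hba *phba)
{
	/* the LPFC_MBOX subtype marks the slow-path mailbox queue */
	return lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
			      phba->sli4_hba.mbx_cq, LPFC_MBOX);
}
#endif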
16513 * lpfc_wq_create - Create a Work Queue on the HBA
16514 * @phba: HBA structure that indicates port to create a queue on.
16515 * @wq: The queue structure to use to create the work queue.
16516 * @cq: The completion queue to bind this work queue to.
16517 * @subtype: The subtype of the work queue indicating its functionality.
16519 * This function creates a work queue, as detailed in @wq, on a port, described
16520 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
16522 * The @phba struct is used to send a mailbox command to the HBA. The @wq
16523 * struct is used to get the entry count and entry size that are necessary to
16524 * determine the number of pages to allocate and use for this queue. The @cq
16525 * is used to indicate which completion queue to bind this work queue to. This
16526 * function will send the WQ_CREATE mailbox command to the HBA to set up the
16527 * work queue. This function is synchronous and waits for the mailbox
16528 * command to finish before continuing.
16530 * On success this function will return a zero. If unable to allocate enough
16531 * memory this function will return -ENOMEM. If the queue create mailbox command
16532 * fails this function will return -ENXIO.
16535 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16536 struct lpfc_queue *cq, uint32_t subtype)
16538 struct lpfc_mbx_wq_create *wq_create;
16539 struct lpfc_dmabuf *dmabuf;
16540 LPFC_MBOXQ_t *mbox;
16541 int rc, length, status = 0;
16542 uint32_t shdr_status, shdr_add_status;
16543 union lpfc_sli4_cfg_shdr *shdr;
16544 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16545 struct dma_address *page;
16546 void __iomem *bar_memmap_p;
16547 uint32_t db_offset;
16548 uint16_t pci_barset;
16549 uint8_t dpp_barset;
16550 uint32_t dpp_offset;
16551 uint8_t wq_create_version;
16553 unsigned long pg_addr;
16556 /* sanity check on queue memory */
16559 if (!phba->sli4_hba.pc_sli4_params.supported)
16560 hw_page_size = wq->page_size;
16562 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16565 length = (sizeof(struct lpfc_mbx_wq_create) -
16566 sizeof(struct lpfc_sli4_cfg_mhdr));
16567 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16568 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16569 length, LPFC_SLI4_MBX_EMBED);
16570 wq_create = &mbox->u.mqe.un.wq_create;
16571 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
16572 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16574 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16577 /* wqv is the earliest version supported, NOT the latest */
16578 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16579 phba->sli4_hba.pc_sli4_params.wqv);
16581 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16582 (wq->page_size > SLI4_PAGE_SIZE))
16583 wq_create_version = LPFC_Q_CREATE_VERSION_1;
16585 wq_create_version = LPFC_Q_CREATE_VERSION_0;
16587 switch (wq_create_version) {
16588 case LPFC_Q_CREATE_VERSION_1:
16589 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16591 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16592 LPFC_Q_CREATE_VERSION_1);
16594 switch (wq->entry_size) {
16597 bf_set(lpfc_mbx_wq_create_wqe_size,
16598 &wq_create->u.request_1,
16599 LPFC_WQ_WQE_SIZE_64);
16602 bf_set(lpfc_mbx_wq_create_wqe_size,
16603 &wq_create->u.request_1,
16604 LPFC_WQ_WQE_SIZE_128);
16607 /* Request DPP by default */
16608 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
16609 bf_set(lpfc_mbx_wq_create_page_size,
16610 &wq_create->u.request_1,
16611 (wq->page_size / SLI4_PAGE_SIZE));
16612 page = wq_create->u.request_1.page;
16615 page = wq_create->u.request.page;
16619 list_for_each_entry(dmabuf, &wq->page_list, list) {
16620 memset(dmabuf->virt, 0, hw_page_size);
16621 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
16622 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
16625 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16626 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
16628 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16629 /* The IOCTL status is embedded in the mailbox subheader. */
16630 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16631 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16632 if (shdr_status || shdr_add_status || rc) {
16633 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16634 "2503 WQ_CREATE mailbox failed with "
16635 "status x%x add_status x%x, mbx status x%x\n",
16636 shdr_status, shdr_add_status, rc);
16641 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
16642 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
16643 &wq_create->u.response);
16645 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
16646 &wq_create->u.response_1);
16648 if (wq->queue_id == 0xFFFF) {
16653 wq->db_format = LPFC_DB_LIST_FORMAT;
16654 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
16655 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16656 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
16657 &wq_create->u.response);
16658 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
16659 (wq->db_format != LPFC_DB_RING_FORMAT)) {
16660 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16661 "3265 WQ[%d] doorbell format "
16662 "not supported: x%x\n",
16663 wq->queue_id, wq->db_format);
16667 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
16668 &wq_create->u.response);
16669 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16671 if (!bar_memmap_p) {
16672 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16673 "3263 WQ[%d] failed to memmap "
16674 "pci barset:x%x\n",
16675 wq->queue_id, pci_barset);
16679 db_offset = wq_create->u.response.doorbell_offset;
16680 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
16681 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
16682 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16683 "3252 WQ[%d] doorbell offset "
16684 "not supported: x%x\n",
16685 wq->queue_id, db_offset);
16689 wq->db_regaddr = bar_memmap_p + db_offset;
16690 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16691 "3264 WQ[%d]: barset:x%x, offset:x%x, "
16692 "format:x%x\n", wq->queue_id,
16693 pci_barset, db_offset, wq->db_format);
16695 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16697 /* Check if DPP was honored by the firmware */
16698 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
16699 &wq_create->u.response_1);
16700 if (wq->dpp_enable) {
16701 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
16702 &wq_create->u.response_1);
16703 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16705 if (!bar_memmap_p) {
16706 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16707 "3267 WQ[%d] failed to memmap "
16708 "pci barset:x%x\n",
16709 wq->queue_id, pci_barset);
16713 db_offset = wq_create->u.response_1.doorbell_offset;
16714 wq->db_regaddr = bar_memmap_p + db_offset;
16715 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
16716 &wq_create->u.response_1);
16717 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
16718 &wq_create->u.response_1);
16719 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16721 if (!bar_memmap_p) {
16722 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16723 "3268 WQ[%d] failed to memmap "
16724 "pci barset:x%x\n",
16725 wq->queue_id, dpp_barset);
16729 dpp_offset = wq_create->u.response_1.dpp_offset;
16730 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
16731 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16732 "3271 WQ[%d]: barset:x%x, offset:x%x, "
16733 "dpp_id:x%x dpp_barset:x%x "
16734 "dpp_offset:x%x\n",
16735 wq->queue_id, pci_barset, db_offset,
16736 wq->dpp_id, dpp_barset, dpp_offset);
16739 /* Enable combined writes for DPP aperture */
16740 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
16741 rc = set_memory_wc(pg_addr, 1);
16743 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16744 "3272 Cannot setup Combined "
16745 "Write on WQ[%d] - disable DPP\n",
16747 phba->cfg_enable_dpp = 0;
16750 phba->cfg_enable_dpp = 0;
16753 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16755 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
16756 if (wq->pring == NULL) {
16760 wq->type = LPFC_WQ;
16761 wq->assoc_qid = cq->queue_id;
16762 wq->subtype = subtype;
16763 wq->host_index = 0;
16765 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
16767 /* link the wq onto the parent cq child list */
16768 list_add_tail(&wq->list, &cq->child_list);
16770 mempool_free(mbox, phba->mbox_mem_pool);
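/*
 * Illustrative sketch: a WQ is always created against an existing CQ. The
 * slow-path ELS WQ, for example, is bound to the ELS CQ.
 */
#if 0	/* sketch, not compiled */
static int example_make_els_wq(struct lpfc_hba *phba)
{
	/* completions for this WQ will arrive on els_cq */
	return lpfc_wq_create(phba, phba->sli4_hba.els_wq,
			      phba->sli4_hba.els_cq, LPFC_ELS);
}
#endif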
16775 * lpfc_rq_create - Create a Receive Queue on the HBA
16776 * @phba: HBA structure that indicates port to create a queue on.
16777 * @hrq: The queue structure to use to create the header receive queue.
16778 * @drq: The queue structure to use to create the data receive queue.
16779 * @cq: The completion queue to bind these receive queues to.
16780 * @subtype: The subtype of the receive queues indicating their functionality.
16782 * This function creates a receive buffer queue pair, as detailed in @hrq and
16783 * @drq, on a port described by @phba, by sending a RQ_CREATE mailbox command
16786 * to the HBA. The @phba struct is used to send the mailbox command. The @hrq
16787 * and @drq structs are used to get the entry count that is necessary to
16788 * determine the number of pages to use for each queue. The @cq is used to
16789 * indicate which completion queue to bind received buffers that are posted to
16790 * these queues to. This function will send the RQ_CREATE mailbox command to
16791 * the HBA to set up the receive queue pair. This function is synchronous and
16792 * waits for the mailbox command to finish before continuing.
16794 * On success this function will return a zero. If unable to allocate enough
16795 * memory this function will return -ENOMEM. If the queue create mailbox command
16796 * fails this function will return -ENXIO.
16799 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16800 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
16802 struct lpfc_mbx_rq_create *rq_create;
16803 struct lpfc_dmabuf *dmabuf;
16804 LPFC_MBOXQ_t *mbox;
16805 int rc, length, status = 0;
16806 uint32_t shdr_status, shdr_add_status;
16807 union lpfc_sli4_cfg_shdr *shdr;
16808 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16809 void __iomem *bar_memmap_p;
16810 uint32_t db_offset;
16811 uint16_t pci_barset;
16813 /* sanity check on queue memory */
16814 if (!hrq || !drq || !cq)
16816 if (!phba->sli4_hba.pc_sli4_params.supported)
16817 hw_page_size = SLI4_PAGE_SIZE;
16819 if (hrq->entry_count != drq->entry_count)
16821 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16824 length = (sizeof(struct lpfc_mbx_rq_create) -
16825 sizeof(struct lpfc_sli4_cfg_mhdr));
16826 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16827 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16828 length, LPFC_SLI4_MBX_EMBED);
16829 rq_create = &mbox->u.mqe.un.rq_create;
16830 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16831 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16832 phba->sli4_hba.pc_sli4_params.rqv);
16833 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16834 bf_set(lpfc_rq_context_rqe_count_1,
16835 &rq_create->u.request.context,
16837 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
16838 bf_set(lpfc_rq_context_rqe_size,
16839 &rq_create->u.request.context,
16841 bf_set(lpfc_rq_context_page_size,
16842 &rq_create->u.request.context,
16843 LPFC_RQ_PAGE_SIZE_4096);
16845 switch (hrq->entry_count) {
16847 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16848 "2535 Unsupported RQ count. (%d)\n",
16850 if (hrq->entry_count < 512) {
16854 fallthrough; /* otherwise default to smallest count */
16856 bf_set(lpfc_rq_context_rqe_count,
16857 &rq_create->u.request.context,
16858 LPFC_RQ_RING_SIZE_512);
16861 bf_set(lpfc_rq_context_rqe_count,
16862 &rq_create->u.request.context,
16863 LPFC_RQ_RING_SIZE_1024);
16866 bf_set(lpfc_rq_context_rqe_count,
16867 &rq_create->u.request.context,
16868 LPFC_RQ_RING_SIZE_2048);
16871 bf_set(lpfc_rq_context_rqe_count,
16872 &rq_create->u.request.context,
16873 LPFC_RQ_RING_SIZE_4096);
16876 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
16877 LPFC_HDR_BUF_SIZE);
16879 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16881 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16883 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16884 memset(dmabuf->virt, 0, hw_page_size);
16885 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16886 putPaddrLow(dmabuf->phys);
16887 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16888 putPaddrHigh(dmabuf->phys);
16890 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16891 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16893 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16894 /* The IOCTL status is embedded in the mailbox subheader. */
16895 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16896 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16897 if (shdr_status || shdr_add_status || rc) {
16898 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16899 "2504 RQ_CREATE mailbox failed with "
16900 "status x%x add_status x%x, mbx status x%x\n",
16901 shdr_status, shdr_add_status, rc);
16905 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16906 if (hrq->queue_id == 0xFFFF) {
16911 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16912 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
16913 &rq_create->u.response);
16914 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
16915 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
16916 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16917 "3262 RQ [%d] doorbell format not "
16918 "supported: x%x\n", hrq->queue_id,
16924 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
16925 &rq_create->u.response);
16926 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
16927 if (!bar_memmap_p) {
16928 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16929 "3269 RQ[%d] failed to memmap pci "
16930 "barset:x%x\n", hrq->queue_id,
16936 db_offset = rq_create->u.response.doorbell_offset;
16937 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
16938 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
16939 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16940 "3270 RQ[%d] doorbell offset not "
16941 "supported: x%x\n", hrq->queue_id,
16946 hrq->db_regaddr = bar_memmap_p + db_offset;
16947 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16948 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
16949 "format:x%x\n", hrq->queue_id, pci_barset,
16950 db_offset, hrq->db_format);
16952 hrq->db_format = LPFC_DB_RING_FORMAT;
16953 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16955 hrq->type = LPFC_HRQ;
16956 hrq->assoc_qid = cq->queue_id;
16957 hrq->subtype = subtype;
16958 hrq->host_index = 0;
16959 hrq->hba_index = 0;
16960 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16962 /* now create the data queue */
16963 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16964 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16965 length, LPFC_SLI4_MBX_EMBED);
16966 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16967 phba->sli4_hba.pc_sli4_params.rqv);
16968 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16969 bf_set(lpfc_rq_context_rqe_count_1,
16970 &rq_create->u.request.context, hrq->entry_count);
16971 if (subtype == LPFC_NVMET)
16972 rq_create->u.request.context.buffer_size =
16973 LPFC_NVMET_DATA_BUF_SIZE;
16975 rq_create->u.request.context.buffer_size =
16976 LPFC_DATA_BUF_SIZE;
16977 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
16979 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
16980 (PAGE_SIZE/SLI4_PAGE_SIZE));
16982 switch (drq->entry_count) {
16984 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16985 "2536 Unsupported RQ count. (%d)\n",
16987 if (drq->entry_count < 512) {
16991 fallthrough; /* otherwise default to smallest count */
16993 bf_set(lpfc_rq_context_rqe_count,
16994 &rq_create->u.request.context,
16995 LPFC_RQ_RING_SIZE_512);
16998 bf_set(lpfc_rq_context_rqe_count,
16999 &rq_create->u.request.context,
17000 LPFC_RQ_RING_SIZE_1024);
17003 bf_set(lpfc_rq_context_rqe_count,
17004 &rq_create->u.request.context,
17005 LPFC_RQ_RING_SIZE_2048);
17008 bf_set(lpfc_rq_context_rqe_count,
17009 &rq_create->u.request.context,
17010 LPFC_RQ_RING_SIZE_4096);
17013 if (subtype == LPFC_NVMET)
17014 bf_set(lpfc_rq_context_buf_size,
17015 &rq_create->u.request.context,
17016 LPFC_NVMET_DATA_BUF_SIZE);
17018 bf_set(lpfc_rq_context_buf_size,
17019 &rq_create->u.request.context,
17020 LPFC_DATA_BUF_SIZE);
17022 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17024 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17026 list_for_each_entry(dmabuf, &drq->page_list, list) {
17027 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17028 putPaddrLow(dmabuf->phys);
17029 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17030 putPaddrHigh(dmabuf->phys);
17032 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17033 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17034 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17035 /* The IOCTL status is embedded in the mailbox subheader. */
17036 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17037 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17038 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17039 if (shdr_status || shdr_add_status || rc) {
17043 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17044 if (drq->queue_id == 0xFFFF) {
17048 drq->type = LPFC_DRQ;
17049 drq->assoc_qid = cq->queue_id;
17050 drq->subtype = subtype;
17051 drq->host_index = 0;
17052 drq->hba_index = 0;
17053 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17055 /* link the header and data RQs onto the parent cq child list */
17056 list_add_tail(&hrq->list, &cq->child_list);
17057 list_add_tail(&drq->list, &cq->child_list);
17060 mempool_free(mbox, phba->mbox_mem_pool);
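/*
 * Illustrative sketch: header and data RQs are created as a pair against a
 * single CQ; the unsolicited receive path uses the LPFC_USOL subtype.
 */
#if 0	/* sketch, not compiled */
static int example_make_els_rq_pair(struct lpfc_hba *phba)
{
	return lpfc_rq_create(phba, phba->sli4_hba.hdr_rq,
			      phba->sli4_hba.dat_rq,
			      phba->sli4_hba.els_cq, LPFC_USOL);
}
#endif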
17065 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
17066 * @phba: HBA structure that indicates port to create a queue on.
17067 * @hrqp: The queue structure array to use to create the header receive queues.
17068 * @drqp: The queue structure array to use to create the data receive queues.
17069 * @cqp: The completion queue array to bind these receive queues to.
17070 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
17072 * This function creates receive buffer queue pairs, as detailed in @hrqp and
17073 * @drqp, on a port described by @phba, by sending a RQ_CREATE mailbox command
17076 * to the HBA. The @phba struct is used to send the mailbox command. The @hrqp
17077 * and @drqp arrays are used to get the entry counts that are necessary to
17078 * determine the number of pages to use for each queue. The @cqp array is used
17079 * to indicate which completion queues to bind received buffers that are posted
17080 * to these queues to. This function will send the RQ_CREATE mailbox command to
17081 * the HBA to set up the receive queue pairs. This function is synchronous and
17082 * waits for the mailbox command to finish before continuing.
17084 * On success this function will return a zero. If unable to allocate enough
17085 * memory this function will return -ENOMEM. If the queue create mailbox command
17086 * fails this function will return -ENXIO.
17089 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
17090 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
17093 struct lpfc_queue *hrq, *drq, *cq;
17094 struct lpfc_mbx_rq_create_v2 *rq_create;
17095 struct lpfc_dmabuf *dmabuf;
17096 LPFC_MBOXQ_t *mbox;
17097 int rc, length, alloclen, status = 0;
17098 int cnt, idx, numrq, page_idx = 0;
17099 uint32_t shdr_status, shdr_add_status;
17100 union lpfc_sli4_cfg_shdr *shdr;
17101 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17103 numrq = phba->cfg_nvmet_mrq;
17104 /* sanity check on array memory */
17105 if (!hrqp || !drqp || !cqp || !numrq)
17107 if (!phba->sli4_hba.pc_sli4_params.supported)
17108 hw_page_size = SLI4_PAGE_SIZE;
17110 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17114 length = sizeof(struct lpfc_mbx_rq_create_v2);
17115 length += ((2 * numrq * hrqp[0]->page_count) *
17116 sizeof(struct dma_address));
17118 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17119 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
17120 LPFC_SLI4_MBX_NEMBED);
17121 if (alloclen < length) {
17122 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17123 "3099 Allocated DMA memory size (%d) is "
17124 "less than the requested DMA memory size "
17125 "(%d)\n", alloclen, length);
17132 rq_create = mbox->sge_array->addr[0];
17133 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
17135 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
17138 for (idx = 0; idx < numrq; idx++) {
17143 /* sanity check on queue memory */
17144 if (!hrq || !drq || !cq) {
17149 if (hrq->entry_count != drq->entry_count) {
17155 bf_set(lpfc_mbx_rq_create_num_pages,
17156 &rq_create->u.request,
17158 bf_set(lpfc_mbx_rq_create_rq_cnt,
17159 &rq_create->u.request, (numrq * 2));
17160 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
17162 bf_set(lpfc_rq_context_base_cq,
17163 &rq_create->u.request.context,
17165 bf_set(lpfc_rq_context_data_size,
17166 &rq_create->u.request.context,
17167 LPFC_NVMET_DATA_BUF_SIZE);
17168 bf_set(lpfc_rq_context_hdr_size,
17169 &rq_create->u.request.context,
17170 LPFC_HDR_BUF_SIZE);
17171 bf_set(lpfc_rq_context_rqe_count_1,
17172 &rq_create->u.request.context,
17174 bf_set(lpfc_rq_context_rqe_size,
17175 &rq_create->u.request.context,
17177 bf_set(lpfc_rq_context_page_size,
17178 &rq_create->u.request.context,
17179 (PAGE_SIZE/SLI4_PAGE_SIZE));
17182 list_for_each_entry(dmabuf, &hrq->page_list, list) {
17183 memset(dmabuf->virt, 0, hw_page_size);
17184 cnt = page_idx + dmabuf->buffer_tag;
17185 rq_create->u.request.page[cnt].addr_lo =
17186 putPaddrLow(dmabuf->phys);
17187 rq_create->u.request.page[cnt].addr_hi =
17188 putPaddrHigh(dmabuf->phys);
17194 list_for_each_entry(dmabuf, &drq->page_list, list) {
17195 memset(dmabuf->virt, 0, hw_page_size);
17196 cnt = page_idx + dmabuf->buffer_tag;
17197 rq_create->u.request.page[cnt].addr_lo =
17198 putPaddrLow(dmabuf->phys);
17199 rq_create->u.request.page[cnt].addr_hi =
17200 putPaddrHigh(dmabuf->phys);
17205 hrq->db_format = LPFC_DB_RING_FORMAT;
17206 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17207 hrq->type = LPFC_HRQ;
17208 hrq->assoc_qid = cq->queue_id;
17209 hrq->subtype = subtype;
17210 hrq->host_index = 0;
17211 hrq->hba_index = 0;
17212 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17214 drq->db_format = LPFC_DB_RING_FORMAT;
17215 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17216 drq->type = LPFC_DRQ;
17217 drq->assoc_qid = cq->queue_id;
17218 drq->subtype = subtype;
17219 drq->host_index = 0;
17220 drq->hba_index = 0;
17221 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17223 list_add_tail(&hrq->list, &cq->child_list);
17224 list_add_tail(&drq->list, &cq->child_list);
17227 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17228 /* The IOCTL status is embedded in the mailbox subheader. */
17229 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17230 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17231 if (shdr_status || shdr_add_status || rc) {
17232 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17233 "3120 RQ_CREATE mailbox failed with "
17234 "status x%x add_status x%x, mbx status x%x\n",
17235 shdr_status, shdr_add_status, rc);
17239 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17240 if (rc == 0xFFFF) {
17245 /* Initialize all RQs with associated queue id */
17246 for (idx = 0; idx < numrq; idx++) {
17248 hrq->queue_id = rc + (2 * idx);
17250 drq->queue_id = rc + (2 * idx) + 1;
17254 lpfc_sli4_mbox_cmd_free(phba, mbox);
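/*
 * Restatement (illustrative only) of the MRQ id layout applied just above:
 * the version-2 RQ_CREATE returns one base id, and each header/data pair is
 * numbered consecutively from it.
 */
#if 0	/* sketch, not compiled */
static void example_mrq_ids(struct lpfc_queue **hrqp,
			    struct lpfc_queue **drqp, int numrq, int base)
{
	int idx;

	for (idx = 0; idx < numrq; idx++) {
		hrqp[idx]->queue_id = base + (2 * idx);
		drqp[idx]->queue_id = base + (2 * idx) + 1;
	}
}
#endif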
17259 * lpfc_eq_destroy - Destroy an event Queue on the HBA
17260 * @phba: HBA structure that indicates port to destroy a queue on.
17261 * @eq: The queue structure associated with the queue to destroy.
17263 * This function destroys a queue, as detailed in @eq, by sending a mailbox
17264 * command, specific to the type of queue, to the HBA.
17266 * The @eq struct is used to get the queue ID of the queue to destroy.
17268 * On success this function will return a zero. If the queue destroy mailbox
17269 * command fails this function will return -ENXIO.
17272 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
17274 LPFC_MBOXQ_t *mbox;
17275 int rc, length, status = 0;
17276 uint32_t shdr_status, shdr_add_status;
17277 union lpfc_sli4_cfg_shdr *shdr;
17279 /* sanity check on queue memory */
17283 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
17286 length = (sizeof(struct lpfc_mbx_eq_destroy) -
17287 sizeof(struct lpfc_sli4_cfg_mhdr));
17288 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17289 LPFC_MBOX_OPCODE_EQ_DESTROY,
17290 length, LPFC_SLI4_MBX_EMBED);
17291 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
17293 mbox->vport = eq->phba->pport;
17294 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17296 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
17297 /* The IOCTL status is embedded in the mailbox subheader. */
17298 shdr = (union lpfc_sli4_cfg_shdr *)
17299 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
17300 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17301 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17302 if (shdr_status || shdr_add_status || rc) {
17303 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17304 "2505 EQ_DESTROY mailbox failed with "
17305 "status x%x add_status x%x, mbx status x%x\n",
17306 shdr_status, shdr_add_status, rc);
17310 /* Remove eq from any list */
17311 list_del_init(&eq->list);
17312 mempool_free(mbox, eq->phba->mbox_mem_pool);
17317 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
17318 * @phba: HBA structure that indicates port to destroy a queue on.
17319 * @cq: The queue structure associated with the queue to destroy.
17321 * This function destroys a queue, as detailed in @cq, by sending a mailbox
17322 * command, specific to the type of queue, to the HBA.
17324 * The @cq struct is used to get the queue ID of the queue to destroy.
17326 * On success this function will return a zero. If the queue destroy mailbox
17327 * command fails this function will return -ENXIO.
17330 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
17332 LPFC_MBOXQ_t *mbox;
17333 int rc, length, status = 0;
17334 uint32_t shdr_status, shdr_add_status;
17335 union lpfc_sli4_cfg_shdr *shdr;
17337 /* sanity check on queue memory */
17340 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
17343 length = (sizeof(struct lpfc_mbx_cq_destroy) -
17344 sizeof(struct lpfc_sli4_cfg_mhdr));
17345 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17346 LPFC_MBOX_OPCODE_CQ_DESTROY,
17347 length, LPFC_SLI4_MBX_EMBED);
17348 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
17350 mbox->vport = cq->phba->pport;
17351 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17352 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
17353 /* The IOCTL status is embedded in the mailbox subheader. */
17354 shdr = (union lpfc_sli4_cfg_shdr *)
17355 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
17356 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17357 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17358 if (shdr_status || shdr_add_status || rc) {
17359 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17360 "2506 CQ_DESTROY mailbox failed with "
17361 "status x%x add_status x%x, mbx status x%x\n",
17362 shdr_status, shdr_add_status, rc);
17365 /* Remove cq from any list */
17366 list_del_init(&cq->list);
17367 mempool_free(mbox, cq->phba->mbox_mem_pool);
17372 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
17373 * @phba: HBA structure that indicates port to destroy a queue on.
17374 * @mq: The queue structure associated with the queue to destroy.
17376 * This function destroys a queue, as detailed in @mq, by sending a mailbox
17377 * command, specific to the type of queue, to the HBA.
17379 * The @mq struct is used to get the queue ID of the queue to destroy.
17381 * On success this function will return a zero. If the queue destroy mailbox
17382 * command fails this function will return -ENXIO.
17385 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
17387 LPFC_MBOXQ_t *mbox;
17388 int rc, length, status = 0;
17389 uint32_t shdr_status, shdr_add_status;
17390 union lpfc_sli4_cfg_shdr *shdr;
17392 /* sanity check on queue memory */
17395 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
17398 length = (sizeof(struct lpfc_mbx_mq_destroy) -
17399 sizeof(struct lpfc_sli4_cfg_mhdr));
17400 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17401 LPFC_MBOX_OPCODE_MQ_DESTROY,
17402 length, LPFC_SLI4_MBX_EMBED);
17403 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
17405 mbox->vport = mq->phba->pport;
17406 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17407 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
17408 /* The IOCTL status is embedded in the mailbox subheader. */
17409 shdr = (union lpfc_sli4_cfg_shdr *)
17410 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
17411 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17412 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17413 if (shdr_status || shdr_add_status || rc) {
17414 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17415 "2507 MQ_DESTROY mailbox failed with "
17416 "status x%x add_status x%x, mbx status x%x\n",
17417 shdr_status, shdr_add_status, rc);
17420 /* Remove mq from any list */
17421 list_del_init(&mq->list);
17422 mempool_free(mbox, mq->phba->mbox_mem_pool);
17427 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
17428 * @phba: HBA structure that indicates port to destroy a queue on.
17429 * @wq: The queue structure associated with the queue to destroy.
17431 * This function destroys a queue, as detailed in @wq, by sending a mailbox
17432 * command, specific to the type of queue, to the HBA.
17434 * The @wq struct is used to get the queue ID of the queue to destroy.
17436 * On success this function will return a zero. If the queue destroy mailbox
17437 * command fails this function will return -ENXIO.
17440 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
17442 LPFC_MBOXQ_t *mbox;
17443 int rc, length, status = 0;
17444 uint32_t shdr_status, shdr_add_status;
17445 union lpfc_sli4_cfg_shdr *shdr;
17447 /* sanity check on queue memory */
17450 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
17453 length = (sizeof(struct lpfc_mbx_wq_destroy) -
17454 sizeof(struct lpfc_sli4_cfg_mhdr));
17455 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17456 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17457 length, LPFC_SLI4_MBX_EMBED);
17458 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17460 mbox->vport = wq->phba->pport;
17461 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17462 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17463 shdr = (union lpfc_sli4_cfg_shdr *)
17464 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17465 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17466 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17467 if (shdr_status || shdr_add_status || rc) {
17468 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17469 "2508 WQ_DESTROY mailbox failed with "
17470 "status x%x add_status x%x, mbx status x%x\n",
17471 shdr_status, shdr_add_status, rc);
17474 /* Remove wq from any list */
17475 list_del_init(&wq->list);
17478 mempool_free(mbox, wq->phba->mbox_mem_pool);
17483 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
17484 * @phba: HBA structure that indicates port to destroy a queue on.
17485 * @hrq: The queue structure associated with the header receive queue.
17486 * @drq: The queue structure associated with the data receive queue.
17488 * This function destroys the receive queue pair, as detailed in @hrq and
17489 * @drq, by sending a mailbox command, specific to the type of queue, to the HBA.
17491 * The @hrq and @drq structs are used to get the queue IDs to destroy.
17493 * On success this function will return a zero. If the queue destroy mailbox
17494 * command fails this function will return -ENXIO.
17497 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17498 struct lpfc_queue *drq)
17500 LPFC_MBOXQ_t *mbox;
17501 int rc, length, status = 0;
17502 uint32_t shdr_status, shdr_add_status;
17503 union lpfc_sli4_cfg_shdr *shdr;
17505 /* sanity check on queue memory */
17508 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
17511 length = (sizeof(struct lpfc_mbx_rq_destroy) -
17512 sizeof(struct lpfc_sli4_cfg_mhdr));
17513 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17514 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
17515 length, LPFC_SLI4_MBX_EMBED);
17516 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17518 mbox->vport = hrq->phba->pport;
17519 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17520 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
17521 /* The IOCTL status is embedded in the mailbox subheader. */
17522 shdr = (union lpfc_sli4_cfg_shdr *)
17523 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17524 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17525 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17526 if (shdr_status || shdr_add_status || rc) {
17527 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17528 "2509 RQ_DESTROY mailbox failed with "
17529 "status x%x add_status x%x, mbx status x%x\n",
17530 shdr_status, shdr_add_status, rc);
17531 mempool_free(mbox, hrq->phba->mbox_mem_pool);
17534 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17536 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
17537 shdr = (union lpfc_sli4_cfg_shdr *)
17538 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17539 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17540 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17541 if (shdr_status || shdr_add_status || rc) {
17542 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17543 "2510 RQ_DESTROY mailbox failed with "
17544 "status x%x add_status x%x, mbx status x%x\n",
17545 shdr_status, shdr_add_status, rc);
17548 list_del_init(&hrq->list);
17549 list_del_init(&drq->list);
17550 mempool_free(mbox, hrq->phba->mbox_mem_pool);
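/*
 * Illustrative teardown sketch (hypothetical caller): the header and data
 * receive queues are destroyed as a pair with a single call, mirroring
 * how they were created:
 *
 *	if (lpfc_rq_destroy(phba, hrq, drq))
 *		;	// -ENXIO: the port may still hold an RQ context
 */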
17555 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
17556 * @phba: pointer to lpfc hba data structure.
17557 * @pdma_phys_addr0: Physical address of the 1st SGL page.
17558 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
17559 * @xritag: the xritag that ties this io to the SGL pages.
17561 * This routine will post the sgl pages for the IO that has the xritag
17562 * that is in the iocbq structure. The xritag is assigned during iocbq
17563 * creation and persists for as long as the driver is loaded.
17564 * If the caller has fewer than 256 scatter gather segments to map, then
17565 * pdma_phys_addr1 should be 0.
17566 * If the caller needs to map more than 256 scatter gather segments, then
17567 * pdma_phys_addr1 should be a valid physical address.
17568 * Physical addresses for SGLs must be 64-byte aligned.
17569 * If two SGLs are mapped, the first one must have 256 entries and the
17570 * second can have between 1 and 256 entries.
17574 * -ENXIO, -ENOMEM - Failure
17577 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
17578 dma_addr_t pdma_phys_addr0,
17579 dma_addr_t pdma_phys_addr1,
17582 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
17583 LPFC_MBOXQ_t *mbox;
17585 uint32_t shdr_status, shdr_add_status;
17587 union lpfc_sli4_cfg_shdr *shdr;
17589 if (xritag == NO_XRI) {
17590 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17591 "0364 Invalid param:\n");
17595 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17599 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17600 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17601 sizeof(struct lpfc_mbx_post_sgl_pages) -
17602 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
17604 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
17605 &mbox->u.mqe.un.post_sgl_pages;
17606 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
17607 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
17609 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
17610 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
17611 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
17612 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
17614 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
17615 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
17616 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
17617 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
17618 if (!phba->sli4_hba.intr_enable)
17619 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17621 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17622 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17624 /* The IOCTL status is embedded in the mailbox subheader. */
17625 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
17626 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17627 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17628 if (!phba->sli4_hba.intr_enable)
17629 mempool_free(mbox, phba->mbox_mem_pool);
17630 else if (rc != MBX_TIMEOUT)
17631 mempool_free(mbox, phba->mbox_mem_pool);
17632 if (shdr_status || shdr_add_status || rc) {
17633 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17634 "2511 POST_SGL mailbox failed with "
17635 "status x%x add_status x%x, mbx status x%x\n",
17636 shdr_status, shdr_add_status, rc);
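/*
 * Illustrative usage sketch (hypothetical sglq pointer): posting a
 * single-page SGL, where fewer than 256 scatter gather entries means the
 * second page address is simply 0:
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 *	if (rc)
 *		;	// -ENXIO (mailbox rejected) or -ENOMEM
 */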
17642 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
17643 * @phba: pointer to lpfc hba data structure.
17645 * This routine is invoked to allocate the next unused logical xri from
17646 * the driver's xri bitmask, consistent with the SLI-4 interface spec.
17647 * Because the index is logical, the search starts at bit 0 each time,
17648 * and xri_used is incremented on every successful allocation.
17651 * A logical xri in the range 0 <= xri < max_xri if successful,
17652 * NO_XRI if no xris are available.
17655 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
17660 * Fetch the next logical xri. Because this index is logical,
17661 * the driver starts at 0 each time.
17663 spin_lock_irq(&phba->hbalock);
17664 xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
17665 phba->sli4_hba.max_cfg_param.max_xri);
17666 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
17667 spin_unlock_irq(&phba->hbalock);
17670 set_bit(xri, phba->sli4_hba.xri_bmask);
17671 phba->sli4_hba.max_cfg_param.xri_used++;
17673 spin_unlock_irq(&phba->hbalock);
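/*
 * Expected pairing (illustrative): every successful allocation should
 * eventually be balanced by a free so the bitmask and the xri_used
 * counter stay consistent:
 *
 *	xri = lpfc_sli4_alloc_xri(phba);
 *	if (xri == NO_XRI)
 *		;	// all max_xri logical xris are in use
 *	...
 *	lpfc_sli4_free_xri(phba, xri);	// takes hbalock internally
 */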
17678 * __lpfc_sli4_free_xri - Release an xri for reuse.
17679 * @phba: pointer to lpfc hba data structure.
17680 * @xri: xri to release.
17682 * This routine is invoked to release an xri to the pool of
17683 * available xris maintained by the driver.
17686 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17688 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
17689 phba->sli4_hba.max_cfg_param.xri_used--;
17694 * lpfc_sli4_free_xri - Release an xri for reuse.
17695 * @phba: pointer to lpfc hba data structure.
17696 * @xri: xri to release.
17698 * This routine is invoked to release an xri to the pool of
17699 * available xris maintained by the driver.
17702 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17704 spin_lock_irq(&phba->hbalock);
17705 __lpfc_sli4_free_xri(phba, xri);
17706 spin_unlock_irq(&phba->hbalock);
17710 * lpfc_sli4_next_xritag - Get an xritag for the io
17711 * @phba: Pointer to HBA context object.
17713 * This function gets an xritag for the iocb. If there is no unused xritag
17714 * it will return NO_XRI (0xffff), which is not a valid xritag.
17715 * The function returns the allocated xritag if successful, else returns
17716 * NO_XRI.
17717 * The caller is not required to hold any lock.
17720 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
17722 uint16_t xri_index;
17724 xri_index = lpfc_sli4_alloc_xri(phba);
17725 if (xri_index == NO_XRI)
17726 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17727 "2004 Failed to allocate XRI. Last XRITAG is %d"
17728 " Max XRI is %d, Used XRI is %d\n",
17730 phba->sli4_hba.max_cfg_param.max_xri,
17731 phba->sli4_hba.max_cfg_param.xri_used);
17736 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
17737 * @phba: pointer to lpfc hba data structure.
17738 * @post_sgl_list: pointer to els sgl entry list.
17739 * @post_cnt: number of els sgl entries on the list.
17741 * This routine is invoked to post a block of driver's sgl pages to the
17742 * HBA using non-embedded mailbox command. No Lock is held. This routine
17743 * is only called when the driver is loading and after all IO has been stopped.
17747 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
17748 struct list_head *post_sgl_list,
17751 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
17752 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17753 struct sgl_page_pairs *sgl_pg_pairs;
17755 LPFC_MBOXQ_t *mbox;
17756 uint32_t reqlen, alloclen, pg_pairs;
17758 uint16_t xritag_start = 0;
17760 uint32_t shdr_status, shdr_add_status;
17761 union lpfc_sli4_cfg_shdr *shdr;
17763 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
17764 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17765 if (reqlen > SLI4_PAGE_SIZE) {
17766 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17767 "2559 Block sgl registration required DMA "
17768 "size (%d) greater than a page\n", reqlen);
17772 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17776 /* Allocate DMA memory and set up the non-embedded mailbox command */
17777 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17778 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
17779 LPFC_SLI4_MBX_NEMBED);
17781 if (alloclen < reqlen) {
17782 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17783 "0285 Allocated DMA memory size (%d) is "
17784 "less than the requested DMA memory "
17785 "size (%d)\n", alloclen, reqlen);
17786 lpfc_sli4_mbox_cmd_free(phba, mbox);
17789 /* Set up the SGL pages in the non-embedded DMA pages */
17790 viraddr = mbox->sge_array->addr[0];
17791 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17792 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17795 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
17796 /* Set up the sge entry */
17797 sgl_pg_pairs->sgl_pg0_addr_lo =
17798 cpu_to_le32(putPaddrLow(sglq_entry->phys));
17799 sgl_pg_pairs->sgl_pg0_addr_hi =
17800 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
17801 sgl_pg_pairs->sgl_pg1_addr_lo =
17802 cpu_to_le32(putPaddrLow(0));
17803 sgl_pg_pairs->sgl_pg1_addr_hi =
17804 cpu_to_le32(putPaddrHigh(0));
17806 /* Keep the first xritag on the list */
17808 xritag_start = sglq_entry->sli4_xritag;
17813 /* Complete initialization and perform endian conversion. */
17814 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17815 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
17816 sgl->word0 = cpu_to_le32(sgl->word0);
17818 if (!phba->sli4_hba.intr_enable)
17819 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17821 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17822 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17824 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
17825 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17826 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17827 if (!phba->sli4_hba.intr_enable)
17828 lpfc_sli4_mbox_cmd_free(phba, mbox);
17829 else if (rc != MBX_TIMEOUT)
17830 lpfc_sli4_mbox_cmd_free(phba, mbox);
17831 if (shdr_status || shdr_add_status || rc) {
17832 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17833 "2513 POST_SGL_BLOCK mailbox command failed "
17834 "status x%x add_status x%x mbx status x%x\n",
17835 shdr_status, shdr_add_status, rc);
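/*
 * Capacity note (illustrative arithmetic): the non-embedded request must
 * fit in one SLI4 page, so the largest usable post_cnt is roughly
 *
 *	(SLI4_PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr) - sizeof(uint32_t))
 *		/ sizeof(struct sgl_page_pairs)
 *
 * which is why callers batch ELS sgls rather than posting an arbitrary
 * number in one mailbox command.
 */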
17842 * lpfc_sli4_post_io_sgl_block - post a block of IO buffer sgls to firmware
17843 * @phba: pointer to lpfc hba data structure.
17844 * @nblist: pointer to the IO buffer list.
17845 * @count: number of buffers on the list.
17847 * This routine is invoked to post a block of @count sgl pages from the
17848 * IO buffer list @nblist to the HBA using a non-embedded mailbox command.
17853 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
17856 struct lpfc_io_buf *lpfc_ncmd;
17857 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17858 struct sgl_page_pairs *sgl_pg_pairs;
17860 LPFC_MBOXQ_t *mbox;
17861 uint32_t reqlen, alloclen, pg_pairs;
17863 uint16_t xritag_start = 0;
17865 uint32_t shdr_status, shdr_add_status;
17866 dma_addr_t pdma_phys_bpl1;
17867 union lpfc_sli4_cfg_shdr *shdr;
17869 /* Calculate the requested length of the dma memory */
17870 reqlen = count * sizeof(struct sgl_page_pairs) +
17871 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17872 if (reqlen > SLI4_PAGE_SIZE) {
17873 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
17874 "6118 Block sgl registration required DMA "
17875 "size (%d) greater than a page\n", reqlen);
17878 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17880 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17881 "6119 Failed to allocate mbox cmd memory\n");
17885 /* Allocate DMA memory and set up the non-embedded mailbox command */
17886 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17887 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17888 reqlen, LPFC_SLI4_MBX_NEMBED);
17890 if (alloclen < reqlen) {
17891 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17892 "6120 Allocated DMA memory size (%d) is "
17893 "less than the requested DMA memory "
17894 "size (%d)\n", alloclen, reqlen);
17895 lpfc_sli4_mbox_cmd_free(phba, mbox);
17899 /* Get the first SGE entry from the non-embedded DMA memory */
17900 viraddr = mbox->sge_array->addr[0];
17902 /* Set up the SGL pages in the non-embedded DMA pages */
17903 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17904 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17907 list_for_each_entry(lpfc_ncmd, nblist, list) {
17908 /* Set up the sge entry */
17909 sgl_pg_pairs->sgl_pg0_addr_lo =
17910 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
17911 sgl_pg_pairs->sgl_pg0_addr_hi =
17912 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
17913 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
17914 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
17917 pdma_phys_bpl1 = 0;
17918 sgl_pg_pairs->sgl_pg1_addr_lo =
17919 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
17920 sgl_pg_pairs->sgl_pg1_addr_hi =
17921 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
17922 /* Keep the first xritag on the list */
17924 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
17928 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17929 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
17930 /* Perform endian conversion if necessary */
17931 sgl->word0 = cpu_to_le32(sgl->word0);
17933 if (!phba->sli4_hba.intr_enable) {
17934 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17936 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17937 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17939 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
17940 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17941 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17942 if (!phba->sli4_hba.intr_enable)
17943 lpfc_sli4_mbox_cmd_free(phba, mbox);
17944 else if (rc != MBX_TIMEOUT)
17945 lpfc_sli4_mbox_cmd_free(phba, mbox);
17946 if (shdr_status || shdr_add_status || rc) {
17947 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17948 "6125 POST_SGL_BLOCK mailbox command failed "
17949 "status x%x add_status x%x mbx status x%x\n",
17950 shdr_status, shdr_add_status, rc);
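/*
 * Page-pair selection sketch (illustrative): each pair carries up to two
 * SGL pages per buffer, and the second address matters only when the
 * configured buffer spans more than one SGL page (the exact offset used
 * by the driver is assumed here):
 *
 *	if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
 *		pg1 = lpfc_ncmd->dma_phys_sgl + SGL_PAGE_SIZE;
 *	else
 *		pg1 = 0;	// second page unused
 */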
17957 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
17958 * @phba: pointer to lpfc hba data structure.
17959 * @post_nblist: pointer to the nvme buffer list.
17960 * @sb_count: number of nvme buffers.
17962 * This routine walks a list of nvme buffers that was passed in. It attempts
17963 * to construct blocks of nvme buffer sgls which contain contiguous xris and
17964 * uses the non-embedded SGL block post mailbox commands to post to the port.
17965 * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses the
17966 * embedded SGL post mailbox command for posting. The @post_nblist passed in
17967 * must be a local list, thus no lock is needed when manipulating the list.
17969 * Returns: 0 on failure, otherwise the number of successfully posted buffers.
17972 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
17973 struct list_head *post_nblist, int sb_count)
17975 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
17976 int status, sgl_size;
17977 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
17978 dma_addr_t pdma_phys_sgl1;
17979 int last_xritag = NO_XRI;
17981 LIST_HEAD(prep_nblist);
17982 LIST_HEAD(blck_nblist);
17983 LIST_HEAD(nvme_nblist);
17989 sgl_size = phba->cfg_sg_dma_buf_size;
17990 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
17991 list_del_init(&lpfc_ncmd->list);
17993 if ((last_xritag != NO_XRI) &&
17994 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
17995 /* a hole in xri block, form a sgl posting block */
17996 list_splice_init(&prep_nblist, &blck_nblist);
17997 post_cnt = block_cnt - 1;
17998 /* prepare list for next posting block */
17999 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18002 /* prepare list for next posting block */
18003 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18004 /* enough sgls for non-embed sgl mbox command */
18005 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
18006 list_splice_init(&prep_nblist, &blck_nblist);
18007 post_cnt = block_cnt;
18012 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18014 /* end of repost sgl list condition for NVME buffers */
18015 if (num_posting == sb_count) {
18016 if (post_cnt == 0) {
18017 /* last sgl posting block */
18018 list_splice_init(&prep_nblist, &blck_nblist);
18019 post_cnt = block_cnt;
18020 } else if (block_cnt == 1) {
18021 /* last single sgl with non-contiguous xri */
18022 if (sgl_size > SGL_PAGE_SIZE)
18024 lpfc_ncmd->dma_phys_sgl +
18027 pdma_phys_sgl1 = 0;
18028 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18029 status = lpfc_sli4_post_sgl(
18030 phba, lpfc_ncmd->dma_phys_sgl,
18031 pdma_phys_sgl1, cur_xritag);
18033 /* Post error. Buffer unavailable. */
18034 lpfc_ncmd->flags |=
18035 LPFC_SBUF_NOT_POSTED;
18037 /* Post success. Buffer available. */
18038 lpfc_ncmd->flags &=
18039 ~LPFC_SBUF_NOT_POSTED;
18040 lpfc_ncmd->status = IOSTAT_SUCCESS;
18043 /* success, put on NVME buffer sgl list */
18044 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18048 /* continue until a nembed page worth of sgls */
18052 /* post block of NVME buffer list sgls */
18053 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
18056 /* don't reset xritag due to hole in xri block */
18057 if (block_cnt == 0)
18058 last_xritag = NO_XRI;
18060 /* reset NVME buffer post count for next round of posting */
18063 /* put NVME buffers with posted sgls on the NVME buffer sgl list */
18064 while (!list_empty(&blck_nblist)) {
18065 list_remove_head(&blck_nblist, lpfc_ncmd,
18066 struct lpfc_io_buf, list);
18068 /* Post error. Mark buffer unavailable. */
18069 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
18071 /* Post success, Mark buffer available. */
18072 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
18073 lpfc_ncmd->status = IOSTAT_SUCCESS;
18076 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18079 /* Push NVME buffers with sgl posted to the available list */
18080 lpfc_io_buf_replenish(phba, &nvme_nblist);
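/*
 * Batching sketch (illustrative): buffers accumulate while their xris
 * stay contiguous, and the routine above posts on three triggers:
 *
 *	xri hole           -> post block_cnt - 1, restart with this buffer
 *	batch limit hit    -> post LPFC_NEMBED_MBOX_SGL_CNT sgls
 *	last buffer alone  -> fall back to embedded lpfc_sli4_post_sgl()
 */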
18086 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
18087 * @phba: pointer to lpfc_hba struct that the frame was received on
18088 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18090 * This function checks the fields in the @fc_hdr to see if the FC frame is a
18091 * valid type of frame that the LPFC driver will handle. This function will
18092 * return a zero if the frame is a valid frame or a non-zero value when the
18093 * frame does not pass the check.
18096 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
18099 struct fc_vft_header *fc_vft_hdr;
18100 uint32_t *header = (uint32_t *) fc_hdr;
18102 #define FC_RCTL_MDS_DIAGS 0xF4
18104 switch (fc_hdr->fh_r_ctl) {
18105 case FC_RCTL_DD_UNCAT: /* uncategorized information */
18106 case FC_RCTL_DD_SOL_DATA: /* solicited data */
18107 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
18108 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
18109 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
18110 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
18111 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
18112 case FC_RCTL_DD_CMD_STATUS: /* command status */
18113 case FC_RCTL_ELS_REQ: /* extended link services request */
18114 case FC_RCTL_ELS_REP: /* extended link services reply */
18115 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
18116 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
18117 case FC_RCTL_BA_ABTS: /* basic link service abort */
18118 case FC_RCTL_BA_RMC: /* remove connection */
18119 case FC_RCTL_BA_ACC: /* basic accept */
18120 case FC_RCTL_BA_RJT: /* basic reject */
18121 case FC_RCTL_BA_PRMT:
18122 case FC_RCTL_ACK_1: /* acknowledge_1 */
18123 case FC_RCTL_ACK_0: /* acknowledge_0 */
18124 case FC_RCTL_P_RJT: /* port reject */
18125 case FC_RCTL_F_RJT: /* fabric reject */
18126 case FC_RCTL_P_BSY: /* port busy */
18127 case FC_RCTL_F_BSY: /* fabric busy to data frame */
18128 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
18129 case FC_RCTL_LCR: /* link credit reset */
18130 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
18131 case FC_RCTL_END: /* end */
18133 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
18134 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18135 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
18136 return lpfc_fc_frame_check(phba, fc_hdr);
18137 case FC_RCTL_BA_NOP: /* basic link service NOP */
18142 switch (fc_hdr->fh_type) {
18155 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
18156 "2538 Received frame rctl:x%x, type:x%x, "
18157 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
18158 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
18159 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
18160 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
18161 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
18162 be32_to_cpu(header[6]));
18165 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
18166 "2539 Dropped frame rctl:x%x type:x%x\n",
18167 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
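/*
 * Illustrative caller pattern (hypothetical fc_hdr and dmabuf): a zero
 * return means "handle the frame"; anything else means drop it, exactly
 * as the receive path later in this file does:
 *
 *	if (lpfc_fc_frame_check(phba, fc_hdr)) {
 *		lpfc_in_buf_free(phba, &dmabuf->dbuf);
 *		return;
 *	}
 */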
18172 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
18173 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18175 * This function processes the FC header to retrieve the VFI from the VF
18176 * header, if one exists. This function will return the VFI if one exists
18177 * or 0 if no VFT header exists.
18180 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
18182 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18184 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
18186 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
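/*
 * Minimal sketch of the tagging check (illustrative): frames carrying a
 * Virtual Fabric Tagging Header announce it in R_CTL, so a caller can
 * branch on the same test this helper uses:
 *
 *	if (fc_hdr->fh_r_ctl == FC_RCTL_VFTH)
 *		vfi = lpfc_fc_hdr_get_vfi(fc_hdr);	// VF_ID from VFT header
 *	else
 *		vfi = 0;				// untagged frame
 */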
18190 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
18191 * @phba: Pointer to the HBA structure to search for the vport on
18192 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18193 * @fcfi: The FC Fabric ID that the frame came from
18194 * @did: Destination ID to match against
18196 * This function searches the @phba for a vport that matches the content of the
18197 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
18198 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
18199 * returns the matching vport pointer or NULL if unable to match the frame to a vport.
18202 static struct lpfc_vport *
18203 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
18204 uint16_t fcfi, uint32_t did)
18206 struct lpfc_vport **vports;
18207 struct lpfc_vport *vport = NULL;
18210 if (did == Fabric_DID)
18211 return phba->pport;
18212 if ((phba->pport->fc_flag & FC_PT2PT) &&
18213 !(phba->link_state == LPFC_HBA_READY))
18214 return phba->pport;
18216 vports = lpfc_create_vport_work_array(phba);
18217 if (vports != NULL) {
18218 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
18219 if (phba->fcf.fcfi == fcfi &&
18220 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
18221 vports[i]->fc_myDID == did) {
18227 lpfc_destroy_vport_work_array(phba, vports);
18232 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
18233 * @vport: The vport to work on.
18235 * This function updates the receive sequence time stamp for this vport. The
18236 * receive sequence time stamp indicates the time that the last frame of
18237 * the sequence that has been idle for the longest amount of time was received.
18238 * The driver uses this time stamp to determine if any received sequences have timed out.
18242 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
18244 struct lpfc_dmabuf *h_buf;
18245 struct hbq_dmabuf *dmabuf = NULL;
18247 /* get the oldest sequence on the rcv list */
18248 h_buf = list_get_first(&vport->rcv_buffer_list,
18249 struct lpfc_dmabuf, list);
18252 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18253 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
18257 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
18258 * @vport: The vport that the received sequences were sent to.
18260 * This function cleans up all outstanding received sequences. This is called
18261 * by the driver when a link event or user action invalidates all the received sequences.
18265 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
18267 struct lpfc_dmabuf *h_buf, *hnext;
18268 struct lpfc_dmabuf *d_buf, *dnext;
18269 struct hbq_dmabuf *dmabuf = NULL;
18271 /* start with the oldest sequence on the rcv list */
18272 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18273 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18274 list_del_init(&dmabuf->hbuf.list);
18275 list_for_each_entry_safe(d_buf, dnext,
18276 &dmabuf->dbuf.list, list) {
18277 list_del_init(&d_buf->list);
18278 lpfc_in_buf_free(vport->phba, d_buf);
18280 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18285 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
18286 * @vport: The vport that the received sequences were sent to.
18288 * This function determines whether any received sequences have timed out by
18289 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
18290 * indicates that there is at least one timed out sequence this routine will
18291 * go through the received sequences one at a time from most inactive to most
18292 * active to determine which ones need to be cleaned up. Once it has determined
18293 * that a sequence needs to be cleaned up it will simply free up the resources
18294 * without sending an abort.
18297 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
18299 struct lpfc_dmabuf *h_buf, *hnext;
18300 struct lpfc_dmabuf *d_buf, *dnext;
18301 struct hbq_dmabuf *dmabuf = NULL;
18302 unsigned long timeout;
18303 int abort_count = 0;
18305 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18306 vport->rcv_buffer_time_stamp);
18307 if (list_empty(&vport->rcv_buffer_list) ||
18308 time_before(jiffies, timeout))
18310 /* start with the oldest sequence on the rcv list */
18311 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18312 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18313 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18314 dmabuf->time_stamp);
18315 if (time_before(jiffies, timeout))
18318 list_del_init(&dmabuf->hbuf.list);
18319 list_for_each_entry_safe(d_buf, dnext,
18320 &dmabuf->dbuf.list, list) {
18321 list_del_init(&d_buf->list);
18322 lpfc_in_buf_free(vport->phba, d_buf);
18324 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18327 lpfc_update_rcv_time_stamp(vport);
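/*
 * Timeout arithmetic (illustrative): a sequence is considered timed out
 * once E_D_TOV has elapsed since its newest frame, using the same jiffies
 * math as the loop above:
 *
 *	timeout = msecs_to_jiffies(vport->phba->fc_edtov) + dmabuf->time_stamp;
 *	expired = !time_before(jiffies, timeout);
 */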
18331 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
18332 * @vport: pointer to a virtual port
18333 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
18335 * This function searches through the existing incomplete sequences that have
18336 * been sent to this @vport. If the frame matches one of the incomplete
18337 * sequences then the dbuf in the @dmabuf is added to the list of frames that
18338 * make up that sequence. If no sequence is found that matches this frame then
18339 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
18340 * This function returns a pointer to the first dmabuf in the sequence list that
18341 * the frame was linked to.
18343 static struct hbq_dmabuf *
18344 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18346 struct fc_frame_header *new_hdr;
18347 struct fc_frame_header *temp_hdr;
18348 struct lpfc_dmabuf *d_buf;
18349 struct lpfc_dmabuf *h_buf;
18350 struct hbq_dmabuf *seq_dmabuf = NULL;
18351 struct hbq_dmabuf *temp_dmabuf = NULL;
18354 INIT_LIST_HEAD(&dmabuf->dbuf.list);
18355 dmabuf->time_stamp = jiffies;
18356 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18358 /* Use the hdr_buf to find the sequence that this frame belongs to */
18359 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18360 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18361 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18362 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18363 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18365 /* found a pending sequence that matches this frame */
18366 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18371 * This indicates first frame received for this sequence.
18372 * Queue the buffer on the vport's rcv_buffer_list.
18374 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18375 lpfc_update_rcv_time_stamp(vport);
18378 temp_hdr = seq_dmabuf->hbuf.virt;
18379 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
18380 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18381 list_del_init(&seq_dmabuf->hbuf.list);
18382 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18383 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18384 lpfc_update_rcv_time_stamp(vport);
18387 /* move this sequence to the tail to indicate a young sequence */
18388 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
18389 seq_dmabuf->time_stamp = jiffies;
18390 lpfc_update_rcv_time_stamp(vport);
18391 if (list_empty(&seq_dmabuf->dbuf.list)) {
18392 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18395 /* find the correct place in the sequence to insert this frame */
18396 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
18398 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18399 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
18401 * If the frame's sequence count is greater than the frame on
18402 * the list then insert the frame right after this frame
18404 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
18405 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18406 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
18411 if (&d_buf->list == &seq_dmabuf->dbuf.list)
18413 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
18422 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
18423 * @vport: pointer to a virtual port
18424 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18426 * This function tries to abort the partially assembled sequence described
18427 * by the information in the basic abort @dmabuf. It checks to see whether
18428 * such a partially assembled sequence is held by the driver. If so, it frees
18429 * up all the frames from the partially assembled sequence.
18432 * true -- if there is a matching partially assembled sequence present and
18433 * all the frames are freed with the sequence;
18434 * false -- if there is no matching partially assembled sequence present, so
18435 * nothing gets aborted in the lower layer driver
18438 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
18439 struct hbq_dmabuf *dmabuf)
18441 struct fc_frame_header *new_hdr;
18442 struct fc_frame_header *temp_hdr;
18443 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
18444 struct hbq_dmabuf *seq_dmabuf = NULL;
18446 /* Use the hdr_buf to find the sequence that matches this frame */
18447 INIT_LIST_HEAD(&dmabuf->dbuf.list);
18448 INIT_LIST_HEAD(&dmabuf->hbuf.list);
18449 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18450 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18451 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18452 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18453 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18454 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18456 /* found a pending sequence that matches this frame */
18457 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18461 /* Free up all the frames from the partially assembled sequence */
18463 list_for_each_entry_safe(d_buf, n_buf,
18464 &seq_dmabuf->dbuf.list, list) {
18465 list_del_init(&d_buf->list);
18466 lpfc_in_buf_free(vport->phba, d_buf);
18474 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
18475 * @vport: pointer to a virtual port
18476 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18478 * This function tries to abort the assembled sequence at the upper level
18479 * protocol, described by the information in the basic abort @dmabuf. It
18480 * checks to see whether such a pending context exists at the upper level protocol.
18481 * If so, it shall clean up the pending context.
18484 * true -- if there is a matching pending context of the sequence cleaned
18486 * false -- if there is no matching pending context of the sequence present
18490 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18492 struct lpfc_hba *phba = vport->phba;
18495 /* Accepting abort at ulp with SLI4 only */
18496 if (phba->sli_rev < LPFC_SLI_REV4)
18499 /* Give all interested upper level protocols a chance to handle the abort */
18500 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
18508 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
18509 * @phba: Pointer to HBA context object.
18510 * @cmd_iocbq: pointer to the command iocbq structure.
18511 * @rsp_iocbq: pointer to the response iocbq structure.
18513 * This function handles the sequence abort response iocb command complete
18514 * event. It properly releases the memory allocated to the sequence abort accept iocb.
18518 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
18519 struct lpfc_iocbq *cmd_iocbq,
18520 struct lpfc_iocbq *rsp_iocbq)
18523 lpfc_nlp_put(cmd_iocbq->ndlp);
18524 lpfc_sli_release_iocbq(phba, cmd_iocbq);
18527 /* Failure means BLS ABORT RSP did not get delivered to remote node */
18528 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
18529 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18530 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
18531 get_job_ulpstatus(phba, rsp_iocbq),
18532 get_job_word4(phba, rsp_iocbq));
18536 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
18537 * @phba: Pointer to HBA context object.
18538 * @xri: xri id in transaction.
18540 * This function validates that the xri maps to the known range of XRIs
18541 * allocated and used by the driver.
18544 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
18549 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
18550 if (xri == phba->sli4_hba.xri_ids[i])
18557 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
18558 * @vport: pointer to a virtual port.
18559 * @fc_hdr: pointer to a FC frame header.
18560 * @aborted: was the partially assembled receive sequence successfully aborted
18562 * This function sends a basic response to a previous unsol sequence abort
18563 * event after aborting the sequence handling.
18566 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
18567 struct fc_frame_header *fc_hdr, bool aborted)
18569 struct lpfc_hba *phba = vport->phba;
18570 struct lpfc_iocbq *ctiocb = NULL;
18571 struct lpfc_nodelist *ndlp;
18572 uint16_t oxid, rxid, xri, lxri;
18573 uint32_t sid, fctl;
18574 union lpfc_wqe128 *icmd;
18577 if (!lpfc_is_link_up(phba))
18580 sid = sli4_sid_from_fc_hdr(fc_hdr);
18581 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
18582 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
18584 ndlp = lpfc_findnode_did(vport, sid);
18586 ndlp = lpfc_nlp_init(vport, sid);
18588 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
18589 "1268 Failed to allocate ndlp for "
18590 "oxid:x%x SID:x%x\n", oxid, sid);
18593 /* Put ndlp onto pport node list */
18594 lpfc_enqueue_node(vport, ndlp);
18597 /* Allocate buffer for rsp iocb */
18598 ctiocb = lpfc_sli_get_iocbq(phba);
18602 icmd = &ctiocb->wqe;
18604 /* Extract the F_CTL field from FC_HDR */
18605 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
18607 ctiocb->ndlp = lpfc_nlp_get(ndlp);
18608 if (!ctiocb->ndlp) {
18609 lpfc_sli_release_iocbq(phba, ctiocb);
18613 ctiocb->vport = phba->pport;
18614 ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
18615 ctiocb->sli4_lxritag = NO_XRI;
18616 ctiocb->sli4_xritag = NO_XRI;
18617 ctiocb->abort_rctl = FC_RCTL_BA_ACC;
18619 if (fctl & FC_FC_EX_CTX)
18620 /* Exchange responder sent the abort, so we own the oxid */
18626 lxri = lpfc_sli4_xri_inrange(phba, xri);
18627 if (lxri != NO_XRI)
18628 lpfc_set_rrq_active(phba, ndlp, lxri,
18629 (xri == oxid) ? rxid : oxid, 0);
18630 /* For BA_ABTS from exchange responder, if the logical xri with
18631 * the oxid maps to the FCP XRI range, the port no longer has
18632 * that exchange context, send a BLS_RJT. Override the IOCB for
18635 if ((fctl & FC_FC_EX_CTX) &&
18636 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
18637 ctiocb->abort_rctl = FC_RCTL_BA_RJT;
18638 bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
18639 bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
18640 FC_BA_RJT_INV_XID);
18641 bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
18645 /* If BA_ABTS failed to abort a partially assembled receive sequence,
18646 * the driver no longer has that exchange, send a BLS_RJT. Override
18647 * the IOCB for a BA_RJT.
18649 if (aborted == false) {
18650 ctiocb->abort_rctl = FC_RCTL_BA_RJT;
18651 bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
18652 bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
18653 FC_BA_RJT_INV_XID);
18654 bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
18658 if (fctl & FC_FC_EX_CTX) {
18659 /* ABTS sent by responder to CT exchange, construction
18660 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
18661 * field and RX_ID from ABTS for RX_ID field.
18663 ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
18664 bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
18666 /* ABTS sent by initiator to CT exchange, construction
18667 * of BA_ACC will need to allocate a new XRI for the XRI_TAG field. */
18670 ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
18673 /* OX_ID is the same regardless of who sent the ABTS to the CT exchange */
18674 bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
18675 bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
18678 bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest,
18680 bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
18681 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
18682 bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);
18684 /* Xmit CT abts response on exchange <xid> */
18685 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
18686 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
18687 ctiocb->abort_rctl, oxid, phba->link_state);
18689 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
18690 if (rc == IOCB_ERROR) {
18691 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
18692 "2925 Failed to issue CT ABTS RSP x%x on "
18693 "xri x%x, Data x%x\n",
18694 ctiocb->abort_rctl, oxid,
18696 lpfc_nlp_put(ndlp);
18697 ctiocb->ndlp = NULL;
18698 lpfc_sli_release_iocbq(phba, ctiocb);
18703 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
18704 * @vport: Pointer to the vport on which this sequence was received
18705 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18707 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
18708 * receive sequence is only partially assembled by the driver, it shall abort
18709 * the partially assembled frames for the sequence. Otherwise, if the
18710 * unsolicited receive sequence has been completely assembled and passed to
18711 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to show
18712 * that the unsolicited sequence has been aborted. After that, it will issue
18713 * a basic accept to acknowledge the abort.
18716 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
18717 struct hbq_dmabuf *dmabuf)
18719 struct lpfc_hba *phba = vport->phba;
18720 struct fc_frame_header fc_hdr;
18724 /* Make a copy of fc_hdr before the dmabuf is released */
18725 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
18726 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
18728 if (fctl & FC_FC_EX_CTX) {
18729 /* ABTS by responder to exchange, no cleanup needed */
18732 /* ABTS by initiator to exchange, need to do cleanup */
18733 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
18734 if (aborted == false)
18735 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
18737 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18739 if (phba->nvmet_support) {
18740 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
18744 /* Respond with BA_ACC or BA_RJT accordingly */
18745 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
18749 * lpfc_seq_complete - Indicates if a sequence is complete
18750 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18752 * This function checks the sequence, starting with the frame described by
18753 * @dmabuf, to see if all the frames associated with this sequence are present.
18754 * The frames associated with this sequence are linked to the @dmabuf using the
18755 * dbuf list. This function looks for three major things: 1) that the first frame
18756 * has a sequence count of zero; 2) that there is a frame with the last frame of
18757 * sequence bit set; 3) that there are no holes in the sequence count. The function will
18758 * return 1 when the sequence is complete, otherwise it will return 0.
18761 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
18763 struct fc_frame_header *hdr;
18764 struct lpfc_dmabuf *d_buf;
18765 struct hbq_dmabuf *seq_dmabuf;
18769 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18770 /* make sure the first frame of the sequence has a sequence count of zero */
18771 if (hdr->fh_seq_cnt != seq_count)
18773 fctl = (hdr->fh_f_ctl[0] << 16 |
18774 hdr->fh_f_ctl[1] << 8 |
18776 /* If last frame of sequence we can return success. */
18777 if (fctl & FC_FC_END_SEQ)
18779 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
18780 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18781 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18782 /* If there is a hole in the sequence count then fail. */
18783 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
18785 fctl = (hdr->fh_f_ctl[0] << 16 |
18786 hdr->fh_f_ctl[1] << 8 |
18788 /* If last frame of sequence we can return success. */
18789 if (fctl & FC_FC_END_SEQ)
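/*
 * F_CTL note (illustrative): fh_f_ctl is a 3-byte big-endian field, so
 * the 24-bit value is rebuilt before testing the end-of-sequence bit:
 *
 *	fctl = (hdr->fh_f_ctl[0] << 16) |
 *	       (hdr->fh_f_ctl[1] << 8)  |
 *		hdr->fh_f_ctl[2];
 *	done = fctl & FC_FC_END_SEQ;
 */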
18796 * lpfc_prep_seq - Prep sequence for ULP processing
18797 * @vport: Pointer to the vport on which this sequence was received
18798 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
18800 * This function takes a sequence, described by a list of frames, and creates
18801 * a list of iocbq structures to describe the sequence. This iocbq list will be
18802 * passed to the generic unsolicited sequence handler. This routine
18803 * returns a pointer to the first iocbq in the list. If the function is unable
18804 * to allocate an iocbq then it throws out the received frames that were not
18805 * able to be described and return a pointer to the first iocbq. If unable to
18806 * allocate any iocbqs (including the first) this function will return NULL.
18808 static struct lpfc_iocbq *
18809 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
18811 struct hbq_dmabuf *hbq_buf;
18812 struct lpfc_dmabuf *d_buf, *n_buf;
18813 struct lpfc_iocbq *first_iocbq, *iocbq;
18814 struct fc_frame_header *fc_hdr;
18816 uint32_t len, tot_len;
18818 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18819 /* remove from receive buffer list */
18820 list_del_init(&seq_dmabuf->hbuf.list);
18821 lpfc_update_rcv_time_stamp(vport);
18822 /* get the Remote Port's SID */
18823 sid = sli4_sid_from_fc_hdr(fc_hdr);
18825 /* Get an iocbq struct to fill in. */
18826 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
18828 /* Initialize the first IOCB. */
18829 first_iocbq->wcqe_cmpl.total_data_placed = 0;
18830 bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl,
18832 first_iocbq->vport = vport;
18834 /* Check FC Header to see what TYPE of frame we are rcv'ing */
18835 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
18836 bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp,
18837 sli4_did_from_fc_hdr(fc_hdr));
18840 bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
18842 bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
18843 be16_to_cpu(fc_hdr->fh_ox_id));
18845 /* put the first buffer into the first iocb */
18846 tot_len = bf_get(lpfc_rcqe_length,
18847 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
18849 first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf;
18850 first_iocbq->bpl_dmabuf = NULL;
18851 /* Keep track of the BDE count */
18852 first_iocbq->wcqe_cmpl.word3 = 1;
18854 if (tot_len > LPFC_DATA_BUF_SIZE)
18855 first_iocbq->wqe.gen_req.bde.tus.f.bdeSize =
18856 LPFC_DATA_BUF_SIZE;
18858 first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len;
18860 first_iocbq->wcqe_cmpl.total_data_placed = tot_len;
18861 bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest,
18864 iocbq = first_iocbq;
18866 * Each IOCBq can have two Buffers assigned, so go through the list
18867 * of buffers for this sequence and save two buffers in each IOCBq
18869 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
18871 lpfc_in_buf_free(vport->phba, d_buf);
18874 if (!iocbq->bpl_dmabuf) {
18875 iocbq->bpl_dmabuf = d_buf;
18876 iocbq->wcqe_cmpl.word3++;
18877 /* We need to get the size out of the right CQE */
18878 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18879 len = bf_get(lpfc_rcqe_length,
18880 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18881 iocbq->unsol_rcv_len = len;
18882 iocbq->wcqe_cmpl.total_data_placed += len;
18885 iocbq = lpfc_sli_get_iocbq(vport->phba);
18888 bf_set(lpfc_wcqe_c_status,
18889 &first_iocbq->wcqe_cmpl,
18891 first_iocbq->wcqe_cmpl.parameter =
18892 IOERR_NO_RESOURCES;
18894 lpfc_in_buf_free(vport->phba, d_buf);
18897 /* We need to get the size out of the right CQE */
18898 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18899 len = bf_get(lpfc_rcqe_length,
18900 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18901 iocbq->cmd_dmabuf = d_buf;
18902 iocbq->bpl_dmabuf = NULL;
18903 iocbq->wcqe_cmpl.word3 = 1;
18905 if (len > LPFC_DATA_BUF_SIZE)
18906 iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
18907 LPFC_DATA_BUF_SIZE;
18909 iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
18913 iocbq->wcqe_cmpl.total_data_placed = tot_len;
18914 bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest,
18916 list_add_tail(&iocbq->list, &first_iocbq->list);
18919 /* Free the sequence's header buffer */
18921 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
18923 return first_iocbq;
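/*
 * Packing note (illustrative): each iocbq built above describes at most
 * two buffers (cmd_dmabuf plus bpl_dmabuf), with wcqe_cmpl.word3 counting
 * the BDEs, so a sequence of N data buffers needs roughly N/2 iocbqs
 * chained on first_iocbq->list.
 */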
18927 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
18928 struct hbq_dmabuf *seq_dmabuf)
18930 struct fc_frame_header *fc_hdr;
18931 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
18932 struct lpfc_hba *phba = vport->phba;
18934 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18935 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
18937 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18938 "2707 Ring %d handler: Failed to allocate "
18939 "iocb Rctl x%x Type x%x received\n",
18941 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18944 if (!lpfc_complete_unsol_iocb(phba,
18945 phba->sli4_hba.els_wq->pring,
18946 iocbq, fc_hdr->fh_r_ctl,
18947 fc_hdr->fh_type)) {
18948 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18949 "2540 Ring %d handler: unexpected Rctl "
18950 "x%x Type x%x received\n",
18952 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18953 lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
18956 /* Free iocb created in lpfc_prep_seq */
18957 list_for_each_entry_safe(curr_iocb, next_iocb,
18958 &iocbq->list, list) {
18959 list_del_init(&curr_iocb->list);
18960 lpfc_sli_release_iocbq(phba, curr_iocb);
18962 lpfc_sli_release_iocbq(phba, iocbq);
18966 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
18967 struct lpfc_iocbq *rspiocb)
18969 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
18971 if (pcmd && pcmd->virt)
18972 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18974 lpfc_sli_release_iocbq(phba, cmdiocb);
18975 lpfc_drain_txq(phba);
18979 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
18980 struct hbq_dmabuf *dmabuf)
18982 struct fc_frame_header *fc_hdr;
18983 struct lpfc_hba *phba = vport->phba;
18984 struct lpfc_iocbq *iocbq = NULL;
18985 union lpfc_wqe128 *pwqe;
18986 struct lpfc_dmabuf *pcmd = NULL;
18987 uint32_t frame_len;
18989 unsigned long iflags;
18991 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18992 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
18994 /* Send the received frame back */
18995 iocbq = lpfc_sli_get_iocbq(phba);
18997 /* Queue cq event and wakeup worker thread to process it */
18998 spin_lock_irqsave(&phba->hbalock, iflags);
18999 list_add_tail(&dmabuf->cq_event.list,
19000 &phba->sli4_hba.sp_queue_event);
19001 phba->hba_flag |= HBA_SP_QUEUE_EVT;
19002 spin_unlock_irqrestore(&phba->hbalock, iflags);
19003 lpfc_worker_wake_up(phba);
19007 /* Allocate buffer for command payload */
19008 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
19010 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
19012 if (!pcmd || !pcmd->virt)
19015 INIT_LIST_HEAD(&pcmd->list);
19017 /* copyin the payload */
19018 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
19020 iocbq->cmd_dmabuf = pcmd;
19021 iocbq->vport = vport;
19022 iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
19023 iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
19024 iocbq->num_bdes = 0;
19026 pwqe = &iocbq->wqe;
19027 /* fill in BDE's for command */
19028 pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
19029 pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
19030 pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
19031 pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
19033 pwqe->send_frame.frame_len = frame_len;
19034 pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
19035 pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
19036 pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
19037 pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
19038 pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
19039 pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));
19041 pwqe->generic.wqe_com.word7 = 0;
19042 pwqe->generic.wqe_com.word10 = 0;
19044 bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
19045 bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */
19046 bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */
19047 bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
19048 bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
19049 bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
19050 bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
19051 bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
19052 bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
19053 bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
19054 bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
19055 bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
19056 pwqe->generic.wqe_com.abort_tag = iocbq->iotag;
19058 iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
19060 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
19061 if (rc == IOCB_ERROR)
19064 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19068 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
19069 "2023 Unable to process MDS loopback frame\n");
19070 if (pcmd && pcmd->virt)
19071 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19074 lpfc_sli_release_iocbq(phba, iocbq);
19075 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19079 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
19080 * @phba: Pointer to HBA context object.
19081 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
19083 * This function is called with no lock held. This function processes all
19084 * the received buffers and gives them to the upper layers when a received buffer
19085 * indicates that it is the final frame in the sequence. The interrupt
19086 * service routine processes received buffers at interrupt contexts.
19087 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
19088 * appropriate receive function when the final frame in a sequence is received.
19091 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
19092 struct hbq_dmabuf *dmabuf)
19094 struct hbq_dmabuf *seq_dmabuf;
19095 struct fc_frame_header *fc_hdr;
19096 struct lpfc_vport *vport;
19100 /* Process each received buffer */
19101 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19103 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
19104 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
19105 vport = phba->pport;
19106 /* Handle MDS Loopback frames */
19107 if (!(phba->pport->load_flag & FC_UNLOADING))
19108 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19110 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19114 /* check to see if this is a valid type of frame */
19115 if (lpfc_fc_frame_check(phba, fc_hdr)) {
19116 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19120 if ((bf_get(lpfc_cqe_code,
19121 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
19122 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
19123 &dmabuf->cq_event.cqe.rcqe_cmpl);
19125 fcfi = bf_get(lpfc_rcqe_fcf_id,
19126 &dmabuf->cq_event.cqe.rcqe_cmpl);
19128 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
19129 vport = phba->pport;
19130 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
19131 "2023 MDS Loopback %d bytes\n",
19132 bf_get(lpfc_rcqe_length,
19133 &dmabuf->cq_event.cqe.rcqe_cmpl));
19134 /* Handle MDS Loopback frames */
19135 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19139 /* d_id this frame is directed to */
19140 did = sli4_did_from_fc_hdr(fc_hdr);
19142 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
19144 /* throw out the frame */
19145 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19149 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
19150 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
19151 (did != Fabric_DID)) {
19153 * Throw out the frame if we are not pt2pt.
19154 * The pt2pt protocol allows for discovery frames
19155 * to be received without a registered VPI.
19157 if (!(vport->fc_flag & FC_PT2PT) ||
19158 (phba->link_state == LPFC_HBA_READY)) {
19159 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19164 /* Handle the basic abort sequence (BA_ABTS) event */
19165 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
19166 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
19170 /* Link this frame */
19171 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
19173 /* unable to add frame to vport - throw it out */
19174 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19177 /* If not last frame in sequence continue processing frames. */
19178 if (!lpfc_seq_complete(seq_dmabuf))
19181 /* Send the complete sequence to the upper layer protocol */
19182 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
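/*
 * Minimal sketch of the reassembly contract used above (restating the
 * code, not adding new driver logic): a frame is handed to the ULP only
 * once lpfc_seq_complete() reports the sequence is whole; otherwise the
 * buffer stays linked, or is freed when it cannot be linked:
 *
 *	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
 *	if (!seq_dmabuf) {
 *		lpfc_in_buf_free(phba, &dmabuf->dbuf);	// could not link
 *		return;
 *	}
 *	if (lpfc_seq_complete(seq_dmabuf))
 *		lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
 */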
19186 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
19187 * @phba: pointer to lpfc hba data structure.
19189 * This routine is invoked to post rpi header templates to the
19190 * HBA consistent with the SLI-4 interface spec. This routine
19191 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
19192  * SLI4_PAGE_SIZE / 64 rpi context headers.
19194  * This routine does not require any locks. Its usage is expected
19195 * to be driver load or reset recovery when the driver is
19200 * -EIO - The mailbox failed to complete successfully.
19201 * When this error occurs, the driver is not guaranteed
19202 * to have any rpi regions posted to the device and
19203 * must either attempt to repost the regions or take a
19207 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
19209 struct lpfc_rpi_hdr *rpi_page;
19213 /* SLI4 ports that support extents do not require RPI headers. */
19214 if (!phba->sli4_hba.rpi_hdrs_in_use)
19216 if (phba->sli4_hba.extents_in_use)
19219 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
19221 * Assign the rpi headers a physical rpi only if the driver
19222 * has not initialized those resources. A port reset only
19223 * needs the headers posted.
19225 		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
19226 		    LPFC_RPI_RSRC_RDY)
19227 			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19229 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
19230 if (rc != MBX_SUCCESS) {
19231 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19232 "2008 Error %d posting all rpi "
19240 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
19241 LPFC_RPI_RSRC_RDY);
19246 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
19247 * @phba: pointer to lpfc hba data structure.
19248 * @rpi_page: pointer to the rpi memory region.
19250 * This routine is invoked to post a single rpi header to the
19251 * HBA consistent with the SLI-4 interface spec. This memory region
19252 * maps up to 64 rpi context regions.
19256 * -ENOMEM - No available memory
19257 * -EIO - The mailbox failed to complete successfully.
19260 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
19262 LPFC_MBOXQ_t *mboxq;
19263 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
19265 uint32_t shdr_status, shdr_add_status;
19266 union lpfc_sli4_cfg_shdr *shdr;
19268 /* SLI4 ports that support extents do not require RPI headers. */
19269 if (!phba->sli4_hba.rpi_hdrs_in_use)
19271 if (phba->sli4_hba.extents_in_use)
19274 /* The port is notified of the header region via a mailbox command. */
19275 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19277 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19278 "2001 Unable to allocate memory for issuing "
19279 "SLI_CONFIG_SPECIAL mailbox command\n");
19283 /* Post all rpi memory regions to the port. */
19284 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
19285 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19286 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
19287 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
19288 sizeof(struct lpfc_sli4_cfg_mhdr),
19289 LPFC_SLI4_MBX_EMBED);
19292 /* Post the physical rpi to the port for this rpi header. */
19293 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
19294 rpi_page->start_rpi);
19295 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
19296 hdr_tmpl, rpi_page->page_count);
19298 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
19299 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
19300 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19301 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
19302 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19303 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19304 mempool_free(mboxq, phba->mbox_mem_pool);
19305 if (shdr_status || shdr_add_status || rc) {
19306 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19307 "2514 POST_RPI_HDR mailbox failed with "
19308 "status x%x add_status x%x, mbx status x%x\n",
19309 shdr_status, shdr_add_status, rc);
19313 	 * The next_rpi stores the next logical modulo-64 rpi value used
19314 * to post physical rpis in subsequent rpi postings.
19316 spin_lock_irq(&phba->hbalock);
19317 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
19318 spin_unlock_irq(&phba->hbalock);
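/*
 * Hedged sketch of the SLI4 config-mailbox status idiom used here and in
 * the FCF routines below: success requires the mailbox return code and
 * both sub-header status words to be clear:
 *
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 *	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 *	if (shdr_status || shdr_add_status || rc)
 *		return -ENXIO;		// hypothetical caller policy
 */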
19324 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
19325 * @phba: pointer to lpfc hba data structure.
19327  * This routine is invoked to allocate an rpi from the range of
19328  * rpis managed by the driver, consistent with the SLI-4 interface
19329  * spec. It also posts an additional rpi header page to the port
19330  * when the pool of unallocated rpis runs low.
19333 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
19334 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
19337 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
19340 uint16_t max_rpi, rpi_limit;
19341 uint16_t rpi_remaining, lrpi = 0;
19342 struct lpfc_rpi_hdr *rpi_hdr;
19343 unsigned long iflag;
19346 * Fetch the next logical rpi. Because this index is logical,
19347 * the driver starts at 0 each time.
19349 spin_lock_irqsave(&phba->hbalock, iflag);
19350 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
19351 rpi_limit = phba->sli4_hba.next_rpi;
19353 rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
19354 if (rpi >= rpi_limit)
19355 rpi = LPFC_RPI_ALLOC_ERROR;
19357 set_bit(rpi, phba->sli4_hba.rpi_bmask);
19358 phba->sli4_hba.max_cfg_param.rpi_used++;
19359 phba->sli4_hba.rpi_count++;
19361 lpfc_printf_log(phba, KERN_INFO,
19362 LOG_NODE | LOG_DISCOVERY,
19363 "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
19364 (int) rpi, max_rpi, rpi_limit);
19367 * Don't try to allocate more rpi header regions if the device limit
19368 * has been exhausted.
19370 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
19371 (phba->sli4_hba.rpi_count >= max_rpi)) {
19372 spin_unlock_irqrestore(&phba->hbalock, iflag);
19377 * RPI header postings are not required for SLI4 ports capable of
19380 if (!phba->sli4_hba.rpi_hdrs_in_use) {
19381 spin_unlock_irqrestore(&phba->hbalock, iflag);
19386 * If the driver is running low on rpi resources, allocate another
19387 * page now. Note that the next_rpi value is used because
19388 * it represents how many are actually in use whereas max_rpi notes
19389 	 * the maximum supported by the device.
19391 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
19392 spin_unlock_irqrestore(&phba->hbalock, iflag);
19393 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
19394 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
19396 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19397 "2002 Error Could not grow rpi "
19400 lrpi = rpi_hdr->start_rpi;
19401 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19402 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
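/*
 * Usage sketch (illustrative, not part of the driver): callers pair
 * lpfc_sli4_alloc_rpi() with lpfc_sli4_free_rpi() and must check for
 * exhaustion; the -ENOSPC mapping below is a hypothetical caller policy:
 *
 *	int rpi = lpfc_sli4_alloc_rpi(phba);
 *	if (rpi == LPFC_RPI_ALLOC_ERROR)
 *		return -ENOSPC;
 *	...use rpi for node registration...
 *	lpfc_sli4_free_rpi(phba, rpi);
 */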
19410 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
19411 * @phba: pointer to lpfc hba data structure.
19412 * @rpi: rpi to free
19414 * This routine is invoked to release an rpi to the pool of
19415 * available rpis maintained by the driver.
19418 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19421 * if the rpi value indicates a prior unreg has already
19422 * been done, skip the unreg.
19424 if (rpi == LPFC_RPI_ALLOC_ERROR)
19427 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
19428 phba->sli4_hba.rpi_count--;
19429 phba->sli4_hba.max_cfg_param.rpi_used--;
19431 lpfc_printf_log(phba, KERN_INFO,
19432 LOG_NODE | LOG_DISCOVERY,
19433 "2016 rpi %x not inuse\n",
19439 * lpfc_sli4_free_rpi - Release an rpi for reuse.
19440 * @phba: pointer to lpfc hba data structure.
19441 * @rpi: rpi to free
19443 * This routine is invoked to release an rpi to the pool of
19444 * available rpis maintained by the driver.
19447 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19449 spin_lock_irq(&phba->hbalock);
19450 __lpfc_sli4_free_rpi(phba, rpi);
19451 spin_unlock_irq(&phba->hbalock);
19455 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
19456 * @phba: pointer to lpfc hba data structure.
19458 * This routine is invoked to remove the memory region that
19459  * provides rpis via a bitmask.
19462 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
19464 kfree(phba->sli4_hba.rpi_bmask);
19465 kfree(phba->sli4_hba.rpi_ids);
19466 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
19470  * lpfc_sli4_resume_rpi - Resume an rpi with the port
19471 * @ndlp: pointer to lpfc nodelist data structure.
19472 * @cmpl: completion call-back.
19473 * @arg: data to load as MBox 'caller buffer information'
19475  * This routine is invoked to send a RESUME_RPI mailbox command to
19476  * the port for the rpi associated with @ndlp.
19479 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19480 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
19482 LPFC_MBOXQ_t *mboxq;
19483 struct lpfc_hba *phba = ndlp->phba;
19486 /* The port is notified of the header region via a mailbox command. */
19487 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19491 /* If cmpl assigned, then this nlp_get pairs with
19492 * lpfc_mbx_cmpl_resume_rpi.
19494 * Else cmpl is NULL, then this nlp_get pairs with
19495 * lpfc_sli_def_mbox_cmpl.
19497 if (!lpfc_nlp_get(ndlp)) {
19498 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19499 "2122 %s: Failed to get nlp ref\n",
19501 mempool_free(mboxq, phba->mbox_mem_pool);
19505 /* Post all rpi memory regions to the port. */
19506 lpfc_resume_rpi(mboxq, ndlp);
19508 mboxq->mbox_cmpl = cmpl;
19509 mboxq->ctx_buf = arg;
19511 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19512 mboxq->ctx_ndlp = ndlp;
19513 mboxq->vport = ndlp->vport;
19514 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19515 if (rc == MBX_NOT_FINISHED) {
19516 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19517 "2010 Resume RPI Mailbox failed "
19518 "status %d, mbxStatus x%x\n", rc,
19519 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19520 lpfc_nlp_put(ndlp);
19521 mempool_free(mboxq, phba->mbox_mem_pool);
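/*
 * Hedged note on reference ownership in lpfc_sli4_resume_rpi(): the
 * lpfc_nlp_get() taken above is dropped by whichever mailbox completion
 * runs, so only the submit-failure path may drop it directly:
 *
 *	if (rc == MBX_NOT_FINISHED) {
 *		lpfc_nlp_put(ndlp);		// undo the get taken above
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 *	}
 */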
19528 * lpfc_sli4_init_vpi - Initialize a vpi with the port
19529 * @vport: Pointer to the vport for which the vpi is being initialized
19531 * This routine is invoked to activate a vpi with the port.
19535 * -Evalue otherwise
19538 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
19540 LPFC_MBOXQ_t *mboxq;
19542 int retval = MBX_SUCCESS;
19544 struct lpfc_hba *phba = vport->phba;
19545 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19548 lpfc_init_vpi(phba, mboxq, vport->vpi);
19549 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
19550 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
19551 if (rc != MBX_SUCCESS) {
19552 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19553 "2022 INIT VPI Mailbox failed "
19554 "status %d, mbxStatus x%x\n", rc,
19555 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19558 if (rc != MBX_TIMEOUT)
19559 mempool_free(mboxq, vport->phba->mbox_mem_pool);
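/*
 * Hedged sketch of the synchronous mailbox pattern used by
 * lpfc_sli4_init_vpi(): on MBX_TIMEOUT the mailbox is still owned by the
 * completion path, so the caller must not free it:
 *
 *	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
 *	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 */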
19565 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
19566 * @phba: pointer to lpfc hba data structure.
19567 * @mboxq: Pointer to mailbox object.
19569  * This routine is the completion handler for the ADD_FCF_RECORD mailbox
19570  * command. It checks the mailbox sub-header status and frees the
19571  * nonembedded mailbox resources.
19574 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
19577 union lpfc_sli4_cfg_shdr *shdr;
19578 uint32_t shdr_status, shdr_add_status;
19580 virt_addr = mboxq->sge_array->addr[0];
19581 /* The IOCTL status is embedded in the mailbox subheader. */
19582 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
19583 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19584 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19586 if ((shdr_status || shdr_add_status) &&
19587 (shdr_status != STATUS_FCF_IN_USE))
19588 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19589 "2558 ADD_FCF_RECORD mailbox failed with "
19590 "status x%x add_status x%x\n",
19591 shdr_status, shdr_add_status);
19593 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19597 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
19598 * @phba: pointer to lpfc hba data structure.
19599 * @fcf_record: pointer to the initialized fcf record to add.
19601 * This routine is invoked to manually add a single FCF record. The caller
19602 * must pass a completely initialized FCF_Record. This routine takes
19603 * care of the nonembedded mailbox operations.
19606 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
19609 LPFC_MBOXQ_t *mboxq;
19612 struct lpfc_mbx_sge sge;
19613 uint32_t alloc_len, req_len;
19616 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19618 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19619 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
19623 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
19626 /* Allocate DMA memory and set up the non-embedded mailbox command */
19627 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19628 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
19629 req_len, LPFC_SLI4_MBX_NEMBED);
19630 if (alloc_len < req_len) {
19631 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19632 "2523 Allocated DMA memory size (x%x) is "
19633 "less than the requested DMA memory "
19634 "size (x%x)\n", alloc_len, req_len);
19635 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19640 * Get the first SGE entry from the non-embedded DMA memory. This
19641 * routine only uses a single SGE.
19643 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
19644 virt_addr = mboxq->sge_array->addr[0];
19646 * Configure the FCF record for FCFI 0. This is the driver's
19647 * hardcoded default and gets used in nonFIP mode.
19649 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
19650 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
19651 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
19654 * Copy the fcf_index and the FCF Record Data. The data starts after
19655 * the FCoE header plus word10. The data copy needs to be endian
19658 bytep += sizeof(uint32_t);
19659 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
19660 mboxq->vport = phba->pport;
19661 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
19662 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19663 if (rc == MBX_NOT_FINISHED) {
19664 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19665 "2515 ADD_FCF_RECORD mailbox failed with "
19666 "status 0x%x\n", rc);
19667 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19676 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
19677 * @phba: pointer to lpfc hba data structure.
19678 * @fcf_record: pointer to the fcf record to write the default data.
19679 * @fcf_index: FCF table entry index.
19681 * This routine is invoked to build the driver's default FCF record. The
19682 * values used are hardcoded. This routine handles memory initialization.
19686 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
19687 struct fcf_record *fcf_record,
19688 uint16_t fcf_index)
19690 memset(fcf_record, 0, sizeof(struct fcf_record));
19691 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
19692 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
19693 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
19694 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
19695 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
19696 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
19697 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
19698 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
19699 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
19700 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
19701 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
19702 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
19703 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
19704 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
19705 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
19706 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
19707 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
19708 /* Set the VLAN bit map */
19709 if (phba->valid_vlan) {
19710 fcf_record->vlan_bitmap[phba->vlan_id / 8]
19711 = 1 << (phba->vlan_id % 8);
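/*
 * Worked example (hypothetical vlan_id = 100) of the VLAN bit map math
 * above, eight VLAN ids per byte:
 *
 *	vlan_bitmap[100 / 8] = 1 << (100 % 8);
 *	// i.e. vlan_bitmap[12] = 1 << 4 = 0x10
 */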
19716 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
19717 * @phba: pointer to lpfc hba data structure.
19718 * @fcf_index: FCF table entry offset.
19720 * This routine is invoked to scan the entire FCF table by reading FCF
19721 * record and processing it one at a time starting from the @fcf_index
19722 * for initial FCF discovery or fast FCF failover rediscovery.
19724  * Return 0 if the mailbox command is submitted successfully, nonzero
19728 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19731 LPFC_MBOXQ_t *mboxq;
19733 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
19734 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
19735 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19737 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19738 "2000 Failed to allocate mbox for "
19741 goto fail_fcf_scan;
19743 /* Construct the read FCF record mailbox command */
19744 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19747 goto fail_fcf_scan;
19749 /* Issue the mailbox command asynchronously */
19750 mboxq->vport = phba->pport;
19751 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
19753 spin_lock_irq(&phba->hbalock);
19754 phba->hba_flag |= FCF_TS_INPROG;
19755 spin_unlock_irq(&phba->hbalock);
19757 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19758 if (rc == MBX_NOT_FINISHED)
19761 /* Reset eligible FCF count for new scan */
19762 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
19763 phba->fcf.eligible_fcf_cnt = 0;
19769 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19770 /* FCF scan failed, clear FCF_TS_INPROG flag */
19771 spin_lock_irq(&phba->hbalock);
19772 phba->hba_flag &= ~FCF_TS_INPROG;
19773 spin_unlock_irq(&phba->hbalock);
19779 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
19780 * @phba: pointer to lpfc hba data structure.
19781 * @fcf_index: FCF table entry offset.
19783 * This routine is invoked to read an FCF record indicated by @fcf_index
19784 * and to use it for FLOGI roundrobin FCF failover.
19786  * Return 0 if the mailbox command is submitted successfully, nonzero
19790 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19793 LPFC_MBOXQ_t *mboxq;
19795 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19797 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19798 "2763 Failed to allocate mbox for "
19801 goto fail_fcf_read;
19803 /* Construct the read FCF record mailbox command */
19804 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19807 goto fail_fcf_read;
19809 /* Issue the mailbox command asynchronously */
19810 mboxq->vport = phba->pport;
19811 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
19812 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19813 if (rc == MBX_NOT_FINISHED)
19819 if (error && mboxq)
19820 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19825 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
19826 * @phba: pointer to lpfc hba data structure.
19827 * @fcf_index: FCF table entry offset.
19829 * This routine is invoked to read an FCF record indicated by @fcf_index to
19830 * determine whether it's eligible for FLOGI roundrobin failover list.
19832  * Return 0 if the mailbox command is submitted successfully, nonzero
19836 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19839 LPFC_MBOXQ_t *mboxq;
19841 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19843 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19844 "2758 Failed to allocate mbox for "
19847 goto fail_fcf_read;
19849 /* Construct the read FCF record mailbox command */
19850 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19853 goto fail_fcf_read;
19855 /* Issue the mailbox command asynchronously */
19856 mboxq->vport = phba->pport;
19857 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
19858 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19859 if (rc == MBX_NOT_FINISHED)
19865 if (error && mboxq)
19866 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19871 * lpfc_check_next_fcf_pri_level
19872 * @phba: pointer to the lpfc_hba struct for this port.
19873 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
19874  * routine when the rr_bmask is empty. The FCF indices are put into the
19875 * rr_bmask based on their priority level. Starting from the highest priority
19876 * to the lowest. The most likely FCF candidate will be in the highest
19877  * priority group. When this routine is called, it searches the fcf_pri list
19878  * for the next lowest priority group and repopulates the rr_bmask with only those
19881 * 1=success 0=failure
19884 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
19886 uint16_t next_fcf_pri;
19887 uint16_t last_index;
19888 struct lpfc_fcf_pri *fcf_pri;
19892 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
19893 LPFC_SLI4_FCF_TBL_INDX_MAX);
19894 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19895 "3060 Last IDX %d\n", last_index);
19897 /* Verify the priority list has 2 or more entries */
19898 spin_lock_irq(&phba->hbalock);
19899 if (list_empty(&phba->fcf.fcf_pri_list) ||
19900 list_is_singular(&phba->fcf.fcf_pri_list)) {
19901 spin_unlock_irq(&phba->hbalock);
19902 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19903 "3061 Last IDX %d\n", last_index);
19904 return 0; /* Empty rr list */
19906 spin_unlock_irq(&phba->hbalock);
19910 * Clear the rr_bmask and set all of the bits that are at this
19913 memset(phba->fcf.fcf_rr_bmask, 0,
19914 sizeof(*phba->fcf.fcf_rr_bmask));
19915 spin_lock_irq(&phba->hbalock);
19916 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19917 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
19920 		 * the first priority level that has not failed FLOGI
19921 		 * will be the highest.
19924 next_fcf_pri = fcf_pri->fcf_rec.priority;
19925 spin_unlock_irq(&phba->hbalock);
19926 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19927 rc = lpfc_sli4_fcf_rr_index_set(phba,
19928 fcf_pri->fcf_rec.fcf_index);
19932 spin_lock_irq(&phba->hbalock);
19935 * if next_fcf_pri was not set above and the list is not empty then
19936 * we have failed flogis on all of them. So reset flogi failed
19937 * and start at the beginning.
19939 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
19940 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19941 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
19943 			 * the first priority level that has not failed FLOGI
19944 			 * will be the highest.
19947 next_fcf_pri = fcf_pri->fcf_rec.priority;
19948 spin_unlock_irq(&phba->hbalock);
19949 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19950 rc = lpfc_sli4_fcf_rr_index_set(phba,
19951 fcf_pri->fcf_rec.fcf_index);
19955 spin_lock_irq(&phba->hbalock);
19959 spin_unlock_irq(&phba->hbalock);
19964 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
19965 * @phba: pointer to lpfc hba data structure.
19967 * This routine is to get the next eligible FCF record index in a round
19968  * robin fashion. If the next eligible FCF record index equals the
19969 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
19970 * shall be returned, otherwise, the next eligible FCF record's index
19971 * shall be returned.
19974 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
19976 uint16_t next_fcf_index;
19979 /* Search start from next bit of currently registered FCF index */
19980 next_fcf_index = phba->fcf.current_rec.fcf_indx;
19983 /* Determine the next fcf index to check */
19984 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
19985 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19986 LPFC_SLI4_FCF_TBL_INDX_MAX,
19989 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
19990 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19992 * If we have wrapped then we need to clear the bits that
19993 * have been tested so that we can detect when we should
19994 * change the priority level.
19996 next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
19997 LPFC_SLI4_FCF_TBL_INDX_MAX);
20001 /* Check roundrobin failover list empty condition */
20002 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
20003 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
20005 		 * If the next fcf index is not found, check if there are lower
20006 		 * priority level fcfs in the fcf_priority list.
20007 		 * Set up the rr_bmask with all of the available fcf bits
20008 * at that level and continue the selection process.
20010 if (lpfc_check_next_fcf_pri_level(phba))
20011 goto initial_priority;
20012 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
20013 "2844 No roundrobin failover FCF available\n");
20015 return LPFC_FCOE_FCF_NEXT_NONE;
20018 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
20019 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
20020 LPFC_FCF_FLOGI_FAILED) {
20021 if (list_is_singular(&phba->fcf.fcf_pri_list))
20022 return LPFC_FCOE_FCF_NEXT_NONE;
20024 goto next_priority;
20027 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20028 "2845 Get next roundrobin failover FCF (x%x)\n",
20031 return next_fcf_index;
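/*
 * Hedged sketch of the roundrobin walk above, with hypothetical names
 * (bmask, cur, next, MAX): search from the bit after the current index,
 * wrap with find_first_bit(), and treat landing back on the current
 * index as "no eligible FCF":
 *
 *	next = find_next_bit(bmask, MAX, (cur + 1) % MAX);
 *	if (next >= MAX)
 *		next = find_first_bit(bmask, MAX);
 *	if (next >= MAX || next == cur)
 *		return LPFC_FCOE_FCF_NEXT_NONE;
 */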
20035 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
20036 * @phba: pointer to lpfc hba data structure.
20037 * @fcf_index: index into the FCF table to 'set'
20039 * This routine sets the FCF record index in to the eligible bmask for
20040 * roundrobin failover search. It checks to make sure that the index
20041 * does not go beyond the range of the driver allocated bmask dimension
20042 * before setting the bit.
20044  * Returns 0 if the index bit is successfully set; otherwise, it returns
20048 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
20050 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20051 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20052 "2610 FCF (x%x) reached driver's book "
20053 "keeping dimension:x%x\n",
20054 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20057 /* Set the eligible FCF record index bmask */
20058 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20060 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20061 "2790 Set FCF (x%x) to roundrobin FCF failover "
20062 "bmask\n", fcf_index);
20068 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
20069 * @phba: pointer to lpfc hba data structure.
20070 * @fcf_index: index into the FCF table to 'clear'
20072 * This routine clears the FCF record index from the eligible bmask for
20073 * roundrobin failover search. It checks to make sure that the index
20074 * does not go beyond the range of the driver allocated bmask dimension
20075 * before clearing the bit.
20078 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
20080 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
20081 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20082 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20083 "2762 FCF (x%x) reached driver's book "
20084 "keeping dimension:x%x\n",
20085 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20088 /* Clear the eligible FCF record index bmask */
20089 spin_lock_irq(&phba->hbalock);
20090 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
20092 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
20093 list_del_init(&fcf_pri->list);
20097 spin_unlock_irq(&phba->hbalock);
20098 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20100 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20101 "2791 Clear FCF (x%x) from roundrobin failover "
20102 "bmask\n", fcf_index);
20106 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
20107 * @phba: pointer to lpfc hba data structure.
20108 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
20110 * This routine is the completion routine for the rediscover FCF table mailbox
20111 * command. If the mailbox command returned failure, it will try to stop the
20112 * FCF rediscover wait timer.
20115 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
20117 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20118 uint32_t shdr_status, shdr_add_status;
20120 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20122 shdr_status = bf_get(lpfc_mbox_hdr_status,
20123 &redisc_fcf->header.cfg_shdr.response);
20124 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20125 &redisc_fcf->header.cfg_shdr.response);
20126 if (shdr_status || shdr_add_status) {
20127 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20128 "2746 Requesting for FCF rediscovery failed "
20129 "status x%x add_status x%x\n",
20130 shdr_status, shdr_add_status);
20131 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
20132 spin_lock_irq(&phba->hbalock);
20133 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
20134 spin_unlock_irq(&phba->hbalock);
20136 * CVL event triggered FCF rediscover request failed,
20137 * last resort to re-try current registered FCF entry.
20139 lpfc_retry_pport_discovery(phba);
20141 spin_lock_irq(&phba->hbalock);
20142 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
20143 spin_unlock_irq(&phba->hbalock);
20145 * DEAD FCF event triggered FCF rediscover request
20146 * failed, last resort to fail over as a link down
20147 * to FCF registration.
20149 lpfc_sli4_fcf_dead_failthrough(phba);
20152 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20153 "2775 Start FCF rediscover quiescent timer\n");
20155 		 * Start the FCF rediscovery wait timer for the pending FCF
20156 		 * rediscovery before rescanning the FCF record table.
20158 lpfc_fcf_redisc_wait_start_timer(phba);
20161 mempool_free(mbox, phba->mbox_mem_pool);
20165 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
20166 * @phba: pointer to lpfc hba data structure.
20168  * This routine is invoked to request rediscovery of the entire FCF table
20172 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
20174 LPFC_MBOXQ_t *mbox;
20175 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20178 /* Cancel retry delay timers to all vports before FCF rediscover */
20179 lpfc_cancel_all_vport_retry_delay_timer(phba);
20181 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20183 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20184 "2745 Failed to allocate mbox for "
20185 "requesting FCF rediscover.\n");
20189 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
20190 sizeof(struct lpfc_sli4_cfg_mhdr));
20191 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
20192 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
20193 length, LPFC_SLI4_MBX_EMBED);
20195 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20196 /* Set count to 0 for invalidating the entire FCF database */
20197 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
20199 /* Issue the mailbox command asynchronously */
20200 mbox->vport = phba->pport;
20201 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
20202 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
20204 if (rc == MBX_NOT_FINISHED) {
20205 mempool_free(mbox, phba->mbox_mem_pool);
20212 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
20213 * @phba: pointer to lpfc hba data structure.
20215 * This function is the failover routine as a last resort to the FCF DEAD
20216  * event when the driver has failed to perform fast FCF failover.
20219 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
20221 uint32_t link_state;
20224 * Last resort as FCF DEAD event failover will treat this as
20225 * a link down, but save the link state because we don't want
20226 * it to be changed to Link Down unless it is already down.
20228 link_state = phba->link_state;
20229 lpfc_linkdown(phba);
20230 phba->link_state = link_state;
20232 /* Unregister FCF if no devices connected to it */
20233 lpfc_unregister_unused_fcf(phba);
20237 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
20238 * @phba: pointer to lpfc hba data structure.
20239 * @rgn23_data: pointer to configure region 23 data.
20241 * This function gets SLI3 port configure region 23 data through memory dump
20242 * mailbox command. When it successfully retrieves data, the size of the data
20243 * will be returned, otherwise, 0 will be returned.
20246 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20248 LPFC_MBOXQ_t *pmb = NULL;
20250 uint32_t offset = 0;
20256 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20258 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20259 "2600 failed to allocate mailbox memory\n");
20265 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
20266 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
20268 if (rc != MBX_SUCCESS) {
20269 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20270 "2601 failed to read config "
20271 "region 23, rc 0x%x Status 0x%x\n",
20272 rc, mb->mbxStatus);
20273 mb->un.varDmp.word_cnt = 0;
20276 * dump mem may return a zero when finished or we got a
20277 * mailbox error, either way we are done.
20279 if (mb->un.varDmp.word_cnt == 0)
20282 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
20283 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
20285 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
20286 rgn23_data + offset,
20287 mb->un.varDmp.word_cnt);
20288 offset += mb->un.varDmp.word_cnt;
20289 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
20291 mempool_free(pmb, phba->mbox_mem_pool);
20296 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
20297 * @phba: pointer to lpfc hba data structure.
20298 * @rgn23_data: pointer to configure region 23 data.
20300 * This function gets SLI4 port configure region 23 data through memory dump
20301 * mailbox command. When it successfully retrieves data, the size of the data
20302 * will be returned, otherwise, 0 will be returned.
20305 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20307 LPFC_MBOXQ_t *mboxq = NULL;
20308 struct lpfc_dmabuf *mp = NULL;
20309 struct lpfc_mqe *mqe;
20310 uint32_t data_length = 0;
20316 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20318 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20319 "3105 failed to allocate mailbox memory\n");
20323 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
20325 mqe = &mboxq->u.mqe;
20326 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
20327 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
20330 data_length = mqe->un.mb_words[5];
20331 if (data_length == 0)
20333 if (data_length > DMP_RGN23_SIZE) {
20337 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
20339 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
20340 return data_length;
20344 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
20345 * @phba: pointer to lpfc hba data structure.
20347  * This function reads region 23 and parses the TLVs for port state to
20348  * decide if the user disabled the port. If the TLV indicates the
20349 * port is disabled, the hba_flag is set accordingly.
20352 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
20354 uint8_t *rgn23_data = NULL;
20355 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
20356 uint32_t offset = 0;
20358 /* Get adapter Region 23 data */
20359 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
20363 if (phba->sli_rev < LPFC_SLI_REV4)
20364 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
20366 if_type = bf_get(lpfc_sli_intf_if_type,
20367 &phba->sli4_hba.sli_intf);
20368 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
20370 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
20376 /* Check the region signature first */
20377 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
20378 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20379 "2619 Config region 23 has bad signature\n");
20384 /* Check the data structure version */
20385 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
20386 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20387 "2620 Config region 23 has bad version\n");
20392 /* Parse TLV entries in the region */
20393 while (offset < data_size) {
20394 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
20397 		 * If the TLV is not a driver-specific TLV or the driver id is
20398 		 * not the Linux driver id, skip the record.
20400 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
20401 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
20402 (rgn23_data[offset + 3] != 0)) {
20403 offset += rgn23_data[offset + 1] * 4 + 4;
20407 /* Driver found a driver specific TLV in the config region */
20408 sub_tlv_len = rgn23_data[offset + 1] * 4;
20413 * Search for configured port state sub-TLV.
20415 while ((offset < data_size) &&
20416 (tlv_offset < sub_tlv_len)) {
20417 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
20422 if (rgn23_data[offset] != PORT_STE_TYPE) {
20423 offset += rgn23_data[offset + 1] * 4 + 4;
20424 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
20428 /* This HBA contains PORT_STE configured */
20429 if (!rgn23_data[offset + 2])
20430 phba->hba_flag |= LINK_DISABLED;
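/*
 * Illustrative sketch of the region-23 TLV walk above (names are local
 * to this sketch): each record is a type byte, a length in words, then
 * data, so skipping a record advances by len * 4 + 4 bytes:
 *
 *	u8 type = rgn23_data[offset];
 *	u8 len  = rgn23_data[offset + 1];	// length in words
 *	offset += len * 4 + 4;			// data plus record header
 */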
20442 * lpfc_log_fw_write_cmpl - logs firmware write completion status
20443 * @phba: pointer to lpfc hba data structure
20444 * @shdr_status: wr_object rsp's status field
20445 * @shdr_add_status: wr_object rsp's add_status field
20446 * @shdr_add_status_2: wr_object rsp's add_status_2 field
20447 * @shdr_change_status: wr_object rsp's change_status field
20448 * @shdr_csf: wr_object rsp's csf bit
20450 * This routine is intended to be called after a firmware write completes.
20451  * It will log the next action items to be performed by the user to
20452  * instantiate the newly downloaded firmware, or the reason for incompatibility.
20455 lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
20456 u32 shdr_add_status, u32 shdr_add_status_2,
20457 u32 shdr_change_status, u32 shdr_csf)
20459 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20460 "4198 %s: flash_id x%02x, asic_rev x%02x, "
20461 "status x%02x, add_status x%02x, add_status_2 x%02x, "
20462 "change_status x%02x, csf %01x\n", __func__,
20463 phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
20464 shdr_status, shdr_add_status, shdr_add_status_2,
20465 shdr_change_status, shdr_csf);
20467 if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
20468 switch (shdr_add_status_2) {
20469 case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
20470 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20471 "4199 Firmware write failed: "
20472 "image incompatible with flash x%02x\n",
20473 phba->sli4_hba.flash_id);
20475 case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
20476 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20477 "4200 Firmware write failed: "
20478 "image incompatible with ASIC "
20479 "architecture x%02x\n",
20480 phba->sli4_hba.asic_rev);
20483 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20484 "4210 Firmware write failed: "
20485 "add_status_2 x%02x\n",
20486 shdr_add_status_2);
20489 } else if (!shdr_status && !shdr_add_status) {
20490 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20491 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
20493 shdr_change_status =
20494 LPFC_CHANGE_STATUS_PCI_RESET;
20497 switch (shdr_change_status) {
20498 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
20499 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20500 "3198 Firmware write complete: System "
20501 "reboot required to instantiate\n");
20503 case (LPFC_CHANGE_STATUS_FW_RESET):
20504 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20505 "3199 Firmware write complete: "
20506 "Firmware reset required to "
20509 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
20510 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20511 "3200 Firmware write complete: Port "
20512 "Migration or PCI Reset required to "
20515 case (LPFC_CHANGE_STATUS_PCI_RESET):
20516 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20517 "3201 Firmware write complete: PCI "
20518 "Reset required to instantiate\n");
20527 * lpfc_wr_object - write an object to the firmware
20528 * @phba: HBA structure that indicates port to create a queue on.
20529 * @dmabuf_list: list of dmabufs to write to the port.
20530 * @size: the total byte value of the objects to write to the port.
20531 * @offset: the current offset to be used to start the transfer.
20533  * This routine will create a wr_object mailbox command to send to the port.
20534  * The mailbox command will be constructed using the dma buffers described in
20535  * @dmabuf_list to create a list of BDEs. This routine will fill in as many
20536  * BDEs as the embedded mailbox can support. The @offset variable will be
20537 * used to indicate the starting offset of the transfer and will also return
20538 * the offset after the write object mailbox has completed. @size is used to
20539 * determine the end of the object and whether the eof bit should be set.
20541  * Return 0 if successful; @offset will contain the new offset to use
20542 * for the next write.
20543 * Return negative value for error cases.
20546 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
20547 uint32_t size, uint32_t *offset)
20549 struct lpfc_mbx_wr_object *wr_object;
20550 LPFC_MBOXQ_t *mbox;
20552 uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
20553 uint32_t shdr_change_status = 0, shdr_csf = 0;
20555 struct lpfc_dmabuf *dmabuf;
20556 uint32_t written = 0;
20557 bool check_change_status = false;
20559 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20563 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
20564 LPFC_MBOX_OPCODE_WRITE_OBJECT,
20565 sizeof(struct lpfc_mbx_wr_object) -
20566 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
20568 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
20569 wr_object->u.request.write_offset = *offset;
20570 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
20571 wr_object->u.request.object_name[0] =
20572 cpu_to_le32(wr_object->u.request.object_name[0]);
20573 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
20574 list_for_each_entry(dmabuf, dmabuf_list, list) {
20575 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
20577 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
20578 wr_object->u.request.bde[i].addrHigh =
20579 putPaddrHigh(dmabuf->phys);
20580 if (written + SLI4_PAGE_SIZE >= size) {
20581 			wr_object->u.request.bde[i].tus.f.bdeSize =
20582 				(size - written);
20583 			written += (size - written);
20584 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
20585 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
20586 check_change_status = true;
20588 			wr_object->u.request.bde[i].tus.f.bdeSize =
20589 				SLI4_PAGE_SIZE;
20590 			written += SLI4_PAGE_SIZE;
20594 wr_object->u.request.bde_count = i;
20595 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
20596 if (!phba->sli4_hba.intr_enable)
20597 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
20599 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
20600 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
20602 /* The IOCTL status is embedded in the mailbox subheader. */
20603 shdr_status = bf_get(lpfc_mbox_hdr_status,
20604 &wr_object->header.cfg_shdr.response);
20605 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20606 &wr_object->header.cfg_shdr.response);
20607 shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
20608 &wr_object->header.cfg_shdr.response);
20609 if (check_change_status) {
20610 shdr_change_status = bf_get(lpfc_wr_object_change_status,
20611 &wr_object->u.response);
20612 shdr_csf = bf_get(lpfc_wr_object_csf,
20613 &wr_object->u.response);
20616 if (!phba->sli4_hba.intr_enable)
20617 mempool_free(mbox, phba->mbox_mem_pool);
20618 else if (rc != MBX_TIMEOUT)
20619 mempool_free(mbox, phba->mbox_mem_pool);
20620 if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
20621 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20622 "3025 Write Object mailbox failed with "
20623 "status x%x add_status x%x, add_status_2 x%x, "
20624 "mbx status x%x\n",
20625 shdr_status, shdr_add_status, shdr_add_status_2,
20628 *offset = shdr_add_status;
20630 *offset += wr_object->u.response.actual_write_length;
20633 if (rc || check_change_status)
20634 lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
20635 shdr_add_status_2, shdr_change_status,
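/*
 * Hedged sketch of the BDE chunking in lpfc_wr_object(): the object is
 * written one SLI4_PAGE_SIZE BDE at a time, and the final (possibly
 * partial) chunk sets the eof bit:
 *
 *	if (written + SLI4_PAGE_SIZE >= size) {
 *		bde[i].tus.f.bdeSize = size - written; // last partial chunk
 *		written = size;
 *		bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
 *	} else {
 *		bde[i].tus.f.bdeSize = SLI4_PAGE_SIZE;
 *		written += SLI4_PAGE_SIZE;
 *	}
 */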
20641 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
20642 * @vport: pointer to vport data structure.
20644  * This function iterates through the mailboxq and cleans up all REG_LOGIN
20645  * and REG_VPI mailbox commands associated with the vport. This function
20646  * is called when the driver wants to restart discovery of the vport due to
20647 * a Clear Virtual Link event.
20650 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
20652 struct lpfc_hba *phba = vport->phba;
20653 LPFC_MBOXQ_t *mb, *nextmb;
20654 struct lpfc_nodelist *ndlp;
20655 struct lpfc_nodelist *act_mbx_ndlp = NULL;
20656 LIST_HEAD(mbox_cmd_list);
20657 uint8_t restart_loop;
20659 /* Clean up internally queued mailbox commands with the vport */
20660 spin_lock_irq(&phba->hbalock);
20661 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
20662 if (mb->vport != vport)
20665 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20666 (mb->u.mb.mbxCommand != MBX_REG_VPI))
20669 list_move_tail(&mb->list, &mbox_cmd_list);
20671 /* Clean up active mailbox command with the vport */
20672 mb = phba->sli.mbox_active;
20673 if (mb && (mb->vport == vport)) {
20674 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
20675 (mb->u.mb.mbxCommand == MBX_REG_VPI))
20676 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20677 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20678 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20680 /* This reference is local to this routine. The
20681 * reference is removed at routine exit.
20683 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
20685 /* Unregister the RPI when mailbox complete */
20686 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20689 /* Cleanup any mailbox completions which are not yet processed */
20692 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
20694 		 * If this mailbox is already processed or it is
20695 		 * for another vport, ignore it.
20697 if ((mb->vport != vport) ||
20698 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
20701 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20702 (mb->u.mb.mbxCommand != MBX_REG_VPI))
20705 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20706 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20707 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20708 /* Unregister the RPI when mailbox complete */
20709 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20711 spin_unlock_irq(&phba->hbalock);
20712 spin_lock(&ndlp->lock);
20713 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20714 spin_unlock(&ndlp->lock);
20715 spin_lock_irq(&phba->hbalock);
20719 } while (restart_loop);
20721 spin_unlock_irq(&phba->hbalock);
20723 /* Release the cleaned-up mailbox commands */
20724 while (!list_empty(&mbox_cmd_list)) {
20725 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
20726 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20727 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20728 mb->ctx_ndlp = NULL;
20730 spin_lock(&ndlp->lock);
20731 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20732 spin_unlock(&ndlp->lock);
20733 lpfc_nlp_put(ndlp);
20736 lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED);
20739 /* Release the ndlp with the cleaned-up active mailbox command */
20740 if (act_mbx_ndlp) {
20741 spin_lock(&act_mbx_ndlp->lock);
20742 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20743 spin_unlock(&act_mbx_ndlp->lock);
20744 lpfc_nlp_put(act_mbx_ndlp);
20749 * lpfc_drain_txq - Drain the txq
20750 * @phba: Pointer to HBA context object.
20752  * This function attempts to submit IOCBs on the txq
20753  * to the adapter. For SLI4 adapters, the txq contains
20754  * ELS IOCBs that have been deferred because there
20755 * are no SGLs. This congestion can occur with large
20756 * vport counts during node discovery.
20760 lpfc_drain_txq(struct lpfc_hba *phba)
20762 LIST_HEAD(completions);
20763 struct lpfc_sli_ring *pring;
20764 struct lpfc_iocbq *piocbq = NULL;
20765 unsigned long iflags = 0;
20766 char *fail_msg = NULL;
20767 uint32_t txq_cnt = 0;
20768 struct lpfc_queue *wq;
20771 if (phba->link_flag & LS_MDS_LOOPBACK) {
20772 		/* MDS WQEs are posted only to the first WQ */
20773 wq = phba->sli4_hba.hdwq[0].io_wq;
20778 wq = phba->sli4_hba.els_wq;
20781 pring = lpfc_phba_elsring(phba);
20784 if (unlikely(!pring) || list_empty(&pring->txq))
20787 spin_lock_irqsave(&pring->ring_lock, iflags);
20788 list_for_each_entry(piocbq, &pring->txq, list) {
20792 if (txq_cnt > pring->txq_max)
20793 pring->txq_max = txq_cnt;
20795 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20797 while (!list_empty(&pring->txq)) {
20798 spin_lock_irqsave(&pring->ring_lock, iflags);
20800 piocbq = lpfc_sli_ringtx_get(phba, pring);
20802 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20803 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20804 "2823 txq empty and txq_cnt is %d\n ",
20810 ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0);
20812 if (ret && ret != IOCB_BUSY) {
20813 fail_msg = " - Cannot send IO ";
20814 piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
20817 piocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
20818 /* Failed means we can't issue and need to cancel */
20819 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20820 "2822 IOCB failed %s iotag 0x%x "
20821 "xri 0x%x %d flg x%x\n",
20822 fail_msg, piocbq->iotag,
20823 piocbq->sli4_xritag, ret,
20825 list_add_tail(&piocbq->list, &completions);
20828 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20829 if (txq_cnt == 0 || ret == IOCB_BUSY)
20832 /* Cancel all the IOCBs that cannot be issued */
20833 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
20834 IOERR_SLI_ABORTED);
20840 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
20841 * @phba: Pointer to HBA context object.
20842 * @pwqeq: Pointer to command WQE.
20843 * @sglq: Pointer to the scatter gather queue object.
20845 * This routine converts the bpl or bde that is in the WQE
20846 * to a sgl list for the sli4 hardware. The physical address
20847 * of the bpl/bde is converted back to a virtual address.
20848  * If the WQE contains a BPL then the list of BDEs is
20849  * converted to sli4_sges. If the WQE contains a single
20850  * BDE then it is converted to a single sli4_sge.
20851 * The WQE is still in cpu endianness so the contents of
20852 * the bpl can be used without byte swapping.
20854 * Returns valid XRI = Success, NO_XRI = Failure.
20857 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
20858 struct lpfc_sglq *sglq)
20860 uint16_t xritag = NO_XRI;
20861 struct ulp_bde64 *bpl = NULL;
20862 struct ulp_bde64 bde;
20863 struct sli4_sge *sgl = NULL;
20864 struct lpfc_dmabuf *dmabuf;
20865 union lpfc_wqe128 *wqe;
20868 uint32_t offset = 0; /* accumulated offset in the sg request list */
20869 int inbound = 0; /* number of sg reply entries inbound from firmware */
20872 if (!pwqeq || !sglq)
20875 sgl = (struct sli4_sge *)sglq->sgl;
20877 	pwqeq->iocb.ulpIoTag = pwqeq->iotag;
20878 	wqe = &pwqeq->wqe;
20879 	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
20880 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
20881 return sglq->sli4_xritag;
20882 numBdes = pwqeq->num_bdes;
20884 /* The addrHigh and addrLow fields within the WQE
20885 * have not been byteswapped yet so there is no
20886 * need to swap them back.
20888 if (pwqeq->bpl_dmabuf)
20889 dmabuf = pwqeq->bpl_dmabuf;
20893 bpl = (struct ulp_bde64 *)dmabuf->virt;
20897 for (i = 0; i < numBdes; i++) {
20898 /* Should already be byte swapped. */
20899 sgl->addr_hi = bpl->addrHigh;
20900 sgl->addr_lo = bpl->addrLow;
20902 sgl->word2 = le32_to_cpu(sgl->word2);
20903 if ((i+1) == numBdes)
20904 bf_set(lpfc_sli4_sge_last, sgl, 1);
20906 bf_set(lpfc_sli4_sge_last, sgl, 0);
20907 /* swap the size field back to the cpu so we
20908 * can assign it to the sgl.
20910 bde.tus.w = le32_to_cpu(bpl->tus.w);
20911 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
20912 /* The offsets in the sgl need to be accumulated
20913 * separately for the request and reply lists.
20914 * The request is always first, the reply follows.
20917 case CMD_GEN_REQUEST64_WQE:
20918 /* add up the reply sg entries */
20919 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
20921 /* first inbound? reset the offset */
20924 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20925 bf_set(lpfc_sli4_sge_type, sgl,
20926 LPFC_SGE_TYPE_DATA);
20927 offset += bde.tus.f.bdeSize;
20929 case CMD_FCP_TRSP64_WQE:
20930 bf_set(lpfc_sli4_sge_offset, sgl, 0);
20931 bf_set(lpfc_sli4_sge_type, sgl,
20932 LPFC_SGE_TYPE_DATA);
20934 case CMD_FCP_TSEND64_WQE:
20935 case CMD_FCP_TRECEIVE64_WQE:
20936 bf_set(lpfc_sli4_sge_type, sgl,
20937 bpl->tus.f.bdeFlags);
20941 offset += bde.tus.f.bdeSize;
20942 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20945 sgl->word2 = cpu_to_le32(sgl->word2);
20949 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
20950 /* The addrHigh and addrLow fields of the BDE have not
20951 * been byteswapped yet so they need to be swapped
20952 * before putting them in the sgl.
20954 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
20955 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
20956 sgl->word2 = le32_to_cpu(sgl->word2);
20957 bf_set(lpfc_sli4_sge_last, sgl, 1);
20958 sgl->word2 = cpu_to_le32(sgl->word2);
20959 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
20961 return sglq->sli4_xritag;
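/*
 * Hedged sketch of the per-SGE endian dance in lpfc_wqe_bpl2sgl(): word2
 * is swapped to CPU order so bit fields can be set, then swapped back to
 * little-endian for the hardware:
 *
 *	sgl->word2 = le32_to_cpu(sgl->word2);
 *	bf_set(lpfc_sli4_sge_last, sgl, (i + 1 == numBdes) ? 1 : 0);
 *	sgl->word2 = cpu_to_le32(sgl->word2);
 */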
20965 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
20966 * @phba: Pointer to HBA context object.
20967 * @qp: Pointer to HDW queue.
20968 * @pwqe: Pointer to command WQE.
20971 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20972 struct lpfc_iocbq *pwqe)
20974 union lpfc_wqe128 *wqe = &pwqe->wqe;
20975 struct lpfc_async_xchg_ctx *ctxp;
20976 struct lpfc_queue *wq;
20977 struct lpfc_sglq *sglq;
20978 struct lpfc_sli_ring *pring;
20979 unsigned long iflags;
20982 /* NVME_LS and NVME_LS ABTS requests. */
20983 if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
20984 pring = phba->sli4_hba.nvmels_wq->pring;
20985 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20987 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
20989 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20992 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20993 pwqe->sli4_xritag = sglq->sli4_xritag;
20994 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
20995 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20998 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20999 pwqe->sli4_xritag);
21000 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
21002 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21006 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21007 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21009 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21013 /* NVME_FCREQ and NVME_ABTS requests */
21014 if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
21015 		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
21016 		wq = qp->io_wq;
21017 		pring = wq->pring;
21019 		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21021 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21023 ret = lpfc_sli4_wq_put(wq, wqe);
21025 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21028 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21029 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21031 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21032 return 0;
21033 }
21035 /* NVMET requests */
21036 if (pwqe->cmd_flag & LPFC_IO_NVMET) {
21037 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21038 wq = qp->io_wq;
21039 pring = wq->pring;
21041 ctxp = pwqe->context_un.axchg;
21042 sglq = ctxp->ctxbuf->sglq;
21043 if (pwqe->sli4_xritag == NO_XRI) {
21044 pwqe->sli4_lxritag = sglq->sli4_lxritag;
21045 pwqe->sli4_xritag = sglq->sli4_xritag;
21046 }
21047 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21048 pwqe->sli4_xritag);
21049 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21051 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21052 qp, wq_access);
21053 ret = lpfc_sli4_wq_put(wq, wqe);
21054 if (ret) {
21055 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21056 return ret;
21057 }
21058 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21059 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21061 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21062 return 0;
21063 }
21064 return WQE_ERROR;
21065 }
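
/*
 * Editor's illustrative sketch (hypothetical caller, not driver code):
 * the fast path issues a prepared WQE on the hardware queue that was
 * chosen when the lpfc_io_buf was allocated, which is how the callers of
 * lpfc_sli4_issue_wqe use it.
 */
static inline int lpfc_example_issue_io(struct lpfc_hba *phba,
					struct lpfc_io_buf *lpfc_cmd)
{
	/* cur_iocbq carries the WQE built by the protocol layer */
	return lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq,
				   &lpfc_cmd->cur_iocbq);
}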
21068 * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
21069 * @phba: Pointer to HBA context object.
21070 * @cmdiocb: Pointer to driver command iocb object.
21071 * @cmpl: completion function.
21073 * Fill in the appropriate fields for the abort WQE and call the internal
21074 * routine lpfc_sli4_issue_wqe to send the WQE.
21075 * This function is called with hbalock held and no ring_lock held.
21077 * RETURNS 0 - SUCCESS
21081 lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
21084 struct lpfc_vport *vport = cmdiocb->vport;
21085 struct lpfc_iocbq *abtsiocb = NULL;
21086 union lpfc_wqe128 *abtswqe;
21087 struct lpfc_io_buf *lpfc_cmd;
21088 int retval = IOCB_ERROR;
21089 u16 xritag = cmdiocb->sli4_xritag;
21091 /*
21092 * The SCSI command cannot be in the txq, and it is in flight because
21093 * pCmd is still pointing at the SCSI command we have to abort. There
21094 * is no need to search the txcmplq. Just send an abort to the FW.
21095 */
21097 abtsiocb = __lpfc_sli_get_iocbq(phba);
21098 if (!abtsiocb)
21099 return WQE_NORESOURCE;
21101 /* Indicate the IO is being aborted by the driver. */
21102 cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
21104 abtswqe = &abtsiocb->wqe;
21105 memset(abtswqe, 0, sizeof(*abtswqe));
21107 if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK))
21108 bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
21109 bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
21110 abtswqe->abort_cmd.rsrvd5 = 0;
21111 abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
21112 bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
21113 bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
21114 bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
21115 bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
21116 bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
21117 bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
21119 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
21120 abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
21121 abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
21122 if (cmdiocb->cmd_flag & LPFC_IO_FCP)
21123 abtsiocb->cmd_flag |= LPFC_IO_FCP;
21124 if (cmdiocb->cmd_flag & LPFC_IO_NVME)
21125 abtsiocb->cmd_flag |= LPFC_IO_NVME;
21126 if (cmdiocb->cmd_flag & LPFC_IO_FOF)
21127 abtsiocb->cmd_flag |= LPFC_IO_FOF;
21128 abtsiocb->vport = vport;
21129 abtsiocb->cmd_cmpl = cmpl;
21131 lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
21132 retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
21134 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21135 "0359 Abort xri x%x, original iotag x%x, "
21136 "abort cmd iotag x%x retval x%x\n",
21137 xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
21139 if (retval) {
21140 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21141 __lpfc_sli_release_iocbq(phba, abtsiocb);
21142 }
21144 return retval;
21145 }
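
/*
 * Editor's illustrative sketch (hypothetical helper, not driver code):
 * per the locking contract noted above, the caller takes hbalock before
 * asking for the abort to be issued. cmpl is the completion callback
 * described by @cmpl in the kernel-doc above.
 */
static inline int lpfc_example_abort_io(struct lpfc_hba *phba,
					struct lpfc_iocbq *cmdiocb,
					void *cmpl)
{
	int ret;

	spin_lock_irq(&phba->hbalock);
	ret = lpfc_sli4_issue_abort_iotag(phba, cmdiocb, cmpl);
	spin_unlock_irq(&phba->hbalock);
	return ret;
}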
21147 #ifdef LPFC_MXP_STAT
21149 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
21150 * @phba: pointer to lpfc hba data structure.
21151 * @hwqid: index of the HWQ this routine operates on.
21153 * The purpose of this routine is to take a snapshot of the pbl, pvt and busy
21154 * counts 15 seconds after a test case starts running.
21156 * The user should call lpfc_debugfs_multixripools_write before running a test
21157 * case to clear stat_snapshot_taken, then start the test case. While the test
21158 * case is running, stat_snapshot_taken is incremented by 1 each time this
21159 * routine is called from the heartbeat timer. When stat_snapshot_taken equals
21160 * LPFC_MXP_SNAPSHOT_TAKEN, the snapshot is taken.
21162 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
21164 struct lpfc_sli4_hdw_queue *qp;
21165 struct lpfc_multixri_pool *multixri_pool;
21166 struct lpfc_pvt_pool *pvt_pool;
21167 struct lpfc_pbl_pool *pbl_pool;
21168 u32 txcmplq_cnt;
21170 qp = &phba->sli4_hba.hdwq[hwqid];
21171 multixri_pool = qp->p_multixri_pool;
21172 if (!multixri_pool)
21173 return;
21175 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
21176 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21177 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21178 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21180 multixri_pool->stat_pbl_count = pbl_pool->count;
21181 multixri_pool->stat_pvt_count = pvt_pool->count;
21182 multixri_pool->stat_busy_count = txcmplq_cnt;
21185 multixri_pool->stat_snapshot_taken++;
21186 }
21187 #endif
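
/*
 * Editor's illustrative sketch: the gate above fires exactly once, on the
 * LPFC_MXP_SNAPSHOT_TAKEN-th heartbeat after the counter was cleared.
 * Reduced to its essentials:
 */
static inline bool lpfc_example_snapshot_due(u32 *ticks, u32 threshold)
{
	/* take the snapshot on the tick that reaches the threshold */
	return (*ticks)++ == threshold;
}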
21190 * lpfc_adjust_pvt_pool_count - Adjust private pool count
21191 * @phba: pointer to lpfc hba data structure.
21192 * @hwqid: index of the HWQ this routine operates on.
21194 * This routine moves some XRIs from the private to the public pool when the
21195 * private pool is not busy.
21197 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
21199 struct lpfc_multixri_pool *multixri_pool;
21200 u32 io_req_count;
21201 u32 prev_io_req_count;
21203 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21204 if (!multixri_pool)
21205 return;
21206 io_req_count = multixri_pool->io_req_count;
21207 prev_io_req_count = multixri_pool->prev_io_req_count;
21209 if (prev_io_req_count != io_req_count) {
21210 /* Private pool is busy */
21211 multixri_pool->prev_io_req_count = io_req_count;
21212 } else {
21213 /* Private pool is not busy.
21214 * Move XRIs from private to public pool.
21215 */
21216 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
21217 }
21218 }
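
/*
 * Editor's illustrative sketch: the idle test above is a request-rate
 * probe between two heartbeat ticks; if the counter did not move, the
 * pool saw no traffic in that interval. A stand-alone analogue:
 */
static inline bool lpfc_example_pool_idle(u32 *prev_count, u32 curr_count)
{
	bool idle = (*prev_count == curr_count);

	*prev_count = curr_count;	/* re-arm for the next interval */
	return idle;
}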
21221 * lpfc_adjust_high_watermark - Adjust high watermark
21222 * @phba: pointer to lpfc hba data structure.
21223 * @hwqid: index of the HWQ this routine operates on.
21225 * This routine sets the high watermark to the number of outstanding XRIs,
21226 * but makes sure the new value stays between xri_limit/2 and xri_limit.
21228 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
21230 u32 new_watermark;
21231 u32 watermark_max;
21232 u32 watermark_min;
21233 u32 xri_limit;
21234 u32 txcmplq_cnt;
21235 u32 abts_io_bufs;
21236 struct lpfc_multixri_pool *multixri_pool;
21237 struct lpfc_sli4_hdw_queue *qp;
21239 qp = &phba->sli4_hba.hdwq[hwqid];
21240 multixri_pool = qp->p_multixri_pool;
21241 if (!multixri_pool)
21242 return;
21243 xri_limit = multixri_pool->xri_limit;
21245 watermark_max = xri_limit;
21246 watermark_min = xri_limit / 2;
21248 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21249 abts_io_bufs = qp->abts_scsi_io_bufs;
21250 abts_io_bufs += qp->abts_nvme_io_bufs;
21252 new_watermark = txcmplq_cnt + abts_io_bufs;
21253 new_watermark = min(watermark_max, new_watermark);
21254 new_watermark = max(watermark_min, new_watermark);
21255 multixri_pool->pvt_pool.high_watermark = new_watermark;
21257 #ifdef LPFC_MXP_STAT
21258 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
21259 new_watermark);
21260 #endif
21261 }
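
/*
 * Editor's illustrative sketch: the min()/max() pair above is the classic
 * clamp idiom; clamp() from linux/minmax.h expresses it in one step when
 * all operands share a type:
 */
static inline u32 lpfc_example_clamp_watermark(u32 outstanding, u32 xri_limit)
{
	return clamp(outstanding, xri_limit / 2, xri_limit);
}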
21264 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
21265 * @phba: pointer to lpfc hba data structure.
21266 * @hwqid: index of the HWQ this routine operates on.
21268 * This routine is called from the heartbeat timer when pvt_pool is idle.
21269 * All free XRIs are moved from the private to the public pool on hwqid in
21270 * two steps: the first step moves (all - low_watermark) XRIs; the second
21271 * step moves the rest.
21273 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
21275 struct lpfc_pbl_pool *pbl_pool;
21276 struct lpfc_pvt_pool *pvt_pool;
21277 struct lpfc_sli4_hdw_queue *qp;
21278 struct lpfc_io_buf *lpfc_ncmd;
21279 struct lpfc_io_buf *lpfc_ncmd_next;
21280 unsigned long iflag;
21281 struct list_head tmp_list;
21282 u32 tmp_count = 0;
21284 qp = &phba->sli4_hba.hdwq[hwqid];
21285 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21286 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21289 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
21290 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
21292 if (pvt_pool->count > pvt_pool->low_watermark) {
21293 /* Step 1: move (all - low_watermark) from pvt_pool
21294 * to pbl_pool.
21295 */
21297 /* Move low watermark of bufs from pvt_pool to tmp_list */
21298 INIT_LIST_HEAD(&tmp_list);
21299 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21300 &pvt_pool->list, list) {
21301 list_move_tail(&lpfc_ncmd->list, &tmp_list);
21302 tmp_count++;
21303 if (tmp_count >= pvt_pool->low_watermark)
21304 break;
21305 }
21307 /* Move all bufs from pvt_pool to pbl_pool */
21308 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21310 /* Move all bufs from tmp_list to pvt_pool */
21311 list_splice(&tmp_list, &pvt_pool->list);
21313 pbl_pool->count += (pvt_pool->count - tmp_count);
21314 pvt_pool->count = tmp_count;
21316 /* Step 2: move the rest from pvt_pool to pbl_pool */
21317 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21318 pbl_pool->count += pvt_pool->count;
21319 pvt_pool->count = 0;
21322 spin_unlock(&pvt_pool->lock);
21323 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21324 }
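
/*
 * Editor's illustrative sketch: step 1 above is a split-by-count over a
 * linked list, built from the same list primitives:
 */
static u32 lpfc_example_keep_first_n(struct list_head *src,
				     struct list_head *keep, u32 n)
{
	struct list_head *pos, *tmp;
	u32 moved = 0;

	list_for_each_safe(pos, tmp, src) {
		if (moved >= n)
			break;
		list_move_tail(pos, keep);
		moved++;
	}
	return moved;	/* number of entries moved to 'keep' */
}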
21327 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21328 * @phba: pointer to lpfc hba data structure
21329 * @qp: pointer to HDW queue
21330 * @pbl_pool: specified public free XRI pool
21331 * @pvt_pool: specified private free XRI pool
21332 * @count: number of XRIs to move
21334 * This routine tries to move some free common bufs from the specified pbl_pool
21335 * to the specified pvt_pool. It might move fewer than count XRIs if there are
21336 * not enough in the public pool.
21339 * true - if XRIs are successfully moved from the specified pbl_pool to the
21340 * specified pvt_pool
21341 * false - if the specified pbl_pool is empty or locked by someone else
21344 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21345 struct lpfc_pbl_pool *pbl_pool,
21346 struct lpfc_pvt_pool *pvt_pool, u32 count)
21348 struct lpfc_io_buf *lpfc_ncmd;
21349 struct lpfc_io_buf *lpfc_ncmd_next;
21350 unsigned long iflag;
21351 int ret;
21353 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
21354 if (ret) {
21355 if (pbl_pool->count) {
21356 /* Move a batch of XRIs from public to private pool */
21357 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
21358 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21359 &pbl_pool->list, list) {
21362 list_move_tail(&lpfc_ncmd->list, &pvt_pool->list);
21363 pvt_pool->count++;
21364 pbl_pool->count--;
21365 count--;
21366 if (count == 0)
21367 break;
21368 }
21371 spin_unlock(&pvt_pool->lock);
21372 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21373 return true;
21374 }
21375 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21376 }
21378 return false;
21379 }
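
/*
 * Editor's illustrative sketch: the trylock above keeps the caller from
 * stalling behind a contended public pool; on failure it simply tries the
 * next pool. The general shape:
 */
static bool lpfc_example_try_drain(spinlock_t *lock, struct list_head *src,
				   struct list_head *dst)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(lock, flags))
		return false;	/* busy - move on to another pool */
	list_splice_init(src, dst);
	spin_unlock_irqrestore(lock, flags);
	return true;
}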
21382 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21383 * @phba: pointer to lpfc hba data structure.
21384 * @hwqid: index of the HWQ this routine operates on.
21385 * @count: number of XRIs to move
21387 * This routine tries to find some free common bufs in one of the public pools
21388 * using a round-robin method. The search starts from the local hwqid, then
21389 * continues from the HWQ found last time (rrb_next_hwqid). Once a public pool
21390 * is found, a batch of free common bufs is moved to the private pool on hwqid.
21391 * It might move fewer than count XRIs if there are not enough in the public pool.
21393 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
21395 struct lpfc_multixri_pool *multixri_pool;
21396 struct lpfc_multixri_pool *next_multixri_pool;
21397 struct lpfc_pvt_pool *pvt_pool;
21398 struct lpfc_pbl_pool *pbl_pool;
21399 struct lpfc_sli4_hdw_queue *qp;
21400 u32 next_hwqid;
21401 u32 hwq_count;
21402 int ret;
21404 qp = &phba->sli4_hba.hdwq[hwqid];
21405 multixri_pool = qp->p_multixri_pool;
21406 pvt_pool = &multixri_pool->pvt_pool;
21407 pbl_pool = &multixri_pool->pbl_pool;
21409 /* Check if local pbl_pool is available */
21410 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
21411 if (ret) {
21412 #ifdef LPFC_MXP_STAT
21413 multixri_pool->local_pbl_hit_count++;
21414 #endif
21415 return;
21416 }
21418 hwq_count = phba->cfg_hdw_queue;
21420 /* Get the next hwqid which was found last time */
21421 next_hwqid = multixri_pool->rrb_next_hwqid;
21423 do {
21424 /* Go to next hwq */
21425 next_hwqid = (next_hwqid + 1) % hwq_count;
21427 next_multixri_pool =
21428 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
21429 pbl_pool = &next_multixri_pool->pbl_pool;
21431 /* Check if the public free xri pool is available */
21432 ret = _lpfc_move_xri_pbl_to_pvt(
21433 phba, qp, pbl_pool, pvt_pool, count);
21435 /* Exit while-loop if success or all hwqid are checked */
21436 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
21438 /* Starting point for the next time */
21439 multixri_pool->rrb_next_hwqid = next_hwqid;
21441 if (!ret) {
21442 /* stats: all public pools are empty */
21443 multixri_pool->pbl_empty_count++;
21444 }
21446 #ifdef LPFC_MXP_STAT
21447 if (ret) {
21448 if (next_hwqid == hwqid)
21449 multixri_pool->local_pbl_hit_count++;
21450 else
21451 multixri_pool->other_pbl_hit_count++;
21452 }
21453 #endif
21454 }
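
/*
 * Editor's illustrative sketch: the loop above is a resumable round-robin
 * scan; keeping the cursor across calls spreads the draining over all the
 * HWQs instead of always hitting the first one. Assumes count > 0.
 */
static int lpfc_example_rr_scan(u32 *cursor, u32 count,
				bool (*try_one)(u32 idx, void *arg),
				void *arg)
{
	u32 start = *cursor, idx = start;

	do {
		idx = (idx + 1) % count;
		if (try_one(idx, arg)) {
			*cursor = idx;	/* resume here next time */
			return idx;
		}
	} while (idx != start);
	return -1;	/* every pool was empty or busy */
}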
21457 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
21458 * @phba: pointer to lpfc hba data structure.
21459 * @hwqid: index of the HWQ this routine operates on.
21461 * This routine gets a batch of XRIs from pbl_pool if pvt_pool has fewer
21462 * XRIs than its low watermark.
21464 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
21466 struct lpfc_multixri_pool *multixri_pool;
21467 struct lpfc_pvt_pool *pvt_pool;
21469 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21470 pvt_pool = &multixri_pool->pvt_pool;
21472 if (pvt_pool->count < pvt_pool->low_watermark)
21473 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21477 * lpfc_release_io_buf - Return one IO buf back to free pool
21478 * @phba: pointer to lpfc hba data structure.
21479 * @lpfc_ncmd: IO buf to be returned.
21480 * @qp: pointer to the HWQ the buffer belongs to.
21482 * This routine returns one IO buf back to free pool. If this is an urgent IO,
21483 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
21484 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
21485 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
21486 * lpfc_io_buf_list_put.
21488 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
21489 struct lpfc_sli4_hdw_queue *qp)
21491 unsigned long iflag;
21492 struct lpfc_pbl_pool *pbl_pool;
21493 struct lpfc_pvt_pool *pvt_pool;
21494 struct lpfc_epd_pool *epd_pool;
21495 u32 txcmplq_cnt;
21496 u32 xri_owned;
21497 u32 xri_limit;
21498 u32 abts_io_bufs;
21500 /* MUST zero fields if buffer is reused by another protocol */
21501 lpfc_ncmd->nvmeCmd = NULL;
21502 lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
21504 if (phba->cfg_xpsgl && !phba->nvmet_support &&
21505 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
21506 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
21508 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
21509 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
21511 if (phba->cfg_xri_rebalancing) {
21512 if (lpfc_ncmd->expedite) {
21513 /* Return to expedite pool */
21514 epd_pool = &phba->epd_pool;
21515 spin_lock_irqsave(&epd_pool->lock, iflag);
21516 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
21517 epd_pool->count++;
21518 spin_unlock_irqrestore(&epd_pool->lock, iflag);
21519 return;
21520 }
21522 /* Avoid invalid access if an IO sneaks in and is being rejected
21523 * just _after_ xri pools are destroyed in lpfc_offline.
21524 * Nothing much can be done at this point.
21525 */
21526 if (!qp->p_multixri_pool)
21527 return;
21529 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21530 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21532 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21533 abts_io_bufs = qp->abts_scsi_io_bufs;
21534 abts_io_bufs += qp->abts_nvme_io_bufs;
21536 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
21537 xri_limit = qp->p_multixri_pool->xri_limit;
21539 #ifdef LPFC_MXP_STAT
21540 if (xri_owned <= xri_limit)
21541 qp->p_multixri_pool->below_limit_count++;
21543 qp->p_multixri_pool->above_limit_count++;
21546 /* XRI goes to either public or private free xri pool
21547 * based on watermark and xri_limit.
21548 */
21549 if ((pvt_pool->count < pvt_pool->low_watermark) ||
21550 (xri_owned < xri_limit &&
21551 pvt_pool->count < pvt_pool->high_watermark)) {
21552 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
21553 qp, free_pvt_pool);
21554 list_add_tail(&lpfc_ncmd->list,
21555 &pvt_pool->list);
21556 pvt_pool->count++;
21557 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21558 } else {
21559 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
21560 qp, free_pub_pool);
21561 list_add_tail(&lpfc_ncmd->list,
21562 &pbl_pool->list);
21563 pbl_pool->count++;
21564 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21565 }
21566 } else {
21567 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
21568 qp, free_xri);
21569 list_add_tail(&lpfc_ncmd->list,
21570 &qp->lpfc_io_buf_list_put);
21571 qp->put_io_bufs++;
21572 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
21573 iflag);
21574 }
21575 }
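
/*
 * Editor's illustrative sketch: the pool-placement test above, isolated.
 * An XRI returns to the private pool while that pool is under its
 * watermarks and the HWQ owns fewer XRIs than its limit; otherwise it
 * goes back to the public pool.
 */
static inline bool lpfc_example_return_to_pvt(u32 pvt_count, u32 low_wm,
					      u32 high_wm, u32 xri_owned,
					      u32 xri_limit)
{
	if (pvt_count < low_wm)
		return true;
	return xri_owned < xri_limit && pvt_count < high_wm;
}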
21578 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
21579 * @phba: pointer to lpfc hba data structure.
21580 * @qp: pointer to HDW queue
21581 * @pvt_pool: pointer to private pool data structure.
21582 * @ndlp: pointer to lpfc nodelist data structure.
21584 * This routine tries to get one free IO buf from private pool.
21587 * pointer to one free IO buf - if private pool is not empty
21588 * NULL - if private pool is empty
21590 static struct lpfc_io_buf *
21591 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
21592 struct lpfc_sli4_hdw_queue *qp,
21593 struct lpfc_pvt_pool *pvt_pool,
21594 struct lpfc_nodelist *ndlp)
21596 struct lpfc_io_buf *lpfc_ncmd;
21597 struct lpfc_io_buf *lpfc_ncmd_next;
21598 unsigned long iflag;
21600 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
21601 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21602 &pvt_pool->list, list) {
21603 if (lpfc_test_rrq_active(
21604 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
21605 continue;
21606 list_del(&lpfc_ncmd->list);
21607 pvt_pool->count--;
21608 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21609 return lpfc_ncmd;
21610 }
21611 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21613 return NULL;
21614 }
21617 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
21618 * @phba: pointer to lpfc hba data structure.
21620 * This routine tries to get one free IO buf from expedite pool.
21623 * pointer to one free IO buf - if expedite pool is not empty
21624 * NULL - if expedite pool is empty
21626 static struct lpfc_io_buf *
21627 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
21629 struct lpfc_io_buf *lpfc_ncmd;
21630 struct lpfc_io_buf *lpfc_ncmd_next;
21631 unsigned long iflag;
21632 struct lpfc_epd_pool *epd_pool;
21634 epd_pool = &phba->epd_pool;
21635 lpfc_ncmd = NULL;
21637 spin_lock_irqsave(&epd_pool->lock, iflag);
21638 if (epd_pool->count > 0) {
21639 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21640 &epd_pool->list, list) {
21641 list_del(&lpfc_ncmd->list);
21642 epd_pool->count--;
21643 break;
21644 }
21645 }
21646 spin_unlock_irqrestore(&epd_pool->lock, iflag);
21648 return lpfc_ncmd;
21649 }
21652 * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs
21653 * @phba: pointer to lpfc hba data structure.
21654 * @ndlp: pointer to lpfc nodelist data structure.
21655 * @hwqid: index of the HWQ this routine operates on.
21656 * @expedite: 1 means this request is urgent.
21658 * This routine will do the following actions and then return a pointer to
21659 * one free IO buf:
21661 * 1. If the private free xri count is zero, move some XRIs from the public
21662 * to the private pool.
21663 * 2. Get one XRI from the private free xri pool.
21664 * 3. If we fail to get one from pvt_pool and this is an expedite request,
21665 * get one free xri from expedite pool.
21667 * Note: ndlp is only used on SCSI side for RRQ testing.
21668 * The caller should pass NULL for ndlp on NVME side.
21671 * pointer to one free IO buf - if private pool is not empty
21672 * NULL - if private pool is empty
21674 static struct lpfc_io_buf *
21675 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
21676 struct lpfc_nodelist *ndlp,
21677 int hwqid, int expedite)
21679 struct lpfc_sli4_hdw_queue *qp;
21680 struct lpfc_multixri_pool *multixri_pool;
21681 struct lpfc_pvt_pool *pvt_pool;
21682 struct lpfc_io_buf *lpfc_ncmd;
21684 qp = &phba->sli4_hba.hdwq[hwqid];
21685 if (!qp) {
21687 lpfc_printf_log(phba, KERN_INFO,
21688 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21689 "5556 NULL qp for hwqid x%x\n", hwqid);
21690 return NULL;
21691 }
21692 multixri_pool = qp->p_multixri_pool;
21693 if (!multixri_pool) {
21694 lpfc_printf_log(phba, KERN_INFO,
21695 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21696 "5557 NULL multixri for hwqid x%x\n", hwqid);
21699 pvt_pool = &multixri_pool->pvt_pool;
21700 if (!pvt_pool) {
21701 lpfc_printf_log(phba, KERN_INFO,
21702 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21703 "5558 NULL pvt_pool for hwqid x%x\n", hwqid);
21704 return NULL;
21705 }
21706 multixri_pool->io_req_count++;
21708 /* If pvt_pool is empty, move some XRIs from public to private pool */
21709 if (pvt_pool->count == 0)
21710 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21712 /* Get one XRI from private free xri pool */
21713 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
21715 if (lpfc_ncmd) {
21716 lpfc_ncmd->hdwq = qp;
21717 lpfc_ncmd->hdwq_no = hwqid;
21718 } else if (expedite) {
21719 /* If we fail to get one from pvt_pool and this is an expedite
21720 * request, get one free xri from expedite pool.
21722 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
21723 }
21725 return lpfc_ncmd;
21726 }
21728 static inline struct lpfc_io_buf *
21729 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
21731 struct lpfc_sli4_hdw_queue *qp;
21732 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
21734 qp = &phba->sli4_hba.hdwq[idx];
21735 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
21736 &qp->lpfc_io_buf_list_get, list) {
21737 if (lpfc_test_rrq_active(phba, ndlp,
21738 lpfc_cmd->cur_iocbq.sli4_lxritag))
21739 continue;
21741 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
21742 continue;
21744 list_del_init(&lpfc_cmd->list);
21745 qp->get_io_bufs--;
21746 lpfc_cmd->hdwq = qp;
21747 lpfc_cmd->hdwq_no = idx;
21748 return lpfc_cmd;
21749 }
21750 return NULL;
21751 }
21754 * lpfc_get_io_buf - Get one IO buffer from free pool
21755 * @phba: The HBA for which this call is being executed.
21756 * @ndlp: pointer to lpfc nodelist data structure.
21757 * @hwqid: index of the HWQ this routine operates on.
21758 * @expedite: 1 means this request is urgent.
21760 * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing==1,
21761 * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0, it
21762 * removes an IO buffer from the head of the @hwqid io_buf_list and returns it.
21764 * Note: ndlp is only used on SCSI side for RRQ testing.
21765 * The caller should pass NULL for ndlp on NVME side.
21769 * Pointer to lpfc_io_buf - Success
21771 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
21772 struct lpfc_nodelist *ndlp,
21773 u32 hwqid, int expedite)
21775 struct lpfc_sli4_hdw_queue *qp;
21776 unsigned long iflag;
21777 struct lpfc_io_buf *lpfc_cmd = NULL;
21779 qp = &phba->sli4_hba.hdwq[hwqid];
21780 if (!qp) {
21782 lpfc_printf_log(phba, KERN_WARNING,
21783 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21784 "5555 NULL qp for hwqid x%x\n", hwqid);
21785 return NULL;
21786 }
21788 if (phba->cfg_xri_rebalancing)
21789 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
21790 phba, ndlp, hwqid, expedite);
21791 else {
21792 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
21793 qp, alloc_xri_get);
21794 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
21795 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
21796 if (!lpfc_cmd) {
21797 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
21798 qp, alloc_xri_put);
21799 list_splice(&qp->lpfc_io_buf_list_put,
21800 &qp->lpfc_io_buf_list_get);
21801 qp->get_io_bufs += qp->put_io_bufs;
21802 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
21803 qp->put_io_bufs = 0;
21804 spin_unlock(&qp->io_buf_list_put_lock);
21805 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
21807 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
21808 }
21809 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
21810 }
21812 return lpfc_cmd;
21813 }
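
/*
 * Editor's illustrative usage sketch (hypothetical caller): allocating an
 * IO buffer for a command on a chosen hardware queue. ndlp enables RRQ
 * filtering on the SCSI side; NVME callers pass NULL, per the note above.
 */
static inline struct lpfc_io_buf *
lpfc_example_alloc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
			  u32 hwqid)
{
	/* expedite=0: a normal IO must not dip into the reserve pool */
	return lpfc_get_io_buf(phba, ndlp, hwqid, 0);
}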
21816 * lpfc_read_object - Retrieve object data from HBA
21817 * @phba: The HBA for which this call is being executed.
21818 * @rdobject: Pathname of object data we want to read.
21819 * @datap: Pointer to where data will be copied to.
21820 * @datasz: size of data area
21822 * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
21823 * The data will be truncated if datasz is not large enough.
21824 * Version 1 is not supported with Embedded mbox cmd, so we must use version 0.
21825 * Returns the number of bytes actually read from the object, or a negative errno on failure.
21828 lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
21831 struct lpfc_mbx_read_object *read_object;
21832 LPFC_MBOXQ_t *mbox;
21833 int rc, length, eof, j, byte_cnt = 0;
21834 uint32_t shdr_status, shdr_add_status;
21835 union lpfc_sli4_cfg_shdr *shdr;
21836 struct lpfc_dmabuf *pcmd;
21837 u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
21839 /* sanity check on queue memory */
21843 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
21844 if (!mbox)
21845 return -ENOMEM;
21846 length = (sizeof(struct lpfc_mbx_read_object) -
21847 sizeof(struct lpfc_sli4_cfg_mhdr));
21848 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
21849 LPFC_MBOX_OPCODE_READ_OBJECT,
21850 length, LPFC_SLI4_MBX_EMBED);
21851 read_object = &mbox->u.mqe.un.read_object;
21852 shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;
21854 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
21855 bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
21856 read_object->u.request.rd_object_offset = 0;
21857 read_object->u.request.rd_object_cnt = 1;
21859 memset((void *)read_object->u.request.rd_object_name, 0,
21861 scnprintf((char *)rd_object_name, sizeof(rd_object_name), "%s", rdobject);
21862 for (j = 0; j < strlen(rdobject); j++)
21863 read_object->u.request.rd_object_name[j] =
21864 cpu_to_le32(rd_object_name[j]);
21866 pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
21867 if (pcmd)
21868 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
21869 if (!pcmd || !pcmd->virt) {
21870 kfree(pcmd);
21871 mempool_free(mbox, phba->mbox_mem_pool);
21872 return -ENOMEM;
21873 }
21874 memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
21875 read_object->u.request.rd_object_hbuf[0].pa_lo =
21876 putPaddrLow(pcmd->phys);
21877 read_object->u.request.rd_object_hbuf[0].pa_hi =
21878 putPaddrHigh(pcmd->phys);
21879 read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;
21881 mbox->vport = phba->pport;
21882 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
21883 mbox->ctx_ndlp = NULL;
21885 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
21886 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
21887 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
21889 if (shdr_status == STATUS_FAILED &&
21890 shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
21891 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
21892 "4674 No port cfg file in FW.\n");
21893 byte_cnt = -ENOENT;
21894 } else if (shdr_status || shdr_add_status || rc) {
21895 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
21896 "2625 READ_OBJECT mailbox failed with "
21897 "status x%x add_status x%x, mbx status x%x\n",
21898 shdr_status, shdr_add_status, rc);
21902 length = read_object->u.response.rd_object_actual_rlen;
21903 eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
21904 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
21905 "2626 READ_OBJECT Success len %d:%d, EOF %d\n",
21906 length, datasz, eof);
21908 /* Detect when the port config file exists but is empty */
21909 if (!length && eof) {
21915 lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
21919 /* This is an embedded SLI4 mailbox with an external buffer allocated.
21920 * Free the pcmd and then cleanup with the correct routine.
21921 */
21922 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
21923 kfree(pcmd);
21924 lpfc_sli4_mbox_cmd_free(phba, mbox);
21925 return byte_cnt;
21926 }
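
/*
 * Editor's illustrative usage sketch (hypothetical caller; the object
 * path below is made up for illustration): reading a small object from
 * the adapter into a local buffer.
 */
static int lpfc_example_read_small_object(struct lpfc_hba *phba)
{
	char path[] = "/driver/example.cfg";	/* hypothetical object name */
	u32 data[64];				/* up to 256 bytes */
	int len;

	len = lpfc_read_object(phba, path, data, sizeof(data));
	if (len < 0)
		return len;	/* e.g. -ENOENT when the object is absent */
	/* ... consume the first 'len' bytes of data ... */
	return 0;
}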
21929 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
21930 * @phba: The HBA for which this call is being executed.
21931 * @lpfc_buf: IO buf structure to append the SGL chunk
21933 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
21934 * and will allocate an SGL chunk if the pool is empty.
21938 * Pointer to sli4_hybrid_sgl - Success
21940 struct sli4_hybrid_sgl *
21941 lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
21943 struct sli4_hybrid_sgl *list_entry = NULL;
21944 struct sli4_hybrid_sgl *tmp = NULL;
21945 struct sli4_hybrid_sgl *allocated_sgl = NULL;
21946 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21947 struct list_head *buf_list = &hdwq->sgl_list;
21948 unsigned long iflags;
21950 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21952 if (likely(!list_empty(buf_list))) {
21953 /* break off 1 chunk from the sgl_list */
21954 list_for_each_entry_safe(list_entry, tmp,
21955 buf_list, list_node) {
21956 list_move_tail(&list_entry->list_node,
21957 &lpfc_buf->dma_sgl_xtra_list);
21961 /* allocate more */
21962 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21963 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
21964 cpu_to_node(hdwq->io_wq->chann));
21966 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21967 "8353 error kmalloc memory for HDWQ "
21969 lpfc_buf->hdwq_no, __func__);
21973 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
21974 GFP_ATOMIC, &tmp->dma_phys_sgl);
21975 if (!tmp->dma_sgl) {
21976 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21977 "8354 error pool_alloc memory for HDWQ "
21979 lpfc_buf->hdwq_no, __func__);
21984 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21985 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
21988 allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
21989 struct sli4_hybrid_sgl,
21992 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21994 return allocated_sgl;
21995 }
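
/*
 * Editor's illustrative usage sketch (hypothetical caller): borrowing an
 * extra SGL chunk for an oversized IO and handing it back afterwards via
 * lpfc_put_sgl_per_hdwq, which returns 0 on success.
 */
static int lpfc_example_use_extra_sgl(struct lpfc_hba *phba,
				      struct lpfc_io_buf *lpfc_buf)
{
	struct sli4_hybrid_sgl *sgl = lpfc_get_sgl_per_hdwq(phba, lpfc_buf);

	if (!sgl)
		return -ENOMEM;
	/* ... build additional SGEs in sgl->dma_sgl ... */
	return lpfc_put_sgl_per_hdwq(phba, lpfc_buf);
}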
21998 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
21999 * @phba: The HBA for which this call is being executed.
22000 * @lpfc_buf: IO buf structure with the SGL chunk
22002 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
22009 lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22012 struct sli4_hybrid_sgl *list_entry = NULL;
22013 struct sli4_hybrid_sgl *tmp = NULL;
22014 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22015 struct list_head *buf_list = &hdwq->sgl_list;
22016 unsigned long iflags;
22018 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22020 if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
22021 list_for_each_entry_safe(list_entry, tmp,
22022 &lpfc_buf->dma_sgl_xtra_list,
22024 list_move_tail(&list_entry->list_node,
22031 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22036 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
22037 * @phba: phba object
22038 * @hdwq: hdwq to cleanup sgl buff resources on
22040 * This routine frees all SGL chunks of hdwq SGL chunk pool.
22046 lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
22047 struct lpfc_sli4_hdw_queue *hdwq)
22049 struct list_head *buf_list = &hdwq->sgl_list;
22050 struct sli4_hybrid_sgl *list_entry = NULL;
22051 struct sli4_hybrid_sgl *tmp = NULL;
22052 unsigned long iflags;
22054 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22056 /* Free sgl pool */
22057 list_for_each_entry_safe(list_entry, tmp,
22058 buf_list, list_node) {
22059 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
22060 list_entry->dma_sgl,
22061 list_entry->dma_phys_sgl);
22062 list_del(&list_entry->list_node);
22066 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22070 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
22071 * @phba: The HBA for which this call is being executed.
22072 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
22074 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
22075 * and will allocate a CMD/RSP buffer if the pool is empty.
22079 * Pointer to fcp_cmd_rsp_buf - Success
22081 struct fcp_cmd_rsp_buf *
22082 lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22083 struct lpfc_io_buf *lpfc_buf)
22085 struct fcp_cmd_rsp_buf *list_entry = NULL;
22086 struct fcp_cmd_rsp_buf *tmp = NULL;
22087 struct fcp_cmd_rsp_buf *allocated_buf = NULL;
22088 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22089 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22090 unsigned long iflags;
22092 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22094 if (likely(!list_empty(buf_list))) {
22095 /* break off 1 chunk from the list */
22096 list_for_each_entry_safe(list_entry, tmp,
22099 list_move_tail(&list_entry->list_node,
22100 &lpfc_buf->dma_cmd_rsp_list);
22104 /* allocate more */
22105 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22106 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22107 cpu_to_node(hdwq->io_wq->chann));
22109 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22110 "8355 error kmalloc memory for HDWQ "
22112 lpfc_buf->hdwq_no, __func__);
22116 tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
22118 &tmp->fcp_cmd_rsp_dma_handle);
22120 if (!tmp->fcp_cmnd) {
22121 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22122 "8356 error pool_alloc memory for HDWQ "
22124 lpfc_buf->hdwq_no, __func__);
22129 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
22130 sizeof(struct fcp_cmnd));
22132 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22133 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
22136 allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
22137 struct fcp_cmd_rsp_buf,
22140 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22142 return allocated_buf;
22146 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
22147 * @phba: The HBA for which this call is being executed.
22148 * @lpfc_buf: IO buf structure with the CMD/RSP buf
22150 * This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool.
22157 lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22158 struct lpfc_io_buf *lpfc_buf)
22161 struct fcp_cmd_rsp_buf *list_entry = NULL;
22162 struct fcp_cmd_rsp_buf *tmp = NULL;
22163 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22164 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22165 unsigned long iflags;
22167 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22169 if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
22170 list_for_each_entry_safe(list_entry, tmp,
22171 &lpfc_buf->dma_cmd_rsp_list,
22173 list_move_tail(&list_entry->list_node,
22180 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22185 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
22186 * @phba: phba object
22187 * @hdwq: hdwq to cleanup cmd rsp buff resources on
22189 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
22195 lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22196 struct lpfc_sli4_hdw_queue *hdwq)
22198 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22199 struct fcp_cmd_rsp_buf *list_entry = NULL;
22200 struct fcp_cmd_rsp_buf *tmp = NULL;
22201 unsigned long iflags;
22203 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22205 /* Free cmd_rsp buf pool */
22206 list_for_each_entry_safe(list_entry, tmp,
22209 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
22210 list_entry->fcp_cmnd,
22211 list_entry->fcp_cmd_rsp_dma_handle);
22212 list_del(&list_entry->list_node);
22216 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22220 * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
22221 * @phba: phba object
22222 * @job: job entry of the command to be posted.
22224 * Fill in the common fields of the WQE for each of the supported commands.
22230 lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
22232 u32 cmnd;
22233 u32 *pcmd;
22234 u32 if_type;
22235 u32 fip, abort_tag;
22236 struct lpfc_nodelist *ndlp = NULL;
22237 union lpfc_wqe128 *wqe = &job->wqe;
22238 u8 command_type = ELS_COMMAND_NON_FIP;
22240 fip = phba->hba_flag & HBA_FIP_SUPPORT;
22241 /* The fcp commands will set command type */
22242 if (job->cmd_flag & LPFC_IO_FCP)
22243 command_type = FCP_COMMAND;
22244 else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
22245 command_type = ELS_COMMAND_FIP;
22246 else
22247 command_type = ELS_COMMAND_NON_FIP;
22249 abort_tag = job->iotag;
22250 cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);
22252 switch (cmnd) {
22253 case CMD_ELS_REQUEST64_WQE:
22256 if_type = bf_get(lpfc_sli_intf_if_type,
22257 &phba->sli4_hba.sli_intf);
22258 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
22259 pcmd = (u32 *)job->cmd_dmabuf->virt;
22260 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
22261 *pcmd == ELS_CMD_SCR ||
22262 *pcmd == ELS_CMD_RDF ||
22263 *pcmd == ELS_CMD_EDC ||
22264 *pcmd == ELS_CMD_RSCN_XMT ||
22265 *pcmd == ELS_CMD_FDISC ||
22266 *pcmd == ELS_CMD_LOGO ||
22267 *pcmd == ELS_CMD_QFPA ||
22268 *pcmd == ELS_CMD_UVEM ||
22269 *pcmd == ELS_CMD_PLOGI)) {
22270 bf_set(els_req64_sp, &wqe->els_req, 1);
22271 bf_set(els_req64_sid, &wqe->els_req,
22272 job->vport->fc_myDID);
22274 if ((*pcmd == ELS_CMD_FLOGI) &&
22275 !(phba->fc_topology ==
22276 LPFC_TOPOLOGY_LOOP))
22277 bf_set(els_req64_sid, &wqe->els_req, 0);
22279 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
22280 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
22281 phba->vpi_ids[job->vport->vpi]);
22283 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
22284 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
22285 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22289 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
22290 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22292 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
22293 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
22294 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
22295 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
22296 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
22297 break;
22298 case CMD_XMIT_ELS_RSP64_WQE:
22302 wqe->xmit_els_rsp.word4 = 0;
22304 if_type = bf_get(lpfc_sli_intf_if_type,
22305 &phba->sli4_hba.sli_intf);
22306 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
22307 if (job->vport->fc_flag & FC_PT2PT) {
22308 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
22309 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
22310 job->vport->fc_myDID);
22311 if (job->vport->fc_myDID == Fabric_DID) {
22312 bf_set(wqe_els_did,
22313 &wqe->xmit_els_rsp.wqe_dest, 0);
22318 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
22319 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
22320 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
22321 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
22322 LPFC_WQE_LENLOC_WORD3);
22323 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
22325 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
22326 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
22327 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
22328 job->vport->fc_myDID);
22329 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
22332 if (phba->sli_rev == LPFC_SLI_REV4) {
22333 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
22334 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22336 if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
22337 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
22338 phba->vpi_ids[job->vport->vpi]);
22340 command_type = OTHER_COMMAND;
22341 break;
22342 case CMD_GEN_REQUEST64_WQE:
22344 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
22345 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
22346 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
22347 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
22348 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
22349 command_type = OTHER_COMMAND;
22350 break;
22351 case CMD_XMIT_SEQUENCE64_WQE:
22352 if (phba->link_flag & LS_LOOPBACK_MODE)
22353 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
22355 wqe->xmit_sequence.rsvd3 = 0;
22356 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
22357 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
22358 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
22359 LPFC_WQE_IOD_WRITE);
22360 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
22361 LPFC_WQE_LENLOC_WORD12);
22362 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
22363 command_type = OTHER_COMMAND;
22364 break;
22365 case CMD_XMIT_BLS_RSP64_WQE:
22366 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
22367 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
22368 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
22369 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
22370 phba->vpi_ids[phba->pport->vpi]);
22371 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
22372 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
22373 LPFC_WQE_LENLOC_NONE);
22374 /* Overwrite the pre-set command type with OTHER_COMMAND */
22375 command_type = OTHER_COMMAND;
22376 break;
22377 case CMD_FCP_ICMND64_WQE: /* task mgmt commands */
22378 case CMD_ABORT_XRI_WQE: /* abort iotag */
22379 case CMD_SEND_FRAME: /* mds loopback */
22380 /* cases already formatted for sli4 wqe - no changes necessary */
22381 return;
22382 default:
22384 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
22385 "6207 Invalid command 0x%x\n",
22386 cmnd);
22387 break;
22388 }
22390 wqe->generic.wqe_com.abort_tag = abort_tag;
22391 bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
22392 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
22393 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
22394 }