/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
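			/* Editorial sketch of the effect (not original driver
			 * commentary): cpu_to_be32() is a no-op on big-endian
			 * hosts and a byte swap on little-endian ones, so this
			 * loop reverses the bytes within each 32-bit word of
			 * the key only on LE systems.  E.g. the first word
			 * "key " (0x6b 0x65 0x79 0x20) is loaded as 0x2079656b
			 * on x86 and stored back as 0x6b657920, i.e. bytes
			 * 0x20 0x79 0x65 0x6b in memory.
			 */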
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for driver's configuring asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set internal async event support flag to 1; otherwise, it will
 * set internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contain option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
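
/*
 * Worked example for the decode above (editorial, illustrative values only):
 * with varWords[7] yielding prg->ver = 10, prg->rev = 2, prg->lev = 1,
 * prg->dist = 1 and prg->num = 3, dist becomes 'a' and OptionROMVersion is
 * formatted as "10.21a3".  The shorter "%d.%d%d" form applies only when
 * dist == 3 ("x") and num == 0.
 */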

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	}
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}
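	/* Example of the nibble-to-ASCII mapping above (editorial note):
	 * a WWNN IEEE byte of 0x3a yields j = 3 for the high nibble
	 * ('0' + 3 = '3') and j = 10 for the low nibble ('a' + 0 = 'a'),
	 * so that byte contributes "3a" to the serial number string.
	 */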

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth,
				mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}
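
/*
 * Usage note (editorial): callers normally reach this routine through the
 * phba->lpfc_hba_init_link method pointer, e.g. from lpfc_config_port_post()
 * above:
 *
 *	rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
 */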

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
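	/* Illustration (editorial): a board whose lmt lacks LMT_16Gb but
	 * whose cfg_link_speed was set to LPFC_USER_LINK_SPEED_16G fails
	 * the check above, logs message 1302 and falls back to
	 * LPFC_USER_LINK_SPEED_AUTO before INIT_LINK is built.
	 */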
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
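
/*
 * Mailbox ownership note (editorial): with flag == MBX_POLL the command has
 * completed by the time lpfc_sli_issue_mbox() returns, so this routine frees
 * pmb itself; with MBX_NOWAIT the completion handler installed above
 * (lpfc_sli_def_mbox_cmpl) is expected to release it instead.
 */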

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 *	rspiocb which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;


	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_els_sgl_list);


	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->hbalock);
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
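
/*
 * Editorial note: phba->lpfc_hba_down_post is a per-SLI-revision method
 * pointer; it is presumably bound to lpfc_hba_down_post_s3() or
 * lpfc_hba_down_post_s4() (declared above) when the driver sets up its
 * per-revision API table elsewhere in this file.
 */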

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

/*
 * lpfc_idle_stat_delay_work - idle_stat tracking
 *
 * This routine tracks per-cq idle_stat and determines polling decisions.
 *
 * Return codes:
 *   None
 **/
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba,
					     idle_stat_delay_work);
	struct lpfc_queue *cq;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_idle_stat *idle_stat;
	u32 i, idle_percent;
	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE ||
	    phba->cmf_active_mode != LPFC_CFG_OFF)
		goto requeue;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		cq = hdwq->io_cq;

		/* Skip if we've already handled this cq's primary CPU */
		if (cq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		/* get_cpu_idle_time returns values as running counters. Thus,
		 * to know the amount for this period, the prior counter values
		 * need to be subtracted from the current counter values.
		 * From there, the idle time stat can be calculated as a
		 * percentage of 100 - the sum of the other consumption times.
		 */
		wall_idle = get_cpu_idle_time(i, &wall, 1);
		diff_idle = wall_idle - idle_stat->prev_idle;
		diff_wall = wall - idle_stat->prev_wall;

		if (diff_wall <= diff_idle)
			busy_time = 0;
		else
			busy_time = diff_wall - diff_idle;

		idle_percent = div64_u64(100 * busy_time, diff_wall);
		idle_percent = 100 - idle_percent;

		if (idle_percent < 15)
			cq->poll_mode = LPFC_QUEUE_WORK;
		else
			cq->poll_mode = LPFC_IRQ_POLL;

		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
	}

requeue:
	schedule_delayed_work(&phba->idle_stat_delay_work,
			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}
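
/*
 * Worked example for the math above (editorial, illustrative numbers): with
 * diff_wall = 1000 and diff_idle = 900, busy_time = 100, the first
 * idle_percent expression yields 10, and 100 - 10 = 90 percent idle, so the
 * cq stays in LPFC_IRQ_POLL mode; only a CPU that was idle less than 15
 * percent of the interval drops back to LPFC_QUEUE_WORK.
 */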

static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
		if (ena_delay[i]) {
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}
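
/*
 * Sizing sketch (editorial): usdelay scales with the per-cpu interrupt
 * count, e.g. eqi->icnt = 20480 gives (20480 >> 10) = 20 steps of
 * LPFC_EQ_DELAY_STEP, clamped to LPFC_MAX_AUTO_EQ_DELAY; an EQ whose
 * last_cpu no longer matches the iterating cpu is first migrated to that
 * cpu's eq_info list before any delay update is applied.
 */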

/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine does some heuristic methods to adjust
 * XRI distribution. The goal is to fully utilize free XRIs.
 **/
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;

	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		/* Adjust XRIs in private pool */
		lpfc_adjust_pvt_pool_count(phba, i);

		/* Adjust high watermark */
		lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
		/* Snapshot pbl, pvt and busy count */
		lpfc_snapshot_mxp(phba, i);
#endif
	}
}

/**
 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * If a HB mbox is not already in progress, this routine will allocate
 * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
 * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
 **/
static int
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int retval;

	/* Is a Heartbeat mbox already in progress */
	if (phba->hba_flag & HBA_HBEAT_INP)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
	pmboxq->vport = phba->pport;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENXIO;
	}
	phba->hba_flag |= HBA_HBEAT_INP;

	return 0;
}
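
/*
 * Return-value note (editorial): 0 means a heartbeat is already in flight or
 * was just issued successfully; -ENOMEM and -ENXIO report allocation and
 * issue failures.  lpfc_hb_timeout_handler() below uses this result to choose
 * between the 5 second LPFC_HB_MBOX_INTERVAL retry and the 30 second
 * LPFC_HB_MBOX_TIMEOUT window.
 */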

/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
 * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
 * of the value of lpfc_enable_hba_heartbeat.
 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
 * try to issue a MBX_HEARTBEAT mbox command.
 **/
void
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
	if (phba->cfg_enable_hba_heartbeat)
		return;
	phba->hba_flag |= HBA_HBEAT_TMO;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_dmabuf *buf_ptr;
	int retval = 0;
	int i, tmo;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
		spin_lock_irq(&phba->pport->work_port_lock);
		if (time_after(phba->last_completion_time +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
				jiffies)) {
			spin_unlock_irq(&phba->pport->work_port_lock);
			if (phba->hba_flag & HBA_HBEAT_INP)
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			else
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			goto out;
		}
		spin_unlock_irq(&phba->pport->work_port_lock);

		/* Check if a MBX_HEARTBEAT is already in progress */
		if (phba->hba_flag & HBA_HBEAT_INP) {
			/*
			 * If heart beat timeout called with HBA_HBEAT_INP set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0459 Adapter heartbeat still outstanding: "
				"last compl time was %d ms.\n",
				jiffies_to_msecs(jiffies
					 - phba->last_completion_time));
			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
		} else {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {

				retval = lpfc_issue_hb_mbox(phba);
				if (retval) {
					tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
					goto out;
				}
				phba->skipped_hb = 0;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					" updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
	} else {
		/* Check to see if we want to force a MBX_HEARTBEAT */
		if (phba->hba_flag & HBA_HBEAT_TMO) {
			retval = lpfc_issue_hb_mbox(phba);
			if (retval)
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			else
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
		tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
	}
out:
	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
}
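
/*
 * Timeline sketch (editorial, values from the lpfc_hb_mbox_cmpl comment
 * above): with heartbeats enabled and an idle link, a MBX_HEARTBEAT goes out
 * at most every LPFC_HB_MBOX_INTERVAL (5) seconds; once one is outstanding
 * the timer is stretched to LPFC_HB_MBOX_TIMEOUT (30) seconds, and I/O
 * completions observed within the interval suppress the next command
 * entirely.
 */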

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it triggers erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears
	 * the host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: send reset/port recovery message.
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;
	LPFC_MBOXQ_t *mboxq;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, driver needs to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");

	/* If we are no wait, the HBA has been reset and is not
	 * functional, thus we should clear
	 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
	 */
	if (mbx_action == LPFC_MBX_NO_WAIT) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
		if (phba->sli.mbox_active) {
			mboxq = phba->sli.mbox_active;
			mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
			__lpfc_mbox_cmpl_put(phba, mboxq);
			phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			phba->sli.mbox_active = NULL;
		}
		spin_unlock_irq(&phba->hbalock);
	}

	lpfc_offline_prep(phba, mbx_action);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}
1920 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1921 * @phba: pointer to lpfc hba data structure.
1923 * This routine is invoked to handle the SLI4 HBA hardware error attention
1927 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1929 struct lpfc_vport *vport = phba->pport;
1930 uint32_t event_data;
1931 struct Scsi_Host *shost;
1933 struct lpfc_register portstat_reg = {0};
1934 uint32_t reg_err1, reg_err2;
1935 uint32_t uerrlo_reg, uemasklo_reg;
1936 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
1937 bool en_rn_msg = true;
1938 struct temp_event temp_event_data;
1939 struct lpfc_register portsmphr_reg;
1942 /* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
1945 if (pci_channel_offline(phba->pcidev)) {
1946 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1947 "3166 pci channel is offline\n");
1948 lpfc_sli4_offline_eratt(phba);
1952 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
1953 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
1956 pci_rd_rc1 = lpfc_readl(
1957 phba->sli4_hba.u.if_type0.UERRLOregaddr,
1959 pci_rd_rc2 = lpfc_readl(
1960 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1962 /* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			break;
1965 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
1966 lpfc_sli4_offline_eratt(phba);
1969 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1970 "7623 Checking UE recoverable");
1972 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
1973 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1974 &portsmphr_reg.word0))
1977 smphr_port_status = bf_get(lpfc_port_smphr_port_status,
1979 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1980 LPFC_PORT_SEM_UE_RECOVERABLE)
			/* Sleep for 1 sec before checking the semaphore */
			msleep(1000);
		}
1986 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1987 "4827 smphr_port_status x%x : Waited %dSec",
1988 smphr_port_status, i);
1990 /* Recoverable UE, reset the HBA device */
1991 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1992 LPFC_PORT_SEM_UE_RECOVERABLE) {
1993 for (i = 0; i < 20; i++) {
1995 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1996 &portsmphr_reg.word0) &&
1997 (LPFC_POST_STAGE_PORT_READY ==
1998 bf_get(lpfc_port_smphr_port_status,
2000 rc = lpfc_sli4_port_sta_fn_reset(phba,
2001 LPFC_MBX_NO_WAIT, en_rn_msg);
2004 lpfc_printf_log(phba, KERN_ERR,
2006 "4215 Failed to recover UE");
2011 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2012 "7624 Firmware not ready: Failing UE recovery,"
2013 " waited %dSec", i);
2014 phba->link_state = LPFC_HBA_ERROR;
2017 case LPFC_SLI_INTF_IF_TYPE_2:
2018 case LPFC_SLI_INTF_IF_TYPE_6:
2019 pci_rd_rc1 = lpfc_readl(
2020 phba->sli4_hba.u.if_type2.STATUSregaddr,
2021 &portstat_reg.word0);
2022 /* consider PCI bus read error as pci_channel_offline */
2023 if (pci_rd_rc1 == -EIO) {
2024 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2025 "3151 PCI bus read access failure: x%x\n",
2026 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2027 lpfc_sli4_offline_eratt(phba);
2030 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
2031 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2032 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
2033 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2034 "2889 Port Overtemperature event, "
2035 "taking port offline Data: x%x x%x\n",
2036 reg_err1, reg_err2);
2038 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
2039 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
2040 temp_event_data.event_code = LPFC_CRIT_TEMP;
2041 temp_event_data.data = 0xFFFFFFFF;
2043 shost = lpfc_shost_from_vport(phba->pport);
2044 fc_host_post_vendor_event(shost, fc_get_event_number(),
2045 sizeof(temp_event_data),
2046 (char *)&temp_event_data,
2047 SCSI_NL_VID_TYPE_PCI
2048 | PCI_VENDOR_ID_EMULEX);
2050 spin_lock_irq(&phba->hbalock);
2051 phba->over_temp_state = HBA_OVER_TEMP;
2052 spin_unlock_irq(&phba->hbalock);
2053 lpfc_sli4_offline_eratt(phba);
2056 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2057 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			en_rn_msg = false;
2062 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2063 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2064 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2065 "3144 Port Down: Debug Dump\n");
2066 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2067 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
2068 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2069 "3145 Port Down: Provisioning\n");
2071 /* If resets are disabled then leave the HBA alone and return */
2072 if (!phba->cfg_enable_hba_reset)
2075 /* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
						 en_rn_msg);
		if (rc == 0) {
2079 /* don't report event on forced debug dump */
2080 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
2086 /* fall through for not able to recover */
2087 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2088 "3152 Unrecoverable error\n");
2089 phba->link_state = LPFC_HBA_ERROR;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
2095 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2096 "3123 Report dump event to upper layer\n");
2097 /* Send an internal error event to mgmt application */
2098 lpfc_board_errevt_to_mgmt(phba);
2100 event_data = FC_REG_DUMP_EVENT;
2101 shost = lpfc_shost_from_vport(vport);
2102 fc_host_post_vendor_event(shost, fc_get_event_number(),
2103 sizeof(event_data), (char *) &event_data,
2104 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2108 * lpfc_handle_eratt - Wrapper func for handling hba error attention
2109 * @phba: pointer to lpfc HBA data structure.
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine, dispatching through the API jump table function pointer in the
 * lpfc_hba struct.
2116 * Any other value - error.
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}
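/*
 * Editor's note: the wrapper above relies on the SLI-rev API jump table.
 * The sketch below (hypothetical names, illustrative only, not compiled
 * into the driver) shows the dispatch pattern: the handler is chosen once,
 * when the API table is set up for the detected SLI revision, and every
 * caller then indirects through the function pointer.
 */
#if 0
struct demo_hba;
typedef void (*demo_eratt_fn)(struct demo_hba *);

struct demo_hba {
	int sli_rev;			/* 3 or 4 */
	demo_eratt_fn handle_eratt;	/* jump-table slot */
};

static void demo_handle_eratt_s3(struct demo_hba *hba) { /* SLI3 path */ }
static void demo_handle_eratt_s4(struct demo_hba *hba) { /* SLI4 path */ }

static void demo_setup_api(struct demo_hba *hba)
{
	/* chosen once at init time, like phba->lpfc_handle_eratt */
	hba->handle_eratt = (hba->sli_rev == 4) ? demo_handle_eratt_s4
						: demo_handle_eratt_s3;
}

static void demo_handle_eratt(struct demo_hba *hba)
{
	(*hba->handle_eratt)(hba);	/* same shape as the wrapper above */
}
#endif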
2125 * lpfc_handle_latt - The HBA link event handler
2126 * @phba: pointer to lpfc hba data structure.
2128 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event. SLI3 only.
 **/
void
2132 lpfc_handle_latt(struct lpfc_hba *phba)
2134 struct lpfc_vport *vport = phba->pport;
2135 struct lpfc_sli *psli = &phba->sli;
2137 volatile uint32_t control;
2138 struct lpfc_dmabuf *mp;
2141 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2144 goto lpfc_handle_latt_err_exit;
2147 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2150 goto lpfc_handle_latt_free_pmb;
2153 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2156 goto lpfc_handle_latt_free_mp;
2159 /* Cleanup any outstanding ELS commands */
2160 lpfc_els_flush_all_cmd(phba);
2162 psli->slistat.link_event++;
2163 lpfc_read_topology(phba, pmb, mp);
2164 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2166 /* Block ELS IOCBs until we have processed this mbox command */
2167 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2168 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
2169 if (rc == MBX_NOT_FINISHED) {
2171 goto lpfc_handle_latt_free_mbuf;
2174 /* Clear Link Attention in HA REG */
2175 spin_lock_irq(&phba->hbalock);
2176 writel(HA_LATT, phba->HAregaddr);
2177 readl(phba->HAregaddr); /* flush */
2178 spin_unlock_irq(&phba->hbalock);
2182 lpfc_handle_latt_free_mbuf:
2183 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2184 lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
2187 lpfc_handle_latt_free_pmb:
2188 mempool_free(pmb, phba->mbox_mem_pool);
2189 lpfc_handle_latt_err_exit:
2190 /* Enable Link attention interrupts */
2191 spin_lock_irq(&phba->hbalock);
2192 psli->sli_flag |= LPFC_PROCESS_LA;
2193 control = readl(phba->HCregaddr);
2194 control |= HC_LAINT_ENA;
2195 writel(control, phba->HCregaddr);
2196 readl(phba->HCregaddr); /* flush */
2198 /* Clear Link Attention in HA REG */
2199 writel(HA_LATT, phba->HAregaddr);
2200 readl(phba->HAregaddr); /* flush */
2201 spin_unlock_irq(&phba->hbalock);
2202 lpfc_linkdown(phba);
2203 phba->link_state = LPFC_HBA_ERROR;
2205 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2206 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2212 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2213 * @phba: pointer to lpfc hba data structure.
2214 * @vpd: pointer to the vital product data.
2215 * @len: length of the vital product data in bytes.
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. This routine populates the ModelName, ProgramType,
 * ModelDesc, and related fields of the phba data structure.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
2226 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2228 uint8_t lenlo, lenhi;
2238 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2239 "0455 Vital Product Data: x%x x%x x%x x%x\n",
2240 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2242 while (!finished && (index < (len - 4))) {
2243 switch (vpd[index]) {
2251 i = ((((unsigned short)lenhi) << 8) + lenlo);
2260 Length = ((((unsigned short)lenhi) << 8) + lenlo);
2261 if (Length > len - index)
2262 Length = len - index;
2263 while (Length > 0) {
2264 /* Look for Serial Number */
2265 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2272 phba->SerialNumber[j++] = vpd[index++];
2276 phba->SerialNumber[j] = 0;
2279 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2280 phba->vpd_flag |= VPD_MODEL_DESC;
2287 phba->ModelDesc[j++] = vpd[index++];
2291 phba->ModelDesc[j] = 0;
2294 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2295 phba->vpd_flag |= VPD_MODEL_NAME;
2302 phba->ModelName[j++] = vpd[index++];
2306 phba->ModelName[j] = 0;
2309 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2310 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2317 phba->ProgramType[j++] = vpd[index++];
2321 phba->ProgramType[j] = 0;
2324 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2325 phba->vpd_flag |= VPD_PORT;
2332 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2333 (phba->sli4_hba.pport_name_sta ==
2334 LPFC_SLI4_PPNAME_GET)) {
2338 phba->Port[j++] = vpd[index++];
2342 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2343 (phba->sli4_hba.pport_name_sta ==
2344 LPFC_SLI4_PPNAME_NON))
2371 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2372 * @phba: pointer to lpfc hba data structure.
2373 * @mdp: pointer to the data structure to hold the derived model name.
2374 * @descp: pointer to the data structure to hold the derived description.
2376 * This routine retrieves HBA's description based on its registered PCI device
2377 * ID. The @descp passed into this function points to an array of 256 chars. It
2378 * shall be returned with the model name, maximum speed, and the host bus type.
2379 * The @mdp passed into this function points to an array of 80 chars. When the
2380 * function returns, the @mdp will be filled with the model name.
2383 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2386 uint16_t dev_id = phba->pcidev->device;
2389 int oneConnect = 0; /* default is not a oneConnect */
2394 } m = {"<Unknown>", "", ""};
	if (mdp && mdp[0] != '\0'
	    && descp && descp[0] != '\0')
		return;
	if (phba->lmt & LMT_64Gb)
		max_speed = 64;
	else if (phba->lmt & LMT_32Gb)
		max_speed = 32;
	else if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else if (phba->lmt & LMT_1Gb)
		max_speed = 1;
	else
		max_speed = 0;
2422 case PCI_DEVICE_ID_FIREFLY:
2423 m = (typeof(m)){"LP6000", "PCI",
2424 "Obsolete, Unsupported Fibre Channel Adapter"};
2426 case PCI_DEVICE_ID_SUPERFLY:
2427 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2428 m = (typeof(m)){"LP7000", "PCI", ""};
2430 m = (typeof(m)){"LP7000E", "PCI", ""};
2431 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2433 case PCI_DEVICE_ID_DRAGONFLY:
2434 m = (typeof(m)){"LP8000", "PCI",
2435 "Obsolete, Unsupported Fibre Channel Adapter"};
2437 case PCI_DEVICE_ID_CENTAUR:
2438 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2439 m = (typeof(m)){"LP9002", "PCI", ""};
2441 m = (typeof(m)){"LP9000", "PCI", ""};
2442 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2444 case PCI_DEVICE_ID_RFLY:
2445 m = (typeof(m)){"LP952", "PCI",
2446 "Obsolete, Unsupported Fibre Channel Adapter"};
2448 case PCI_DEVICE_ID_PEGASUS:
2449 m = (typeof(m)){"LP9802", "PCI-X",
2450 "Obsolete, Unsupported Fibre Channel Adapter"};
2452 case PCI_DEVICE_ID_THOR:
2453 m = (typeof(m)){"LP10000", "PCI-X",
2454 "Obsolete, Unsupported Fibre Channel Adapter"};
2456 case PCI_DEVICE_ID_VIPER:
2457 m = (typeof(m)){"LPX1000", "PCI-X",
2458 "Obsolete, Unsupported Fibre Channel Adapter"};
2460 case PCI_DEVICE_ID_PFLY:
2461 m = (typeof(m)){"LP982", "PCI-X",
2462 "Obsolete, Unsupported Fibre Channel Adapter"};
2464 case PCI_DEVICE_ID_TFLY:
2465 m = (typeof(m)){"LP1050", "PCI-X",
2466 "Obsolete, Unsupported Fibre Channel Adapter"};
2468 case PCI_DEVICE_ID_HELIOS:
2469 m = (typeof(m)){"LP11000", "PCI-X2",
2470 "Obsolete, Unsupported Fibre Channel Adapter"};
2472 case PCI_DEVICE_ID_HELIOS_SCSP:
2473 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2474 "Obsolete, Unsupported Fibre Channel Adapter"};
2476 case PCI_DEVICE_ID_HELIOS_DCSP:
2477 m = (typeof(m)){"LP11002-SP", "PCI-X2",
2478 "Obsolete, Unsupported Fibre Channel Adapter"};
2480 case PCI_DEVICE_ID_NEPTUNE:
2481 m = (typeof(m)){"LPe1000", "PCIe",
2482 "Obsolete, Unsupported Fibre Channel Adapter"};
2484 case PCI_DEVICE_ID_NEPTUNE_SCSP:
2485 m = (typeof(m)){"LPe1000-SP", "PCIe",
2486 "Obsolete, Unsupported Fibre Channel Adapter"};
2488 case PCI_DEVICE_ID_NEPTUNE_DCSP:
2489 m = (typeof(m)){"LPe1002-SP", "PCIe",
2490 "Obsolete, Unsupported Fibre Channel Adapter"};
2492 case PCI_DEVICE_ID_BMID:
2493 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2495 case PCI_DEVICE_ID_BSMB:
2496 m = (typeof(m)){"LP111", "PCI-X2",
2497 "Obsolete, Unsupported Fibre Channel Adapter"};
2499 case PCI_DEVICE_ID_ZEPHYR:
2500 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2502 case PCI_DEVICE_ID_ZEPHYR_SCSP:
2503 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2505 case PCI_DEVICE_ID_ZEPHYR_DCSP:
2506 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2509 case PCI_DEVICE_ID_ZMID:
2510 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2512 case PCI_DEVICE_ID_ZSMB:
2513 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2515 case PCI_DEVICE_ID_LP101:
2516 m = (typeof(m)){"LP101", "PCI-X",
2517 "Obsolete, Unsupported Fibre Channel Adapter"};
2519 case PCI_DEVICE_ID_LP10000S:
2520 m = (typeof(m)){"LP10000-S", "PCI",
2521 "Obsolete, Unsupported Fibre Channel Adapter"};
2523 case PCI_DEVICE_ID_LP11000S:
2524 m = (typeof(m)){"LP11000-S", "PCI-X2",
2525 "Obsolete, Unsupported Fibre Channel Adapter"};
2527 case PCI_DEVICE_ID_LPE11000S:
2528 m = (typeof(m)){"LPe11000-S", "PCIe",
2529 "Obsolete, Unsupported Fibre Channel Adapter"};
2531 case PCI_DEVICE_ID_SAT:
2532 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2534 case PCI_DEVICE_ID_SAT_MID:
2535 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2537 case PCI_DEVICE_ID_SAT_SMB:
2538 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2540 case PCI_DEVICE_ID_SAT_DCSP:
2541 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2543 case PCI_DEVICE_ID_SAT_SCSP:
2544 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2546 case PCI_DEVICE_ID_SAT_S:
2547 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2549 case PCI_DEVICE_ID_HORNET:
2550 m = (typeof(m)){"LP21000", "PCIe",
2551 "Obsolete, Unsupported FCoE Adapter"};
2554 case PCI_DEVICE_ID_PROTEUS_VF:
2555 m = (typeof(m)){"LPev12000", "PCIe IOV",
2556 "Obsolete, Unsupported Fibre Channel Adapter"};
2558 case PCI_DEVICE_ID_PROTEUS_PF:
2559 m = (typeof(m)){"LPev12000", "PCIe IOV",
2560 "Obsolete, Unsupported Fibre Channel Adapter"};
2562 case PCI_DEVICE_ID_PROTEUS_S:
2563 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2564 "Obsolete, Unsupported Fibre Channel Adapter"};
2566 case PCI_DEVICE_ID_TIGERSHARK:
2568 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2570 case PCI_DEVICE_ID_TOMCAT:
2572 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2574 case PCI_DEVICE_ID_FALCON:
2575 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2576 "EmulexSecure Fibre"};
2578 case PCI_DEVICE_ID_BALIUS:
2579 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2580 "Obsolete, Unsupported Fibre Channel Adapter"};
2582 case PCI_DEVICE_ID_LANCER_FC:
2583 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2585 case PCI_DEVICE_ID_LANCER_FC_VF:
2586 m = (typeof(m)){"LPe16000", "PCIe",
2587 "Obsolete, Unsupported Fibre Channel Adapter"};
2589 case PCI_DEVICE_ID_LANCER_FCOE:
2591 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2593 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2595 m = (typeof(m)){"OCe15100", "PCIe",
2596 "Obsolete, Unsupported FCoE"};
2598 case PCI_DEVICE_ID_LANCER_G6_FC:
2599 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2601 case PCI_DEVICE_ID_LANCER_G7_FC:
2602 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2604 case PCI_DEVICE_ID_LANCER_G7P_FC:
2605 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
2607 case PCI_DEVICE_ID_SKYHAWK:
2608 case PCI_DEVICE_ID_SKYHAWK_VF:
2610 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2613 m = (typeof(m)){"Unknown", "", ""};
2617 if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
	/*
	 * oneConnect hbas require special processing; they are all
	 * initiators and we put the port number on the end.
	 */
2623 if (descp && descp[0] == '\0') {
2625 snprintf(descp, 255,
2626 "Emulex OneConnect %s, %s Initiator %s",
	else if (max_speed == 0)
		snprintf(descp, 255,
			 "Emulex %s %s %s",
			 m.name, m.bus, m.function);
2634 snprintf(descp, 255,
2635 "Emulex %s %d%s %s %s",
2636 m.name, max_speed, (GE) ? "GE" : "Gb",
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2643 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
2645 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2647 * This routine posts a given number of IOCBs with the associated DMA buffer
2648 * descriptors specified by the cnt argument to the given IOCB ring.
2651 * The number of IOCBs NOT able to be posted to the IOCB ring.
2654 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2657 struct lpfc_iocbq *iocb;
2658 struct lpfc_dmabuf *mp1, *mp2;
2660 cnt += pring->missbufcnt;
	/* While there are buffers to post */
	while (cnt > 0) {
2664 /* Allocate buffer for command iocb */
2665 iocb = lpfc_sli_get_iocbq(phba);
2667 pring->missbufcnt = cnt;
2672 /* 2 buffers can be posted per command */
2673 /* Allocate buffer to post */
2674 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2676 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2677 if (!mp1 || !mp1->virt) {
2679 lpfc_sli_release_iocbq(phba, iocb);
2680 pring->missbufcnt = cnt;
2684 INIT_LIST_HEAD(&mp1->list);
2685 /* Allocate buffer to post */
2687 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2689 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2691 if (!mp2 || !mp2->virt) {
2693 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2695 lpfc_sli_release_iocbq(phba, iocb);
2696 pring->missbufcnt = cnt;
2700 INIT_LIST_HEAD(&mp2->list);
2705 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2706 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2707 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2708 icmd->ulpBdeCount = 1;
2711 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2712 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2713 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2715 icmd->ulpBdeCount = 2;
2718 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2721 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2723 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2727 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2731 lpfc_sli_release_iocbq(phba, iocb);
2732 pring->missbufcnt = cnt;
2735 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2737 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2739 pring->missbufcnt = 0;
2744 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2745 * @phba: pointer to lpfc hba data structure.
2747 * This routine posts initial receive IOCB buffers to the ELS ring. The
2748 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2749 * set to 64 IOCBs. SLI3 only.
2752 * 0 - success (currently always success)
2755 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2757 struct lpfc_sli *psli = &phba->sli;
2759 /* Ring 0, ELS / CT buffers */
2760 lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2761 /* Ring 2 - FCP no buffers needed */
2766 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
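/*
 * Editor's note: S(N, V) is a 32-bit rotate-left of V by N bits, used by
 * the SHA-1 style hashing below. A minimal sanity check (illustrative
 * only, not compiled into the driver):
 */
#if 0
#include <assert.h>
static void demo_rotl_check(void)
{
	/* the top bit wraps around to bit 0 */
	assert(S(1, 0x80000000U) == 0x00000001U);
	/* rotating left by 5 and then by 27 restores the original value */
	assert(S(27, S(5, 0x12345678U)) == 0x12345678U);
}
#endif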
2769 * lpfc_sha_init - Set up initial array of hash table entries
2770 * @HashResultPointer: pointer to an array as hash table.
 * This routine sets up the initial values in the array of hash table entries
 * for the LC HBAs.
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
2778 HashResultPointer[0] = 0x67452301;
2779 HashResultPointer[1] = 0xEFCDAB89;
2780 HashResultPointer[2] = 0x98BADCFE;
2781 HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}
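/*
 * Editor's note: the five constants above are the standard SHA-1 initial
 * hash values (H0..H4 from FIPS 180-1).
 */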
/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2787 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by
 * @HashResultPointer with the values from the working hash table pointed to
 * by @HashWorkingPointer. The results are put back into the initial hash
 * table, returned through @HashResultPointer as the result hash table.
 **/
static void
2796 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;

	t = 16;
	do {
		HashWorkingPointer[t] =
			S(1,
			  HashWorkingPointer[t - 3] ^
			  HashWorkingPointer[t - 8] ^
			  HashWorkingPointer[t - 14] ^
			  HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	t = 0;
	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);
2834 HashResultPointer[0] += A;
2835 HashResultPointer[1] += B;
2836 HashResultPointer[2] += C;
2837 HashResultPointer[3] += D;
	HashResultPointer[4] += E;
}
2843 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2844 * @RandomChallenge: pointer to the entry of host challenge random number array.
2845 * @HashWorking: pointer to the entry of the working hash array.
2847 * This routine calculates the working hash array referred by @HashWorking
2848 * from the challenge random numbers associated with the host, referred by
2849 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
2853 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}
2859 * lpfc_hba_init - Perform special handling for LC HBA initialization
2860 * @phba: pointer to lpfc hba data structure.
2861 * @hbainit: pointer to an array of unsigned 32-bit integers.
2863 * This routine performs the special handling for LC HBA initialization.
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	HashWorking[0] = HashWorking[78] = *pwwnn++;
2877 HashWorking[1] = HashWorking[79] = *pwwnn;
2879 for (t = 0; t < 7; t++)
2880 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2882 lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);

	kfree(HashWorking);
}
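/*
 * Editor's note: an illustrative sketch (hypothetical caller, not compiled
 * into the driver) of how the helpers above combine, mirroring
 * lpfc_hba_init(): seed the 80-word working array from the WWNN, fold in
 * the random challenge, then run the SHA-1 style init/iterate pass.
 */
#if 0
static void demo_hash(uint32_t hash_out[5], uint32_t wwnn[2],
		      uint32_t challenge[16])
{
	uint32_t working[80] = { 0 };
	int t;

	working[0] = working[78] = wwnn[0];	/* seed with the WWNN */
	working[1] = working[79] = wwnn[1];
	for (t = 0; t < 7; t++)			/* fold in the challenge */
		lpfc_challenge_key(challenge + t, working + t);

	lpfc_sha_init(hash_out);		/* standard SHA-1 IVs */
	lpfc_sha_iterate(hash_out, working);	/* one 80-word block */
}
#endif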
2888 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2889 * @vport: pointer to a virtual N_Port data structure.
2891 * This routine performs the necessary cleanups before deleting the @vport.
2892 * It invokes the discovery state machine to perform necessary state
2893 * transitions and to release the ndlps associated with the @vport. Note,
2894 * the physical port is treated as @vport 0.
2897 lpfc_cleanup(struct lpfc_vport *vport)
2899 struct lpfc_hba *phba = vport->phba;
2900 struct lpfc_nodelist *ndlp, *next_ndlp;
2903 if (phba->link_state > LPFC_LINK_DOWN)
2904 lpfc_port_link_failure(vport);
2906 /* Clean up VMID resources */
2907 if (lpfc_is_vmid_enabled(phba))
2908 lpfc_vmid_vport_cleanup(vport);
2910 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2911 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2912 ndlp->nlp_DID == Fabric_DID) {
2913 /* Just free up ndlp with Fabric_DID for vports */
2918 if (ndlp->nlp_DID == Fabric_Cntl_DID &&
2919 ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
		/* Fabric Ports not in UNMAPPED state are cleaned up in the
		 * DEVICE_RM event.
		 */
2927 if (ndlp->nlp_type & NLP_FABRIC &&
2928 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
2929 lpfc_disc_state_machine(vport, ndlp, NULL,
2930 NLP_EVT_DEVICE_RECOVERY);
2932 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
2933 lpfc_disc_state_machine(vport, ndlp, NULL,
	/* At this point, ALL ndlp's should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Let's wait for this to happen, if needed.
	 */
2941 while (!list_empty(&vport->fc_nodes)) {
2943 lpfc_printf_vlog(vport, KERN_ERR,
2945 "0233 Nodelist not empty\n");
2946 list_for_each_entry_safe(ndlp, next_ndlp,
2947 &vport->fc_nodes, nlp_listp) {
2948 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2950 "0282 did:x%x ndlp:x%px "
2951 "refcnt:%d xflags x%x nflag x%x\n",
2952 ndlp->nlp_DID, (void *)ndlp,
2953 kref_read(&ndlp->kref),
					 ndlp->fc4_xpt_flags,
					 ndlp->nlp_flag);
2960 /* Wait for any activity on ndlps to settle */
2963 lpfc_cleanup_vports_rrqs(vport, NULL);
2967 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2968 * @vport: pointer to a virtual N_Port data structure.
2970 * This routine stops all the timers associated with a @vport. This function
2971 * is invoked before disabling or deleting a @vport. Note that the physical
2972 * port is treated as @vport 0.
2975 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2977 del_timer_sync(&vport->els_tmofunc);
2978 del_timer_sync(&vport->delayed_disc_tmo);
2979 lpfc_can_disctmo(vport);
2984 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2985 * @phba: pointer to lpfc hba data structure.
2987 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2988 * caller of this routine should already hold the host lock.
2991 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2993 /* Clear pending FCF rediscovery wait flag */
2994 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2996 /* Now, try to stop the timer */
2997 del_timer(&phba->fcf.redisc_wait);
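	/* Editor's note: this uses del_timer() rather than del_timer_sync()
	 * because the caller already holds the host lock; waiting here for a
	 * callback that may itself take that lock could deadlock.
	 */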
3001 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3002 * @phba: pointer to lpfc hba data structure.
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the host
 * lock held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
 **/
void
3010 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3012 spin_lock_irq(&phba->hbalock);
3013 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3014 /* FCF rediscovery timer already fired or stopped */
3015 spin_unlock_irq(&phba->hbalock);
3018 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3019 /* Clear failover in progress flags */
3020 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3021 spin_unlock_irq(&phba->hbalock);
3025 * lpfc_cmf_stop - Stop CMF processing
3026 * @phba: pointer to lpfc hba data structure.
 * This is called when the link goes down or if CMF mode is turned OFF.
 * It is also called when going offline or unloading, just before the
 * congestion info buffer is unregistered.
 **/
void
3033 lpfc_cmf_stop(struct lpfc_hba *phba)
3036 struct lpfc_cgn_stat *cgs;
3038 /* We only do something if CMF is enabled */
3039 if (!phba->sli4_hba.pc_sli4_params.cmf)
3042 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3043 "6221 Stop CMF / Cancel Timer\n");
3045 /* Cancel the CMF timer */
3046 hrtimer_cancel(&phba->cmf_timer);
3048 /* Zero CMF counters */
3049 atomic_set(&phba->cmf_busy, 0);
3050 for_each_present_cpu(cpu) {
3051 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3052 atomic64_set(&cgs->total_bytes, 0);
3053 atomic64_set(&cgs->rcv_bytes, 0);
3054 atomic_set(&cgs->rx_io_cnt, 0);
3055 atomic64_set(&cgs->rx_latency, 0);
3057 atomic_set(&phba->cmf_bw_wait, 0);
3059 /* Resume any blocked IO - Queue unblock on workqueue */
3060 queue_work(phba->wq, &phba->unblock_request_work);
3063 static inline uint64_t
3064 lpfc_get_max_line_rate(struct lpfc_hba *phba)
{
	uint64_t rate = lpfc_sli_port_speed_get(phba);
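	/* Editor's note: this scales the reported link speed into a
	 * bytes-per-second line rate, which the CMF code below uses to
	 * budget bytes per timer interval.
	 */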
	return ((((unsigned long)rate) * 1024 * 1024) / 10);
}
3072 lpfc_cmf_signal_init(struct lpfc_hba *phba)
3074 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3075 "6223 Signal CMF init\n");
3077 /* Use the new fc_linkspeed to recalculate */
3078 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
3079 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
3080 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
3081 phba->cmf_interval_rate, 1000);
3082 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
3084 /* This is a signal to firmware to sync up CMF BW with link speed */
3085 lpfc_issue_cmf_sync_wqe(phba, 0, 0);
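/*
 * Editor's worked example of the budgeting math above: with a line rate of
 * R bytes/sec and a timer interval of I milliseconds, the per-interval
 * byte budget is R * I / 1000; lpfc_cmf_signal_init() starts with
 * cmf_max_bytes_per_interval equal to that full link byte count.
 */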
3089 * lpfc_cmf_start - Start CMF processing
3090 * @phba: pointer to lpfc hba data structure.
 * This is called when the link comes up or when CMF mode is turned
 * from OFF to Monitor or Managed.
 **/
void
3096 lpfc_cmf_start(struct lpfc_hba *phba)
3098 struct lpfc_cgn_stat *cgs;
3101 /* We only do something if CMF is enabled */
3102 if (!phba->sli4_hba.pc_sli4_params.cmf ||
3103 phba->cmf_active_mode == LPFC_CFG_OFF)
3106 /* Reinitialize congestion buffer info */
3107 lpfc_init_congestion_buf(phba);
3109 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
3110 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
3111 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
3112 atomic_set(&phba->cgn_sync_warn_cnt, 0);
3114 atomic_set(&phba->cmf_busy, 0);
3115 for_each_present_cpu(cpu) {
3116 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3117 atomic64_set(&cgs->total_bytes, 0);
3118 atomic64_set(&cgs->rcv_bytes, 0);
3119 atomic_set(&cgs->rx_io_cnt, 0);
3120 atomic64_set(&cgs->rx_latency, 0);
3122 phba->cmf_latency.tv_sec = 0;
3123 phba->cmf_latency.tv_nsec = 0;
3125 lpfc_cmf_signal_init(phba);
3127 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3128 "6222 Start CMF / Timer\n");
3130 phba->cmf_timer_cnt = 0;
3131 hrtimer_start(&phba->cmf_timer,
3132 ktime_set(0, LPFC_CMF_INTERVAL * 1000000),
3134 /* Setup for latency check in IO cmpl routines */
3135 ktime_get_real_ts64(&phba->cmf_latency);
3137 atomic_set(&phba->cmf_bw_wait, 0);
3138 atomic_set(&phba->cmf_stop_io, 0);
3142 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
3143 * @phba: pointer to lpfc hba data structure.
 * This routine stops all the timers associated with an HBA. This function is
 * invoked before either putting an HBA offline or unloading the driver.
 **/
void
3149 lpfc_stop_hba_timers(struct lpfc_hba *phba)
3152 lpfc_stop_vport_timers(phba->pport);
3153 cancel_delayed_work_sync(&phba->eq_delay_work);
3154 cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3155 del_timer_sync(&phba->sli.mbox_tmo);
3156 del_timer_sync(&phba->fabric_block_timer);
3157 del_timer_sync(&phba->eratt_poll);
3158 del_timer_sync(&phba->hb_tmofunc);
3159 if (phba->sli_rev == LPFC_SLI_REV4) {
3160 del_timer_sync(&phba->rrq_tmr);
3161 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3163 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3165 switch (phba->pci_dev_grp) {
3166 case LPFC_PCI_DEV_LP:
3167 /* Stop any LightPulse device specific driver timers */
3168 del_timer_sync(&phba->fcp_poll_timer);
3170 case LPFC_PCI_DEV_OC:
3171 /* Stop any OneConnect device specific driver timers */
3172 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3175 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3176 "0297 Invalid device group (x%x)\n",
3184 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
3185 * @phba: pointer to lpfc hba data structure.
3186 * @mbx_action: flag for mailbox no wait action.
 * This routine marks an HBA's management interface as blocked. Once the
 * HBA's management interface is marked blocked, all user space access to
 * the HBA, whether from the sysfs or the libdfc interface, will be
 * blocked. The HBA is set to block the management interface when the
 * driver prepares the HBA interface for online or offline.
 **/
static void
3195 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3197 unsigned long iflag;
3198 uint8_t actcmd = MBX_HEARTBEAT;
3199 unsigned long timeout;
3201 spin_lock_irqsave(&phba->hbalock, iflag);
3202 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3203 spin_unlock_irqrestore(&phba->hbalock, iflag);
3204 if (mbx_action == LPFC_MBX_NO_WAIT)
3206 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3207 spin_lock_irqsave(&phba->hbalock, iflag);
3208 if (phba->sli.mbox_active) {
3209 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3210 /* Determine how long we might wait for the active mailbox
3211 * command to be gracefully completed by firmware.
3213 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3214 phba->sli.mbox_active) * 1000) + jiffies;
3216 spin_unlock_irqrestore(&phba->hbalock, iflag);
	/* Wait for the outstanding mailbox command to complete */
3219 while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
3223 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3224 "2813 Mgmt IO is Blocked %x "
3225 "- mbox cmd %x still active\n",
3226 phba->sli.sli_flag, actcmd);
3233 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3234 * @phba: pointer to lpfc hba data structure.
 * Allocate RPIs for all active remote nodes. This is needed whenever
 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
 * is to fix up the temporary rpi assignments.
 **/
void
3241 lpfc_sli4_node_prep(struct lpfc_hba *phba)
3243 struct lpfc_nodelist *ndlp, *next_ndlp;
3244 struct lpfc_vport **vports;
3247 if (phba->sli_rev != LPFC_SLI_REV4)
3250 vports = lpfc_create_vport_work_array(phba);
3254 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3255 if (vports[i]->load_flag & FC_UNLOADING)
3258 list_for_each_entry_safe(ndlp, next_ndlp,
3259 &vports[i]->fc_nodes,
3261 rpi = lpfc_sli4_alloc_rpi(phba);
3262 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3263 /* TODO print log? */
3266 ndlp->nlp_rpi = rpi;
3267 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3268 LOG_NODE | LOG_DISCOVERY,
3269 "0009 Assign RPI x%x to ndlp x%px "
3270 "DID:x%06x flg:x%x\n",
3271 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3275 lpfc_destroy_vport_work_array(phba, vports);
3279 * lpfc_create_expedite_pool - create expedite pool
3280 * @phba: pointer to lpfc hba data structure.
 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
 * to the expedite pool and marks them as expedite.
 **/
3285 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3287 struct lpfc_sli4_hdw_queue *qp;
3288 struct lpfc_io_buf *lpfc_ncmd;
3289 struct lpfc_io_buf *lpfc_ncmd_next;
3290 struct lpfc_epd_pool *epd_pool;
3291 unsigned long iflag;
3293 epd_pool = &phba->epd_pool;
3294 qp = &phba->sli4_hba.hdwq[0];
3296 spin_lock_init(&epd_pool->lock);
3297 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3298 spin_lock(&epd_pool->lock);
3299 INIT_LIST_HEAD(&epd_pool->list);
3300 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3301 &qp->lpfc_io_buf_list_put, list) {
		list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
		lpfc_ncmd->expedite = true;
		qp->put_io_bufs--;
		epd_pool->count++;
		if (epd_pool->count >= XRI_BATCH)
			break;
	}
	spin_unlock(&epd_pool->lock);
3310 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3314 * lpfc_destroy_expedite_pool - destroy expedite pool
3315 * @phba: pointer to lpfc hba data structure.
 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
 * of HWQ 0 and clears the expedite mark.
 **/
3320 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3322 struct lpfc_sli4_hdw_queue *qp;
3323 struct lpfc_io_buf *lpfc_ncmd;
3324 struct lpfc_io_buf *lpfc_ncmd_next;
3325 struct lpfc_epd_pool *epd_pool;
3326 unsigned long iflag;
3328 epd_pool = &phba->epd_pool;
3329 qp = &phba->sli4_hba.hdwq[0];
3331 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3332 spin_lock(&epd_pool->lock);
3333 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3334 &epd_pool->list, list) {
		list_move_tail(&lpfc_ncmd->list,
			       &qp->lpfc_io_buf_list_put);
		lpfc_ncmd->expedite = false;
		qp->put_io_bufs++;
		epd_pool->count--;
	}
	spin_unlock(&epd_pool->lock);
3342 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3346 * lpfc_create_multixri_pools - create multi-XRI pools
3347 * @phba: pointer to lpfc hba data structure.
 * This routine initializes the public and private pools for each HWQ, then
 * moves XRIs from lpfc_io_buf_list_put to the public pool. High and low
 * watermarks are set here as well.
 **/
3358 struct lpfc_io_buf *lpfc_ncmd;
3359 struct lpfc_io_buf *lpfc_ncmd_next;
3360 unsigned long iflag;
3361 struct lpfc_sli4_hdw_queue *qp;
3362 struct lpfc_multixri_pool *multixri_pool;
3363 struct lpfc_pbl_pool *pbl_pool;
3364 struct lpfc_pvt_pool *pvt_pool;
3366 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3367 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3368 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3369 phba->sli4_hba.io_xri_cnt);
3371 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3372 lpfc_create_expedite_pool(phba);
3374 hwq_count = phba->cfg_hdw_queue;
3375 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3377 for (i = 0; i < hwq_count; i++) {
3378 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3380 if (!multixri_pool) {
3381 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3382 "1238 Failed to allocate memory for "
3385 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3386 lpfc_destroy_expedite_pool(phba);
3390 qp = &phba->sli4_hba.hdwq[j];
3391 kfree(qp->p_multixri_pool);
3394 phba->cfg_xri_rebalancing = 0;
3398 qp = &phba->sli4_hba.hdwq[i];
3399 qp->p_multixri_pool = multixri_pool;
3401 multixri_pool->xri_limit = count_per_hwq;
3402 multixri_pool->rrb_next_hwqid = i;
3404 /* Deal with public free xri pool */
3405 pbl_pool = &multixri_pool->pbl_pool;
3406 spin_lock_init(&pbl_pool->lock);
3407 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3408 spin_lock(&pbl_pool->lock);
3409 INIT_LIST_HEAD(&pbl_pool->list);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &qp->lpfc_io_buf_list_put, list) {
			list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
			qp->put_io_bufs--;
			pbl_pool->count++;
		}
3416 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3417 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3418 pbl_pool->count, i);
3419 spin_unlock(&pbl_pool->lock);
3420 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3422 /* Deal with private free xri pool */
3423 pvt_pool = &multixri_pool->pvt_pool;
3424 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3425 pvt_pool->low_watermark = XRI_BATCH;
3426 spin_lock_init(&pvt_pool->lock);
3427 spin_lock_irqsave(&pvt_pool->lock, iflag);
3428 INIT_LIST_HEAD(&pvt_pool->list);
3429 pvt_pool->count = 0;
3430 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
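/*
 * Editor's sizing example (illustrative numbers, not driver policy): with
 * io_xri_cnt = 896 and cfg_hdw_queue = 8, each HWQ gets xri_limit = 112,
 * so pvt_pool->high_watermark = 56 while pvt_pool->low_watermark stays at
 * XRI_BATCH.
 */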
3435 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3436 * @phba: pointer to lpfc hba data structure.
3438 * This routine returns XRIs from public/private to lpfc_io_buf_list_put.
3440 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3444 struct lpfc_io_buf *lpfc_ncmd;
3445 struct lpfc_io_buf *lpfc_ncmd_next;
3446 unsigned long iflag;
3447 struct lpfc_sli4_hdw_queue *qp;
3448 struct lpfc_multixri_pool *multixri_pool;
3449 struct lpfc_pbl_pool *pbl_pool;
3450 struct lpfc_pvt_pool *pvt_pool;
3452 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3453 lpfc_destroy_expedite_pool(phba);
3455 if (!(phba->pport->load_flag & FC_UNLOADING))
3456 lpfc_sli_flush_io_rings(phba);
3458 hwq_count = phba->cfg_hdw_queue;
3460 for (i = 0; i < hwq_count; i++) {
3461 qp = &phba->sli4_hba.hdwq[i];
3462 multixri_pool = qp->p_multixri_pool;
3466 qp->p_multixri_pool = NULL;
3468 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3470 /* Deal with public free xri pool */
3471 pbl_pool = &multixri_pool->pbl_pool;
3472 spin_lock(&pbl_pool->lock);
3474 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3475 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3476 pbl_pool->count, i);
3478 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3479 &pbl_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list,
				       &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
		}
3486 INIT_LIST_HEAD(&pbl_pool->list);
3487 pbl_pool->count = 0;
3489 spin_unlock(&pbl_pool->lock);
3491 /* Deal with private free xri pool */
3492 pvt_pool = &multixri_pool->pvt_pool;
3493 spin_lock(&pvt_pool->lock);
3495 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3496 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3497 pvt_pool->count, i);
3499 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3500 &pvt_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list,
				       &qp->lpfc_io_buf_list_put);
			qp->put_io_bufs++;
		}
3507 INIT_LIST_HEAD(&pvt_pool->list);
3508 pvt_pool->count = 0;
3510 spin_unlock(&pvt_pool->lock);
3511 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3513 kfree(multixri_pool);
3518 * lpfc_online - Initialize and bring a HBA online
3519 * @phba: pointer to lpfc hba data structure.
 * This routine initializes the HBA and brings an HBA online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA interfering with the driver initialization.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/
int
3530 lpfc_online(struct lpfc_hba *phba)
3532 struct lpfc_vport *vport;
3533 struct lpfc_vport **vports;
3535 bool vpis_cleared = false;
3539 vport = phba->pport;
3541 if (!(vport->fc_flag & FC_OFFLINE_MODE))
3544 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3545 "0458 Bring Adapter online\n");
3547 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3549 if (phba->sli_rev == LPFC_SLI_REV4) {
3550 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3551 lpfc_unblock_mgmt_io(phba);
3554 spin_lock_irq(&phba->hbalock);
3555 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3556 vpis_cleared = true;
3557 spin_unlock_irq(&phba->hbalock);
3559 /* Reestablish the local initiator port.
3560 * The offline process destroyed the previous lport.
3562 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3563 !phba->nvmet_support) {
3564 error = lpfc_nvme_create_localport(phba->pport);
3566 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3567 "6132 NVME restore reg failed "
3568 "on nvmei error x%x\n", error);
3571 lpfc_sli_queue_init(phba);
3572 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3573 lpfc_unblock_mgmt_io(phba);
3578 vports = lpfc_create_vport_work_array(phba);
3579 if (vports != NULL) {
3580 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3581 struct Scsi_Host *shost;
3582 shost = lpfc_shost_from_vport(vports[i]);
3583 spin_lock_irq(shost->host_lock);
3584 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3585 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3586 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3587 if (phba->sli_rev == LPFC_SLI_REV4) {
3588 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3589 if ((vpis_cleared) &&
3590 (vports[i]->port_type !=
3591 LPFC_PHYSICAL_PORT))
3594 spin_unlock_irq(shost->host_lock);
3597 lpfc_destroy_vport_work_array(phba, vports);
3599 if (phba->cfg_xri_rebalancing)
3600 lpfc_create_multixri_pools(phba);
3602 lpfc_cpuhp_add(phba);
3604 lpfc_unblock_mgmt_io(phba);
3609 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
3610 * @phba: pointer to lpfc hba data structure.
 * This routine marks an HBA's management interface as not blocked. Once the
 * HBA's management interface is marked not blocked, all user space access
 * to the HBA, whether from the sysfs or the libdfc interface, will be
 * allowed. The HBA is set to block the management interface when the driver
 * prepares the HBA interface for online or offline, and is then set to
 * unblock the management interface afterwards.
 **/
void
lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
3622 unsigned long iflag;
3624 spin_lock_irqsave(&phba->hbalock, iflag);
3625 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3626 spin_unlock_irqrestore(&phba->hbalock, iflag);
3630 * lpfc_offline_prep - Prepare a HBA to be brought offline
3631 * @phba: pointer to lpfc hba data structure.
3632 * @mbx_action: flag for mailbox shutdown action.
 * This routine is invoked to prepare an HBA to be brought offline. It issues
 * an unreg_login to all the nodes on all vports and flushes the mailbox
 * queue to make it ready to be brought offline.
 **/
void
3639 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3641 struct lpfc_vport *vport = phba->pport;
3642 struct lpfc_nodelist *ndlp, *next_ndlp;
3643 struct lpfc_vport **vports;
3644 struct Scsi_Host *shost;
3647 if (vport->fc_flag & FC_OFFLINE_MODE)
3650 lpfc_block_mgmt_io(phba, mbx_action);
3652 lpfc_linkdown(phba);
3654 /* Issue an unreg_login to all nodes on all vports */
3655 vports = lpfc_create_vport_work_array(phba);
3656 if (vports != NULL) {
3657 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3658 if (vports[i]->load_flag & FC_UNLOADING)
3660 shost = lpfc_shost_from_vport(vports[i]);
3661 spin_lock_irq(shost->host_lock);
3662 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3663 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3664 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3665 spin_unlock_irq(shost->host_lock);
3667 shost = lpfc_shost_from_vport(vports[i]);
3668 list_for_each_entry_safe(ndlp, next_ndlp,
3669 &vports[i]->fc_nodes,
3672 spin_lock_irq(&ndlp->lock);
3673 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3674 spin_unlock_irq(&ndlp->lock);
3676 lpfc_unreg_rpi(vports[i], ndlp);
3678 * Whenever an SLI4 port goes offline, free the
3679 * RPI. Get a new RPI when the adapter port
3680 * comes back online.
3682 if (phba->sli_rev == LPFC_SLI_REV4) {
3683 lpfc_printf_vlog(vports[i], KERN_INFO,
3684 LOG_NODE | LOG_DISCOVERY,
3685 "0011 Free RPI x%x on "
3686 "ndlp: x%px did x%x\n",
3687 ndlp->nlp_rpi, ndlp,
3689 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3690 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3693 if (ndlp->nlp_type & NLP_FABRIC) {
3694 lpfc_disc_state_machine(vports[i], ndlp,
3695 NULL, NLP_EVT_DEVICE_RECOVERY);
3697 /* Don't remove the node unless the node
3698 * has been unregistered with the
3699 * transport, and we're not in recovery
3700 * before dev_loss_tmo triggered.
3701 * Otherwise, let dev_loss take care of
3704 if (!(ndlp->save_flags &
3705 NLP_IN_RECOV_POST_DEV_LOSS) &&
3706 !(ndlp->fc4_xpt_flags &
3707 (NVME_XPT_REGD | SCSI_XPT_REGD)))
3708 lpfc_disc_state_machine
3716 lpfc_destroy_vport_work_array(phba, vports);
3718 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3721 flush_workqueue(phba->wq);
3725 * lpfc_offline - Bring a HBA offline
3726 * @phba: pointer to lpfc hba data structure.
 * This routine actually brings an HBA offline. It stops all the timers
 * associated with the HBA, brings down the SLI layer, and eventually
 * marks the HBA as in offline state for the upper layer protocol.
 **/
void
3733 lpfc_offline(struct lpfc_hba *phba)
3735 struct Scsi_Host *shost;
3736 struct lpfc_vport **vports;
3739 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3742 /* stop port and all timers associated with this hba */
3743 lpfc_stop_port(phba);
3745 /* Tear down the local and target port registrations. The
	 * nvme transports need to clean up.
	 */
3748 lpfc_nvmet_destroy_targetport(phba);
3749 lpfc_nvme_destroy_localport(phba->pport);
3751 vports = lpfc_create_vport_work_array(phba);
3753 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3754 lpfc_stop_vport_timers(vports[i]);
3755 lpfc_destroy_vport_work_array(phba, vports);
3756 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3757 "0460 Bring Adapter offline\n");
	/* Bring down the SLI Layer and cleanup. The HBA is offline
	 * now.
	 */
3760 lpfc_sli_hba_down(phba);
3761 spin_lock_irq(&phba->hbalock);
3763 spin_unlock_irq(&phba->hbalock);
3764 vports = lpfc_create_vport_work_array(phba);
3766 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3767 shost = lpfc_shost_from_vport(vports[i]);
3768 spin_lock_irq(shost->host_lock);
3769 vports[i]->work_port_events = 0;
3770 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3771 spin_unlock_irq(shost->host_lock);
3773 lpfc_destroy_vport_work_array(phba, vports);
	/* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
	 * by the unload path instead.
	 */
3777 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3778 __lpfc_cpuhp_remove(phba);
3780 if (phba->cfg_xri_rebalancing)
3781 lpfc_destroy_multixri_pools(phba);
3785 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3786 * @phba: pointer to lpfc hba data structure.
 * This routine frees all the SCSI buffers and IOCBs from the driver lists
 * back to the kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 **/
3793 lpfc_scsi_free(struct lpfc_hba *phba)
3795 struct lpfc_io_buf *sb, *sb_next;
3797 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3800 spin_lock_irq(&phba->hbalock);
3802 /* Release all the lpfc_scsi_bufs maintained by this host. */
3804 spin_lock(&phba->scsi_buf_list_put_lock);
3805 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3807 list_del(&sb->list);
3808 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3811 phba->total_scsi_bufs--;
3813 spin_unlock(&phba->scsi_buf_list_put_lock);
3815 spin_lock(&phba->scsi_buf_list_get_lock);
3816 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3818 list_del(&sb->list);
3819 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3822 phba->total_scsi_bufs--;
3824 spin_unlock(&phba->scsi_buf_list_get_lock);
3825 spin_unlock_irq(&phba->hbalock);
3829 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
3830 * @phba: pointer to lpfc hba data structure.
 * This routine frees all the IO buffers and IOCBs from the driver lists
 * back to the kernel. It is called from lpfc_pci_remove_one to free
 * the internal resources before the device is removed from the system.
 **/
3837 lpfc_io_free(struct lpfc_hba *phba)
3839 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
3840 struct lpfc_sli4_hdw_queue *qp;
3843 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3844 qp = &phba->sli4_hba.hdwq[idx];
3845 /* Release all the lpfc_nvme_bufs maintained by this host. */
3846 spin_lock(&qp->io_buf_list_put_lock);
3847 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3848 &qp->lpfc_io_buf_list_put,
3850 list_del(&lpfc_ncmd->list);
3852 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3853 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3854 if (phba->cfg_xpsgl && !phba->nvmet_support)
3855 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3856 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3858 qp->total_io_bufs--;
3860 spin_unlock(&qp->io_buf_list_put_lock);
3862 spin_lock(&qp->io_buf_list_get_lock);
3863 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3864 &qp->lpfc_io_buf_list_get,
3866 list_del(&lpfc_ncmd->list);
3868 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3869 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3870 if (phba->cfg_xpsgl && !phba->nvmet_support)
3871 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3872 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3874 qp->total_io_bufs--;
3876 spin_unlock(&qp->io_buf_list_get_lock);
3881 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
3882 * @phba: pointer to lpfc hba data structure.
3884 * This routine first calculates the sizes of the current els and allocated
 * scsi sgl lists, and then goes through all sgls to update the physical
3886 * XRIs assigned due to port function reset. During port initialization, the
3887 * current els and allocated scsi sgl lists are 0s.
3890 * 0 - successful (for now, it always returns 0)
3893 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
3895 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3896 uint16_t i, lxri, xri_cnt, els_xri_cnt;
3897 LIST_HEAD(els_sgl_list);
3901 * update on pci function's els xri-sgl list
3903 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3905 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3906 /* els xri-sgl expanded */
3907 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3908 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3909 "3157 ELS xri-sgl count increased from "
3910 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3912 /* allocate the additional els sgls */
3913 for (i = 0; i < xri_cnt; i++) {
3914 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3916 if (sglq_entry == NULL) {
3917 lpfc_printf_log(phba, KERN_ERR,
3919 "2562 Failure to allocate an "
3920 "ELS sgl entry:%d\n", i);
3924 sglq_entry->buff_type = GEN_BUFF_TYPE;
3925 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3927 if (sglq_entry->virt == NULL) {
3929 lpfc_printf_log(phba, KERN_ERR,
3931 "2563 Failure to allocate an "
3932 "ELS mbuf:%d\n", i);
3936 sglq_entry->sgl = sglq_entry->virt;
3937 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3938 sglq_entry->state = SGL_FREED;
3939 list_add_tail(&sglq_entry->list, &els_sgl_list);
3941 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
3942 list_splice_init(&els_sgl_list,
3943 &phba->sli4_hba.lpfc_els_sgl_list);
3944 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
3945 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl shrunk */
3947 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3948 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3949 "3158 ELS xri-sgl count decreased from "
3950 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3952 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
3953 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3955 /* release extra els sgls from list */
3956 for (i = 0; i < xri_cnt; i++) {
3957 list_remove_head(&els_sgl_list,
3958 sglq_entry, struct lpfc_sglq, list);
3960 __lpfc_mbuf_free(phba, sglq_entry->virt,
3965 list_splice_init(&els_sgl_list,
3966 &phba->sli4_hba.lpfc_els_sgl_list);
3967 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
3969 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3970 "3163 ELS xri-sgl count unchanged: %d\n",
3972 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3974 /* update xris to els sgls on the list */
3976 sglq_entry_next = NULL;
3977 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3978 &phba->sli4_hba.lpfc_els_sgl_list, list) {
3979 lxri = lpfc_sli4_next_xritag(phba);
3980 if (lxri == NO_XRI) {
3981 lpfc_printf_log(phba, KERN_ERR,
3983 "2400 Failed to allocate xri for "
3988 sglq_entry->sli4_lxritag = lxri;
3989 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3994 lpfc_free_els_sgl_list(phba);
3999 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
4000 * @phba: pointer to lpfc hba data structure.
4002 * This routine first calculates the sizes of the current els and allocated
 * scsi sgl lists, and then goes through all sgls to update the physical
4004 * XRIs assigned due to port function reset. During port initialization, the
4005 * current els and allocated scsi sgl lists are 0s.
4008 * 0 - successful (for now, it always returns 0)
4011 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
4013 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4014 uint16_t i, lxri, xri_cnt, els_xri_cnt;
4015 uint16_t nvmet_xri_cnt;
4016 LIST_HEAD(nvmet_sgl_list);
4020 * update on pci function's nvmet xri-sgl list
4022 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4024 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
4025 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4026 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
		/* nvmet xri-sgl expanded */
4028 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
4029 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4030 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
4031 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
4032 /* allocate the additional nvmet sgls */
4033 for (i = 0; i < xri_cnt; i++) {
4034 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4036 if (sglq_entry == NULL) {
4037 lpfc_printf_log(phba, KERN_ERR,
4039 "6303 Failure to allocate an "
4040 "NVMET sgl entry:%d\n", i);
4044 sglq_entry->buff_type = NVMET_BUFF_TYPE;
4045 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
4047 if (sglq_entry->virt == NULL) {
4049 lpfc_printf_log(phba, KERN_ERR,
4051 "6304 Failure to allocate an "
4052 "NVMET buf:%d\n", i);
4056 sglq_entry->sgl = sglq_entry->virt;
4057 memset(sglq_entry->sgl, 0,
4058 phba->cfg_sg_dma_buf_size);
4059 sglq_entry->state = SGL_FREED;
4060 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
4062 spin_lock_irq(&phba->hbalock);
4063 spin_lock(&phba->sli4_hba.sgl_list_lock);
4064 list_splice_init(&nvmet_sgl_list,
4065 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4066 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4067 spin_unlock_irq(&phba->hbalock);
4068 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
4069 /* nvmet xri-sgl shrunk */
4070 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
4071 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4072 "6305 NVMET xri-sgl count decreased from "
4073 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
4074 nvmet_xri_cnt);
4075 spin_lock_irq(&phba->hbalock);
4076 spin_lock(&phba->sli4_hba.sgl_list_lock);
4077 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
4078 &nvmet_sgl_list);
4079 /* release extra nvmet sgls from list */
4080 for (i = 0; i < xri_cnt; i++) {
4081 list_remove_head(&nvmet_sgl_list,
4082 sglq_entry, struct lpfc_sglq, list);
4083 if (sglq_entry) {
4084 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
4085 sglq_entry->phys);
4086 kfree(sglq_entry);
4087 }
4088 }
4089 list_splice_init(&nvmet_sgl_list,
4090 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4091 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4092 spin_unlock_irq(&phba->hbalock);
4093 } else
4094 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4095 "6306 NVMET xri-sgl count unchanged: %d\n",
4096 nvmet_xri_cnt);
4097 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
4099 /* update xris to nvmet sgls on the list */
4100 sglq_entry = NULL;
4101 sglq_entry_next = NULL;
4102 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4103 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
4104 lxri = lpfc_sli4_next_xritag(phba);
4105 if (lxri == NO_XRI) {
4106 lpfc_printf_log(phba, KERN_ERR,
4107 LOG_TRACE_EVENT,
4108 "6307 Failed to allocate xri for "
4109 "NVMET sgl\n");
4110 rc = -ENOMEM;
4111 goto out_free_mem;
4112 }
4113 sglq_entry->sli4_lxritag = lxri;
4114 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4115 }
4116 return 0;
4118 out_free_mem:
4119 lpfc_free_nvmet_sgl_list(phba);
4120 return rc;
4121 }
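/*
 * lpfc_io_buf_flush - drain all hdwq IO buffer lists onto one sorted list
 *
 * Summary (from the code below): empties every hardware queue's get and
 * put lists under their locks, then rebuilds @cbuf in ascending XRI order
 * so that a subsequent SGL block post can walk sequential XRI ranges.
 * Returns the number of buffers moved.
 */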
4123 int
4124 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
4125 {
4126 LIST_HEAD(blist);
4127 struct lpfc_sli4_hdw_queue *qp;
4128 struct lpfc_io_buf *lpfc_cmd;
4129 struct lpfc_io_buf *iobufp, *prev_iobufp;
4130 int idx, cnt, xri, inserted;
4132 cnt = 0;
4133 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4134 qp = &phba->sli4_hba.hdwq[idx];
4135 spin_lock_irq(&qp->io_buf_list_get_lock);
4136 spin_lock(&qp->io_buf_list_put_lock);
4138 /* Take everything off the get and put lists */
4139 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4140 list_splice(&qp->lpfc_io_buf_list_put, &blist);
4141 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4142 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4143 cnt += qp->get_io_bufs + qp->put_io_bufs;
4144 qp->get_io_bufs = 0;
4145 qp->put_io_bufs = 0;
4146 qp->total_io_bufs = 0;
4147 spin_unlock(&qp->io_buf_list_put_lock);
4148 spin_unlock_irq(&qp->io_buf_list_get_lock);
4149 }
4151 /*
4152 * Take IO buffers off blist and put on cbuf sorted by XRI.
4153 * This is because POST_SGL takes a sequential range of XRIs
4154 * to post to the firmware.
4155 */
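/*
 * cbuf is kept sorted with a simple insertion walk: each buffer is
 * compared down the list until an entry with a larger XRI is found.
 */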
4156 for (idx = 0; idx < cnt; idx++) {
4157 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
4158 if (!lpfc_cmd)
4159 return cnt;
4160 if (idx == 0) {
4161 list_add_tail(&lpfc_cmd->list, cbuf);
4162 continue;
4163 }
4164 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4165 inserted = 0;
4166 prev_iobufp = NULL;
4167 list_for_each_entry(iobufp, cbuf, list) {
4168 if (xri < iobufp->cur_iocbq.sli4_xritag) {
4169 if (prev_iobufp)
4170 list_add(&lpfc_cmd->list,
4171 &prev_iobufp->list);
4172 else
4173 list_add(&lpfc_cmd->list, cbuf);
4174 inserted = 1;
4175 break;
4176 }
4177 prev_iobufp = iobufp;
4178 }
4179 if (!inserted)
4180 list_add_tail(&lpfc_cmd->list, cbuf);
4181 }
4182 return cnt;
4183 }
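/*
 * Typical usage, as in lpfc_sli4_io_sgl_update() below (a sketch, not a
 * verbatim quote of that routine): flush all IO buffers onto a local
 * list, re-assign XRIs, then replenish the hardware queues:
 *
 *	LIST_HEAD(io_sgl_list);
 *	cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
 *	... update each buffer's sli4_lxritag/sli4_xritag ...
 *	cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
 */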
4185 int
4186 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4187 {
4188 struct lpfc_sli4_hdw_queue *qp;
4189 struct lpfc_io_buf *lpfc_cmd;
4190 int idx, cnt;
4192 qp = phba->sli4_hba.hdwq;
4193 cnt = 0;
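/* Deal buffers out one per hdwq per pass so they spread evenly. */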
4194 while (!list_empty(cbuf)) {
4195 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4196 list_remove_head(cbuf, lpfc_cmd,
4197 struct lpfc_io_buf, list);
4198 if (!lpfc_cmd)
4199 return cnt;
4200 cnt++;
4201 qp = &phba->sli4_hba.hdwq[idx];
4202 lpfc_cmd->hdwq_no = idx;
4203 lpfc_cmd->hdwq = qp;
4204 lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
4205 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
4206 spin_lock(&qp->io_buf_list_put_lock);
4207 list_add_tail(&lpfc_cmd->list,
4208 &qp->lpfc_io_buf_list_put);
4209 qp->put_io_bufs++;
4210 qp->total_io_bufs++;
4211 spin_unlock(&qp->io_buf_list_put_lock);
4212 }
4213 }
4214 return cnt;
4215 }
4217 /**
4218 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
4219 * @phba: pointer to lpfc hba data structure.
4220 *
4221 * This routine first calculates the current sizes of the ELS and allocated
4222 * IO xri-sgl lists, and then goes through all sgls to update the physical
4223 * XRIs assigned due to port function reset. During port initialization, the
4224 * current ELS and allocated IO sgl counts are 0.
4225 *
4226 * Return codes
4227 * 0 - successful (for now, it always returns 0)
4228 **/
4229 int
4230 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4231 {
4232 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4233 uint16_t i, lxri, els_xri_cnt;
4234 uint16_t io_xri_cnt, io_xri_max;
4235 LIST_HEAD(io_sgl_list);
4236 int rc, cnt;
4238 /*
4239 * update on pci function's allocated nvme xri-sgl list
4240 */
4242 /* maximum number of xris available for nvme buffers */
4243 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4244 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4245 phba->sli4_hba.io_xri_max = io_xri_max;
4247 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4248 "6074 Current allocated XRI sgl count:%d, "
4249 "maximum XRI count:%d\n",
4250 phba->sli4_hba.io_xri_cnt,
4251 phba->sli4_hba.io_xri_max);
4253 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4255 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4256 /* max nvme xri shrunk below the allocated nvme buffers */
4257 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4258 phba->sli4_hba.io_xri_max;
4259 /* release the extra allocated nvme buffers */
4260 for (i = 0; i < io_xri_cnt; i++) {
4261 list_remove_head(&io_sgl_list, lpfc_ncmd,
4262 struct lpfc_io_buf, list);
4263 if (lpfc_ncmd) {
4264 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4265 lpfc_ncmd->data,
4266 lpfc_ncmd->dma_handle);
4267 kfree(lpfc_ncmd);
4268 }
4269 }
4270 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4271 }
4273 /* update xris associated to remaining allocated nvme buffers */
4274 lpfc_ncmd = NULL;
4275 lpfc_ncmd_next = NULL;
4276 phba->sli4_hba.io_xri_cnt = cnt;
4277 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4278 &io_sgl_list, list) {
4279 lxri = lpfc_sli4_next_xritag(phba);
4280 if (lxri == NO_XRI) {
4281 lpfc_printf_log(phba, KERN_ERR,
4282 LOG_TRACE_EVENT,
4283 "6075 Failed to allocate xri for "
4284 "nvme buffer\n");
4285 rc = -ENOMEM;
4286 goto out_free_mem;
4287 }
4288 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4289 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4290 }
4291 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4292 return 0;
4294 out_free_mem:
4295 lpfc_io_free(phba);
4296 return rc;
4297 }
4299 /**
4300 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4301 * @phba: Pointer to lpfc hba data structure.
4302 * @num_to_alloc: The requested number of buffers to allocate.
4303 *
4304 * This routine allocates nvme buffers for a device with the SLI-4 interface
4305 * spec; the nvme buffer contains all the necessary information needed to
4306 * initiate an I/O. After allocating up to @num_to_alloc IO buffers and
4307 * putting them on a list, it posts them to the port using SGL block post.
4308 *
4309 * Return codes:
4310 * int - number of IO buffers that were allocated and posted.
4311 * 0 = failure, less than num_to_alloc is a partial failure.
4312 **/
4313 int
4314 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4315 {
4316 struct lpfc_io_buf *lpfc_ncmd;
4317 struct lpfc_iocbq *pwqeq;
4318 uint16_t iotag, lxri = 0;
4319 int bcnt, num_posted;
4320 LIST_HEAD(prep_nblist);
4321 LIST_HEAD(post_nblist);
4322 LIST_HEAD(nvme_nblist);
4324 phba->sli4_hba.io_xri_cnt = 0;
4325 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4326 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4327 if (!lpfc_ncmd)
4328 break;
4329 /*
4330 * Get memory from the pci pool to map the virt space to
4331 * pci bus space for an I/O. The DMA buffer includes the
4332 * number of SGE's necessary to support the sg_tablesize.
4333 */
4334 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4335 GFP_KERNEL,
4336 &lpfc_ncmd->dma_handle);
4337 if (!lpfc_ncmd->data) {
4338 kfree(lpfc_ncmd);
4339 break;
4340 }
4342 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4343 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4344 } else {
4345 /*
4346 * 4K Page alignment is CRITICAL to BlockGuard, double
4347 * check to be sure.
4348 */
4349 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4350 (((unsigned long)(lpfc_ncmd->data) &
4351 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4352 lpfc_printf_log(phba, KERN_ERR,
4353 LOG_TRACE_EVENT,
4354 "3369 Memory alignment err: "
4355 "addr=%lx\n",
4356 (unsigned long)lpfc_ncmd->data);
4357 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4358 lpfc_ncmd->data,
4359 lpfc_ncmd->dma_handle);
4360 kfree(lpfc_ncmd);
4361 break;
4362 }
4363 }
4365 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4367 lxri = lpfc_sli4_next_xritag(phba);
4368 if (lxri == NO_XRI) {
4369 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4370 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4371 kfree(lpfc_ncmd);
4372 break;
4373 }
4374 pwqeq = &lpfc_ncmd->cur_iocbq;
4376 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4377 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4378 if (iotag == 0) {
4379 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4380 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4381 kfree(lpfc_ncmd);
4382 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4383 "6121 Failed to allocate IOTAG for"
4384 " XRI:0x%x\n", lxri);
4385 lpfc_sli4_free_xri(phba, lxri);
4386 break;
4387 }
4388 pwqeq->sli4_lxritag = lxri;
4389 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4390 pwqeq->context1 = lpfc_ncmd;
4392 /* Initialize local short-hand pointers. */
4393 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4394 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4395 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
4396 spin_lock_init(&lpfc_ncmd->buf_lock);
4398 /* add the nvme buffer to a post list */
4399 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4400 phba->sli4_hba.io_xri_cnt++;
4401 }
4402 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4403 "6114 Allocate %d out of %d requested new NVME "
4404 "buffers\n", bcnt, num_to_alloc);
4406 /* post the list of nvme buffer sgls to port if available */
4407 if (!list_empty(&post_nblist))
4408 num_posted = lpfc_sli4_post_io_sgl_list(
4409 phba, &post_nblist, bcnt);
4410 else
4411 num_posted = 0;
4413 return num_posted;
4414 }
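/*
 * Note that lpfc_new_io_buf() is best-effort: any allocation failure
 * simply breaks out of the loop, and whatever was gathered so far is
 * still posted, matching the "less than num_to_alloc is a partial
 * failure" return convention documented above.
 */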
4416 static uint64_t
4417 lpfc_get_wwpn(struct lpfc_hba *phba)
4418 {
4419 uint64_t wwn;
4420 int rc;
4421 LPFC_MBOXQ_t *mboxq;
4422 MAILBOX_t *mb;
4424 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool,
4425 GFP_KERNEL);
4426 if (!mboxq)
4427 return (uint64_t)-1;
4429 /* First get WWN of HBA instance */
4430 lpfc_read_nv(phba, mboxq);
4431 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4432 if (rc != MBX_SUCCESS) {
4433 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4434 "6019 Mailbox failed, mbxCmd x%x "
4435 "READ_NV, mbxStatus x%x\n",
4436 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4437 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4438 mempool_free(mboxq, phba->mbox_mem_pool);
4439 return (uint64_t)-1;
4440 }
4441 mb = &mboxq->u.mb;
4442 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4443 /* wwn is WWPN of HBA instance */
4444 mempool_free(mboxq, phba->mbox_mem_pool);
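/*
 * SLI4 returns the port name big-endian, hence the be64_to_cpu() below;
 * on earlier SLI revs the two 32-bit words arrive swapped relative to
 * each other, which the rol64(wwn, 32) rotation puts back in order.
 */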
4445 if (phba->sli_rev == LPFC_SLI_REV4)
4446 return be64_to_cpu(wwn);
4447 else
4448 return rol64(wwn, 32);
4449 }
4451 /**
4452 * lpfc_vmid_res_alloc - Allocates resources for VMID
4453 * @phba: pointer to lpfc hba data structure.
4454 * @vport: pointer to vport data structure
4455 *
4456 * This routine allocates the resources needed for the VMID.
4457 *
4458 * Return codes
4459 * 0 on Success
4460 * Non-0 on Failure
4461 */
4462 static int
4463 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4464 {
4465 /* VMID feature is supported only on SLI4 */
4466 if (phba->sli_rev == LPFC_SLI_REV3) {
4467 phba->cfg_vmid_app_header = 0;
4468 phba->cfg_vmid_priority_tagging = 0;
4469 }
4471 if (lpfc_is_vmid_enabled(phba)) {
4472 vport->vmid =
4473 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4474 GFP_KERNEL);
4475 if (!vport->vmid)
4476 return -ENOMEM;
4478 rwlock_init(&vport->vmid_lock);
4480 /* Set the VMID parameters for the vport */
4481 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4482 vport->vmid_inactivity_timeout =
4483 phba->cfg_vmid_inactivity_timeout;
4484 vport->max_vmid = phba->cfg_max_vmid;
4485 vport->cur_vmid_cnt = 0;
4487 vport->vmid_priority_range = bitmap_zalloc
4488 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
4490 if (!vport->vmid_priority_range) {
4491 kfree(vport->vmid);
4492 return -ENOMEM;
4493 }
4495 hash_init(vport->hash_table);
4496 }
4497 return 0;
4498 }
4500 /**
4501 * lpfc_create_port - Create an FC port
4502 * @phba: pointer to lpfc hba data structure.
4503 * @instance: a unique integer ID to this FC port.
4504 * @dev: pointer to the device data structure.
4506 * This routine creates an FC port for the upper layer protocol. The FC port
4507 * can be created on top of either a physical port or a virtual port provided
4508 * by the HBA. This routine also allocates a SCSI host data structure (shost)
4509 * and associates the FC port created before adding the shost into the SCSI
4510 * layer.
4511 *
4512 * Return codes
4513 * @vport - pointer to the virtual N_Port data structure.
4514 * NULL - port create failed.
4515 **/
4516 struct lpfc_vport *
4517 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4518 {
4519 struct lpfc_vport *vport;
4520 struct Scsi_Host *shost = NULL;
4521 struct scsi_host_template *template;
4522 int error = 0;
4523 int i;
4524 uint64_t wwn;
4525 bool use_no_reset_hba = false;
4526 int rc;
4528 if (lpfc_no_hba_reset_cnt) {
4529 if (phba->sli_rev < LPFC_SLI_REV4 &&
4530 dev == &phba->pcidev->dev) {
4531 /* Reset the port first */
4532 lpfc_sli_brdrestart(phba);
4533 rc = lpfc_sli_chipset_init(phba);
4534 if (rc)
4535 return NULL;
4536 }
4537 wwn = lpfc_get_wwpn(phba);
4538 }
4540 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4541 if (wwn == lpfc_no_hba_reset[i]) {
4542 lpfc_printf_log(phba, KERN_ERR,
4543 LOG_TRACE_EVENT,
4544 "6020 Setting use_no_reset port=%llx\n",
4545 wwn);
4546 use_no_reset_hba = true;
4547 break;
4548 }
4549 }
4551 /* Seed template for SCSI host registration */
4552 if (dev == &phba->pcidev->dev) {
4553 template = &phba->port_template;
4555 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4556 /* Seed physical port template */
4557 memcpy(template, &lpfc_template, sizeof(*template));
4559 if (use_no_reset_hba)
4560 /* template is for a no reset SCSI Host */
4561 template->eh_host_reset_handler = NULL;
4563 /* Template for all vports this physical port creates */
4564 memcpy(&phba->vport_template, &lpfc_template,
4565 sizeof(*template));
4566 phba->vport_template.shost_attrs = lpfc_vport_attrs;
4567 phba->vport_template.eh_bus_reset_handler = NULL;
4568 phba->vport_template.eh_host_reset_handler = NULL;
4569 phba->vport_template.vendor_id = 0;
4571 /* Initialize the host templates with updated value */
4572 if (phba->sli_rev == LPFC_SLI_REV4) {
4573 template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4574 phba->vport_template.sg_tablesize =
4575 phba->cfg_scsi_seg_cnt;
4576 } else {
4577 template->sg_tablesize = phba->cfg_sg_seg_cnt;
4578 phba->vport_template.sg_tablesize =
4579 phba->cfg_sg_seg_cnt;
4580 }
4582 } else {
4583 /* NVMET is for physical port only */
4584 memcpy(template, &lpfc_template_nvme,
4585 sizeof(*template));
4586 }
4587 } else {
4588 template = &phba->vport_template;
4589 }
4591 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4592 if (!shost)
4593 goto out;
4595 vport = (struct lpfc_vport *) shost->hostdata;
4596 vport->phba = phba;
4597 vport->load_flag |= FC_LOADING;
4598 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4599 vport->fc_rscn_flush = 0;
4600 lpfc_get_vport_cfgparam(vport);
4602 /* Adjust value in vport */
4603 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4605 shost->unique_id = instance;
4606 shost->max_id = LPFC_MAX_TARGET;
4607 shost->max_lun = vport->cfg_max_luns;
4608 shost->this_id = -1;
4609 shost->max_cmd_len = 16;
4611 if (phba->sli_rev == LPFC_SLI_REV4) {
4612 if (!phba->cfg_fcp_mq_threshold ||
4613 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4614 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
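/*
 * nr_hw_queues is bounded by the FCP MQ threshold computed above;
 * 2 * num_possible_nodes() keeps a couple of queues per NUMA node
 * without exceeding that configured limit.
 */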
4616 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4617 phba->cfg_fcp_mq_threshold);
4619 shost->dma_boundary =
4620 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4622 if (phba->cfg_xpsgl && !phba->nvmet_support)
4623 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4624 else
4625 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4626 } else
4627 /* SLI-3 has a limited number of hardware queues (3),
4628 * thus there is only one for FCP processing.
4629 */
4630 shost->nr_hw_queues = 1;
4632 /*
4633 * Set initial can_queue value since 0 is no longer supported and
4634 * scsi_add_host will fail. This will be adjusted later based on the
4635 * max xri value determined in hba setup.
4636 */
4637 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4638 if (dev != &phba->pcidev->dev) {
4639 shost->transportt = lpfc_vport_transport_template;
4640 vport->port_type = LPFC_NPIV_PORT;
4641 } else {
4642 shost->transportt = lpfc_transport_template;
4643 vport->port_type = LPFC_PHYSICAL_PORT;
4644 }
4646 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4647 "9081 CreatePort TMPLATE type %x TBLsize %d "
4648 "SEGcnt %d/%d\n",
4649 vport->port_type, shost->sg_tablesize,
4650 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4652 /* Allocate the resources for VMID */
4653 rc = lpfc_vmid_res_alloc(phba, vport);
4654 if (rc)
4655 goto out;
4658 /* Initialize all internally managed lists. */
4659 INIT_LIST_HEAD(&vport->fc_nodes);
4660 INIT_LIST_HEAD(&vport->rcv_buffer_list);
4661 spin_lock_init(&vport->work_port_lock);
4663 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4665 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4667 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4669 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4670 lpfc_setup_bg(phba, shost);
4672 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4673 if (error)
4674 goto out_put_shost;
4676 spin_lock_irq(&phba->port_list_lock);
4677 list_add_tail(&vport->listentry, &phba->port_list);
4678 spin_unlock_irq(&phba->port_list_lock);
4679 return vport;
4681 out_put_shost:
4682 kfree(vport->vmid);
4683 bitmap_free(vport->vmid_priority_range);
4684 scsi_host_put(shost);
4685 out:
4686 return NULL;
4687 }
4689 /**
4690 * destroy_port - destroy an FC port
4691 * @vport: pointer to an lpfc virtual N_Port data structure.
4693 * This routine destroys an FC port from the upper layer protocol. All the
4694 * resources associated with the port are released.
4695 **/
4696 static void
4697 destroy_port(struct lpfc_vport *vport)
4698 {
4699 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4700 struct lpfc_hba *phba = vport->phba;
4702 lpfc_debugfs_terminate(vport);
4703 fc_remove_host(shost);
4704 scsi_remove_host(shost);
4706 spin_lock_irq(&phba->port_list_lock);
4707 list_del_init(&vport->listentry);
4708 spin_unlock_irq(&phba->port_list_lock);
4710 lpfc_cleanup(vport);
4711 }
4714 /**
4715 * lpfc_get_instance - Get a unique integer ID
4717 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
4718 * uses the kernel idr facility to perform the task.
4719 *
4720 * Return codes:
4721 * instance - a unique integer ID allocated as the new instance.
4722 * -1 - lpfc get instance failed.
4723 **/
4724 int
4725 lpfc_get_instance(void)
4726 {
4727 int ret;
4729 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4730 return ret < 0 ? -1 : ret;
4731 }
4733 /**
4734 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4735 * @shost: pointer to SCSI host data structure.
4736 * @time: elapsed time of the scan in jiffies.
4738 * This routine is called by the SCSI layer with a SCSI host to determine
4739 * whether the host scan is finished.
4741 * Note: there is no scan_start function as adapter initialization will have
4742 * asynchronously kicked off the link initialization.
4743 *
4744 * Return codes
4745 * 0 - SCSI host scan is not over yet.
4746 * 1 - SCSI host scan is over.
4747 **/
4748 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4749 {
4750 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4751 struct lpfc_hba *phba = vport->phba;
4752 int stat = 0;
4754 spin_lock_irq(shost->host_lock);
4756 if (vport->load_flag & FC_UNLOADING) {
4757 stat = 1;
4758 goto finished;
4759 }
4760 if (time >= msecs_to_jiffies(30 * 1000)) {
4761 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4762 "0461 Scanning longer than 30 "
4763 "seconds. Continuing initialization\n");
4764 stat = 1;
4765 goto finished;
4766 }
4767 if (time >= msecs_to_jiffies(15 * 1000) &&
4768 phba->link_state <= LPFC_LINK_DOWN) {
4769 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4770 "0465 Link down longer than 15 "
4771 "seconds. Continuing initialization\n");
4772 stat = 1;
4773 goto finished;
4774 }
4776 if (vport->port_state != LPFC_VPORT_READY)
4777 goto finished;
4778 if (vport->num_disc_nodes || vport->fc_prli_sent)
4779 goto finished;
4780 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4781 goto finished;
4782 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4783 goto finished;
4785 stat = 1;
4787 finished:
4788 spin_unlock_irq(shost->host_lock);
4789 return stat;
4790 }
4792 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4793 {
4794 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4795 struct lpfc_hba *phba = vport->phba;
4797 fc_host_supported_speeds(shost) = 0;
4798 /*
4799 * Avoid reporting supported link speed for FCoE as it can't be
4800 * controlled via FCoE.
4801 */
4802 if (phba->hba_flag & HBA_FCOE_MODE)
4803 return;
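/* Translate each supported link-speed capability bit in phba->lmt into
 * the corresponding FC transport PORTSPEED flag.
 */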
4805 if (phba->lmt & LMT_256Gb)
4806 fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
4807 if (phba->lmt & LMT_128Gb)
4808 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4809 if (phba->lmt & LMT_64Gb)
4810 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4811 if (phba->lmt & LMT_32Gb)
4812 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4813 if (phba->lmt & LMT_16Gb)
4814 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4815 if (phba->lmt & LMT_10Gb)
4816 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4817 if (phba->lmt & LMT_8Gb)
4818 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4819 if (phba->lmt & LMT_4Gb)
4820 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4821 if (phba->lmt & LMT_2Gb)
4822 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4823 if (phba->lmt & LMT_1Gb)
4824 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4825 }
4827 /**
4828 * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port
4829 * @shost: pointer to SCSI host data structure.
4831 * This routine initializes a given SCSI host's attributes on an FC port. The
4832 * SCSI host can be either on top of a physical port or a virtual port.
4833 **/
4834 void lpfc_host_attrib_init(struct Scsi_Host *shost)
4835 {
4836 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4837 struct lpfc_hba *phba = vport->phba;
4838 /*
4839 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
4840 */
4842 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4843 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4844 fc_host_supported_classes(shost) = FC_COS_CLASS3;
4846 memset(fc_host_supported_fc4s(shost), 0,
4847 sizeof(fc_host_supported_fc4s(shost)));
4848 fc_host_supported_fc4s(shost)[2] = 1;
4849 fc_host_supported_fc4s(shost)[7] = 1;
4851 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4852 sizeof fc_host_symbolic_name(shost));
4854 lpfc_host_supported_speeds_set(shost);
4856 fc_host_maxframe_size(shost) =
4857 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4858 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
4860 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4862 /* This value is also unchanging */
4863 memset(fc_host_active_fc4s(shost), 0,
4864 sizeof(fc_host_active_fc4s(shost)));
4865 fc_host_active_fc4s(shost)[2] = 1;
4866 fc_host_active_fc4s(shost)[7] = 1;
4868 fc_host_max_npiv_vports(shost) = phba->max_vpi;
4869 spin_lock_irq(shost->host_lock);
4870 vport->load_flag &= ~FC_LOADING;
4871 spin_unlock_irq(shost->host_lock);
4872 }
4874 /**
4875 * lpfc_stop_port_s3 - Stop SLI3 device port
4876 * @phba: pointer to lpfc hba data structure.
4878 * This routine is invoked to stop an SLI3 device port. It stops the device
4879 * from generating interrupts and stops the device driver's timers for the
4880 * device.
4881 **/
4882 static void
4883 lpfc_stop_port_s3(struct lpfc_hba *phba)
4884 {
4885 /* Clear all interrupt enable conditions */
4886 writel(0, phba->HCregaddr);
4887 readl(phba->HCregaddr); /* flush */
4888 /* Clear all pending interrupts */
4889 writel(0xffffffff, phba->HAregaddr);
4890 readl(phba->HAregaddr); /* flush */
4892 /* Reset some HBA SLI setup states */
4893 lpfc_stop_hba_timers(phba);
4894 phba->pport->work_port_events = 0;
4895 }
4897 /**
4898 * lpfc_stop_port_s4 - Stop SLI4 device port
4899 * @phba: pointer to lpfc hba data structure.
4901 * This routine is invoked to stop an SLI4 device port. It stops the device
4902 * from generating interrupts and stops the device driver's timers for the
4903 * device.
4904 **/
4905 static void
4906 lpfc_stop_port_s4(struct lpfc_hba *phba)
4907 {
4908 /* Reset some HBA SLI4 setup states */
4909 lpfc_stop_hba_timers(phba);
4910 if (phba->pport)
4911 phba->pport->work_port_events = 0;
4912 phba->sli4_hba.intr_enable = 0;
4913 }
4915 /**
4916 * lpfc_stop_port - Wrapper function for stopping hba port
4917 * @phba: Pointer to HBA context object.
4919 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
4920 * the API jump table function pointer from the lpfc_hba struct.
4921 **/
4922 void
4923 lpfc_stop_port(struct lpfc_hba *phba)
4924 {
4925 phba->lpfc_stop_port(phba);
4927 if (phba->wq)
4928 flush_workqueue(phba->wq);
4929 }
4931 /**
4932 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4933 * @phba: Pointer to hba for which this call is being executed.
4935 * This routine starts the timer waiting for the FCF rediscovery to complete.
4936 **/
4937 void
4938 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4939 {
4940 unsigned long fcf_redisc_wait_tmo =
4941 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4942 /* Start fcf rediscovery wait period timer */
4943 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4944 spin_lock_irq(&phba->hbalock);
4945 /* Allow action to new fcf asynchronous event */
4946 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4947 /* Mark the FCF rediscovery pending state */
4948 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4949 spin_unlock_irq(&phba->hbalock);
4950 }
4952 /**
4953 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4954 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
4956 * This routine is invoked when the wait for FCF table rediscovery has
4957 * timed out. If new FCF record(s) have been discovered during the
4958 * wait period, a new FCF event shall be added to the FCOE async event
4959 * list, and the worker thread shall be woken up to process it from the
4960 * worker thread context.
4961 **/
4962 static void
4963 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
4964 {
4965 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
4967 /* Don't send FCF rediscovery event if timer cancelled */
4968 spin_lock_irq(&phba->hbalock);
4969 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4970 spin_unlock_irq(&phba->hbalock);
4971 return;
4972 }
4973 /* Clear FCF rediscovery timer pending flag */
4974 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4975 /* FCF rediscovery event to worker thread */
4976 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4977 spin_unlock_irq(&phba->hbalock);
4978 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
4979 "2776 FCF rediscover quiescent timer expired\n");
4980 /* wake up worker thread */
4981 lpfc_worker_wake_up(phba);
4982 }
4984 /**
4985 * lpfc_vmid_poll - VMID timeout detection
4986 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
4988 * This routine is invoked when there has been no I/O by a VM for the specified
4989 * amount of time. When this situation is detected, the VMID has to be
4990 * deregistered from the switch and all the local resources freed. The VMID
4991 * will be reassigned to the VM once the I/O begins.
4992 **/
4993 static void
4994 lpfc_vmid_poll(struct timer_list *t)
4995 {
4996 struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
4997 u32 wake_up = 0;
4999 /* check if there is a need to issue QFPA */
5000 if (phba->pport->vmid_priority_tagging) {
5001 wake_up = 1;
5002 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5003 }
5005 /* Is the vmid inactivity timer enabled */
5006 if (phba->pport->vmid_inactivity_timeout ||
5007 phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
5008 wake_up = 1;
5009 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
5010 }
5012 if (wake_up)
5013 lpfc_worker_wake_up(phba);
5015 /* restart the timer for the next iteration */
5016 mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
5017 LPFC_VMID_TIMER));
5018 }
5020 /**
5021 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
5022 * @phba: pointer to lpfc hba data structure.
5023 * @acqe_link: pointer to the async link completion queue entry.
5025 * This routine is to parse the SLI4 link-attention link fault code.
5026 **/
5027 static void
5028 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
5029 struct lpfc_acqe_link *acqe_link)
5030 {
5031 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
5032 case LPFC_ASYNC_LINK_FAULT_NONE:
5033 case LPFC_ASYNC_LINK_FAULT_LOCAL:
5034 case LPFC_ASYNC_LINK_FAULT_REMOTE:
5035 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
5036 break;
5037 default:
5038 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5039 "0398 Unknown link fault code: x%x\n",
5040 bf_get(lpfc_acqe_link_fault, acqe_link));
5041 break;
5042 }
5043 }
5045 /**
5046 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5047 * @phba: pointer to lpfc hba data structure.
5048 * @acqe_link: pointer to the async link completion queue entry.
5050 * This routine is to parse the SLI4 link attention type and translate it
5051 * into the base driver's link attention type coding.
5053 * Return: Link attention type in terms of base driver's coding.
5054 **/
5055 static uint8_t
5056 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
5057 struct lpfc_acqe_link *acqe_link)
5058 {
5059 uint8_t att_type;
5061 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
5062 case LPFC_ASYNC_LINK_STATUS_DOWN:
5063 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
5064 att_type = LPFC_ATT_LINK_DOWN;
5065 break;
5066 case LPFC_ASYNC_LINK_STATUS_UP:
5067 /* Ignore physical link up events - wait for logical link up */
5068 att_type = LPFC_ATT_RESERVED;
5069 break;
5070 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
5071 att_type = LPFC_ATT_LINK_UP;
5074 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5075 "0399 Invalid link attention type: x%x\n",
5076 bf_get(lpfc_acqe_link_status, acqe_link));
5077 att_type = LPFC_ATT_RESERVED;
5078 break;
5079 }
5080 return att_type;
5081 }
5083 /**
5084 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
5085 * @phba: pointer to lpfc hba data structure.
5087 * This routine is to get an SLI3 FC port's link speed in Mbps.
5089 * Return: link speed in terms of Mbps.
5090 **/
5091 static uint32_t
5092 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
5093 {
5094 uint32_t link_speed;
5096 if (!lpfc_is_link_up(phba))
5097 return 0;
5099 if (phba->sli_rev <= LPFC_SLI_REV3) {
5100 switch (phba->fc_linkspeed) {
5101 case LPFC_LINK_SPEED_1GHZ:
5102 link_speed = 1000;
5103 break;
5104 case LPFC_LINK_SPEED_2GHZ:
5105 link_speed = 2000;
5106 break;
5107 case LPFC_LINK_SPEED_4GHZ:
5108 link_speed = 4000;
5109 break;
5110 case LPFC_LINK_SPEED_8GHZ:
5111 link_speed = 8000;
5112 break;
5113 case LPFC_LINK_SPEED_10GHZ:
5114 link_speed = 10000;
5115 break;
5116 case LPFC_LINK_SPEED_16GHZ:
5117 link_speed = 16000;
5118 break;
5119 default:
5120 link_speed = 0;
5121 }
5122 } else {
5123 if (phba->sli4_hba.link_state.logical_speed)
5124 link_speed =
5125 phba->sli4_hba.link_state.logical_speed;
5126 else
5127 link_speed = phba->sli4_hba.link_state.speed;
5128 }
5129 return link_speed;
5130 }
5133 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
5134 * @phba: pointer to lpfc hba data structure.
5135 * @evt_code: asynchronous event code.
5136 * @speed_code: asynchronous event link speed code.
5138 * This routine is to parse the given SLI4 async event link speed code into
5139 * the value of Mbps for the link speed.
5141 * Return: link speed in terms of Mbps.
5142 **/
5143 static uint32_t
5144 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
5145 uint8_t speed_code)
5146 {
5147 uint32_t port_speed;
5149 switch (evt_code) {
5150 case LPFC_TRAILER_CODE_LINK:
5151 switch (speed_code) {
5152 case LPFC_ASYNC_LINK_SPEED_ZERO:
5153 port_speed = 0;
5154 break;
5155 case LPFC_ASYNC_LINK_SPEED_10MBPS:
5156 port_speed = 10;
5157 break;
5158 case LPFC_ASYNC_LINK_SPEED_100MBPS:
5159 port_speed = 100;
5160 break;
5161 case LPFC_ASYNC_LINK_SPEED_1GBPS:
5162 port_speed = 1000;
5163 break;
5164 case LPFC_ASYNC_LINK_SPEED_10GBPS:
5165 port_speed = 10000;
5166 break;
5167 case LPFC_ASYNC_LINK_SPEED_20GBPS:
5168 port_speed = 20000;
5169 break;
5170 case LPFC_ASYNC_LINK_SPEED_25GBPS:
5171 port_speed = 25000;
5172 break;
5173 case LPFC_ASYNC_LINK_SPEED_40GBPS:
5174 port_speed = 40000;
5175 break;
5176 case LPFC_ASYNC_LINK_SPEED_100GBPS:
5177 port_speed = 100000;
5178 break;
5179 default:
5180 port_speed = 0;
5181 }
5182 break;
5183 case LPFC_TRAILER_CODE_FC:
5184 switch (speed_code) {
5185 case LPFC_FC_LA_SPEED_UNKNOWN:
5186 port_speed = 0;
5187 break;
5188 case LPFC_FC_LA_SPEED_1G:
5189 port_speed = 1000;
5190 break;
5191 case LPFC_FC_LA_SPEED_2G:
5192 port_speed = 2000;
5193 break;
5194 case LPFC_FC_LA_SPEED_4G:
5195 port_speed = 4000;
5196 break;
5197 case LPFC_FC_LA_SPEED_8G:
5198 port_speed = 8000;
5199 break;
5200 case LPFC_FC_LA_SPEED_10G:
5201 port_speed = 10000;
5202 break;
5203 case LPFC_FC_LA_SPEED_16G:
5204 port_speed = 16000;
5205 break;
5206 case LPFC_FC_LA_SPEED_32G:
5207 port_speed = 32000;
5208 break;
5209 case LPFC_FC_LA_SPEED_64G:
5210 port_speed = 64000;
5211 break;
5212 case LPFC_FC_LA_SPEED_128G:
5213 port_speed = 128000;
5214 break;
5215 case LPFC_FC_LA_SPEED_256G:
5216 port_speed = 256000;
5217 break;
5218 default:
5219 port_speed = 0;
5220 }
5221 break;
5222 default:
5223 port_speed = 0;
5224 }
5225 return port_speed;
5226 }
5229 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
5230 * @phba: pointer to lpfc hba data structure.
5231 * @acqe_link: pointer to the async link completion queue entry.
5233 * This routine is to handle the SLI4 asynchronous FCoE link event.
5234 **/
5235 static void
5236 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
5237 struct lpfc_acqe_link *acqe_link)
5238 {
5239 struct lpfc_dmabuf *mp;
5240 LPFC_MBOXQ_t *pmb;
5241 MAILBOX_t *mb;
5242 struct lpfc_mbx_read_top *la;
5243 uint8_t att_type;
5244 int rc;
5246 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
5247 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
5248 return;
5249 phba->fcoe_eventtag = acqe_link->event_tag;
5250 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5251 if (!pmb) {
5252 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5253 "0395 The mboxq allocation failed\n");
5254 return;
5255 }
5256 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5257 if (!mp) {
5258 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5259 "0396 The lpfc_dmabuf allocation failed\n");
5260 goto out_free_pmb;
5261 }
5262 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
5263 if (!mp->virt) {
5264 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5265 "0397 The mbuf allocation failed\n");
5266 goto out_free_dmabuf;
5267 }
5269 /* Cleanup any outstanding ELS commands */
5270 lpfc_els_flush_all_cmd(phba);
5272 /* Block ELS IOCBs until we have done process link event */
5273 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5275 /* Update link event statistics */
5276 phba->sli.slistat.link_event++;
5278 /* Create lpfc_handle_latt mailbox command from link ACQE */
5279 lpfc_read_topology(phba, pmb, mp);
5280 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5281 pmb->vport = phba->pport;
5283 /* Keep the link status for extra SLI4 state machine reference */
5284 phba->sli4_hba.link_state.speed =
5285 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5286 bf_get(lpfc_acqe_link_speed, acqe_link));
5287 phba->sli4_hba.link_state.duplex =
5288 bf_get(lpfc_acqe_link_duplex, acqe_link);
5289 phba->sli4_hba.link_state.status =
5290 bf_get(lpfc_acqe_link_status, acqe_link);
5291 phba->sli4_hba.link_state.type =
5292 bf_get(lpfc_acqe_link_type, acqe_link);
5293 phba->sli4_hba.link_state.number =
5294 bf_get(lpfc_acqe_link_number, acqe_link);
5295 phba->sli4_hba.link_state.fault =
5296 bf_get(lpfc_acqe_link_fault, acqe_link);
5297 phba->sli4_hba.link_state.logical_speed =
5298 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5300 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5301 "2900 Async FC/FCoE Link event - Speed:%dGBit "
5302 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5303 "Logical speed:%dMbps Fault:%d\n",
5304 phba->sli4_hba.link_state.speed,
5305 phba->sli4_hba.link_state.topology,
5306 phba->sli4_hba.link_state.status,
5307 phba->sli4_hba.link_state.type,
5308 phba->sli4_hba.link_state.number,
5309 phba->sli4_hba.link_state.logical_speed,
5310 phba->sli4_hba.link_state.fault);
5311 /*
5312 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
5313 * topology info. Note: Optional for non FC-AL ports.
5314 */
5315 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5316 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5317 if (rc == MBX_NOT_FINISHED) {
5318 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5319 goto out_free_dmabuf;
5320 }
5321 return;
5322 }
5323 /*
5324 * For FCoE Mode: fill in all the topology information we need and call
5325 * the READ_TOPOLOGY completion routine to continue without actually
5326 * sending the READ_TOPOLOGY mailbox command to the port.
5327 */
5328 /* Initialize completion status */
5329 mb = &pmb->u.mb;
5330 mb->mbxStatus = MBX_SUCCESS;
5332 /* Parse port fault information field */
5333 lpfc_sli4_parse_latt_fault(phba, acqe_link);
5335 /* Parse and translate link attention fields */
5336 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5337 la->eventTag = acqe_link->event_tag;
5338 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5339 bf_set(lpfc_mbx_read_top_link_spd, la,
5340 (bf_get(lpfc_acqe_link_speed, acqe_link)));
5342 /* Fake the following irrelevant fields */
5343 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5344 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5345 bf_set(lpfc_mbx_read_top_il, la, 0);
5346 bf_set(lpfc_mbx_read_top_pb, la, 0);
5347 bf_set(lpfc_mbx_read_top_fa, la, 0);
5348 bf_set(lpfc_mbx_read_top_mm, la, 0);
5350 /* Invoke the lpfc_handle_latt mailbox command callback function */
5351 lpfc_mbx_cmpl_read_topology(phba, pmb);
5353 return;
5355 out_free_dmabuf:
5356 kfree(mp);
5357 out_free_pmb:
5358 mempool_free(pmb, phba->mbox_mem_pool);
5359 }
5361 /**
5362 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
5363 * topology.
5364 * @phba: pointer to lpfc hba data structure.
5365 * @speed_code: asynchronous event link speed code.
5367 * This routine is to parse the given SLI4 async event link speed code into
5368 * the value of Read topology link speed.
5370 * Return: link speed in terms of Read topology.
5371 **/
5372 static uint8_t
5373 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5374 {
5375 uint8_t port_speed;
5377 switch (speed_code) {
5378 case LPFC_FC_LA_SPEED_1G:
5379 port_speed = LPFC_LINK_SPEED_1GHZ;
5380 break;
5381 case LPFC_FC_LA_SPEED_2G:
5382 port_speed = LPFC_LINK_SPEED_2GHZ;
5383 break;
5384 case LPFC_FC_LA_SPEED_4G:
5385 port_speed = LPFC_LINK_SPEED_4GHZ;
5386 break;
5387 case LPFC_FC_LA_SPEED_8G:
5388 port_speed = LPFC_LINK_SPEED_8GHZ;
5389 break;
5390 case LPFC_FC_LA_SPEED_16G:
5391 port_speed = LPFC_LINK_SPEED_16GHZ;
5392 break;
5393 case LPFC_FC_LA_SPEED_32G:
5394 port_speed = LPFC_LINK_SPEED_32GHZ;
5395 break;
5396 case LPFC_FC_LA_SPEED_64G:
5397 port_speed = LPFC_LINK_SPEED_64GHZ;
5398 break;
5399 case LPFC_FC_LA_SPEED_128G:
5400 port_speed = LPFC_LINK_SPEED_128GHZ;
5401 break;
5402 case LPFC_FC_LA_SPEED_256G:
5403 port_speed = LPFC_LINK_SPEED_256GHZ;
5404 break;
5405 default:
5406 port_speed = 0;
5407 }
5408 return port_speed;
5409 }
5413 void
5414 lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
5415 {
5416 struct rxtable_entry *entry;
5417 int cnt = 0, head, tail, last, start;
5419 head = atomic_read(&phba->rxtable_idx_head);
5420 tail = atomic_read(&phba->rxtable_idx_tail);
5421 if (!phba->rxtable || head == tail) {
5422 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
5423 "4411 Rxtable is empty\n");
5424 return;
5425 }
5426 last = tail;
5427 start = head;
5429 /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */
5430 while (start != last) {
5431 if (start)
5432 start--;
5433 else
5434 start = LPFC_MAX_RXMONITOR_ENTRY - 1;
5435 entry = &phba->rxtable[start];
5436 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5437 "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
5438 "Lat %lld ASz %lld Info %02d BWUtil %d "
5439 "Int %d slot %d\n",
5440 cnt, entry->max_bytes_per_interval,
5441 entry->total_bytes, entry->rcv_bytes,
5442 entry->avg_io_latency, entry->avg_io_size,
5443 entry->cmf_info, entry->timer_utilization,
5444 entry->timer_interval, start);
5445 cnt++;
5446 if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
5447 return;
5448 }
5449 }
5451 /**
5452 * lpfc_cgn_update_stat - Save data into congestion stats buffer
5453 * @phba: pointer to lpfc hba data structure.
5454 * @dtag: FPIN descriptor received
5456 * Increment the FPIN received counter/time when it happens.
5457 **/
5458 void
5459 lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
5460 {
5461 struct lpfc_cgn_info *cp;
5462 struct tm broken;
5463 struct timespec64 cur_time;
5464 u32 cnt;
5465 u32 value;
5467 /* Make sure we have a congestion info buffer */
5468 if (!phba->cgn_i)
5469 return;
5470 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5471 ktime_get_real_ts64(&cur_time);
5472 time64_to_tm(cur_time.tv_sec, 0, &broken);
5474 /* Update congestion statistics */
5475 switch (dtag) {
5476 case ELS_DTAG_LNK_INTEGRITY:
5477 cnt = le32_to_cpu(cp->link_integ_notification);
5478 cnt++;
5479 cp->link_integ_notification = cpu_to_le32(cnt);
5481 cp->cgn_stat_lnk_month = broken.tm_mon + 1;
5482 cp->cgn_stat_lnk_day = broken.tm_mday;
5483 cp->cgn_stat_lnk_year = broken.tm_year - 100;
5484 cp->cgn_stat_lnk_hour = broken.tm_hour;
5485 cp->cgn_stat_lnk_min = broken.tm_min;
5486 cp->cgn_stat_lnk_sec = broken.tm_sec;
5487 break;
5488 case ELS_DTAG_DELIVERY:
5489 cnt = le32_to_cpu(cp->delivery_notification);
5490 cnt++;
5491 cp->delivery_notification = cpu_to_le32(cnt);
5493 cp->cgn_stat_del_month = broken.tm_mon + 1;
5494 cp->cgn_stat_del_day = broken.tm_mday;
5495 cp->cgn_stat_del_year = broken.tm_year - 100;
5496 cp->cgn_stat_del_hour = broken.tm_hour;
5497 cp->cgn_stat_del_min = broken.tm_min;
5498 cp->cgn_stat_del_sec = broken.tm_sec;
5499 break;
5500 case ELS_DTAG_PEER_CONGEST:
5501 cnt = le32_to_cpu(cp->cgn_peer_notification);
5502 cnt++;
5503 cp->cgn_peer_notification = cpu_to_le32(cnt);
5505 cp->cgn_stat_peer_month = broken.tm_mon + 1;
5506 cp->cgn_stat_peer_day = broken.tm_mday;
5507 cp->cgn_stat_peer_year = broken.tm_year - 100;
5508 cp->cgn_stat_peer_hour = broken.tm_hour;
5509 cp->cgn_stat_peer_min = broken.tm_min;
5510 cp->cgn_stat_peer_sec = broken.tm_sec;
5511 break;
5512 case ELS_DTAG_CONGESTION:
5513 cnt = le32_to_cpu(cp->cgn_notification);
5514 cnt++;
5515 cp->cgn_notification = cpu_to_le32(cnt);
5517 cp->cgn_stat_cgn_month = broken.tm_mon + 1;
5518 cp->cgn_stat_cgn_day = broken.tm_mday;
5519 cp->cgn_stat_cgn_year = broken.tm_year - 100;
5520 cp->cgn_stat_cgn_hour = broken.tm_hour;
5521 cp->cgn_stat_cgn_min = broken.tm_min;
5522 cp->cgn_stat_cgn_sec = broken.tm_sec;
5523 }
5524 if (phba->cgn_fpin_frequency &&
5525 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5526 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5527 cp->cgn_stat_npm = value;
5528 }
5529 value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5530 LPFC_CGN_CRC32_SEED);
5531 cp->cgn_info_crc = cpu_to_le32(value);
5532 }
5534 /**
5535 * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer
5536 * @phba: pointer to lpfc hba data structure.
5538 * Save the congestion event data every minute.
5539 * On the hour collapse all the minute data into hour data. Every day
5540 * collapse all the hour data into daily data. Separate driver
5541 * and fabric congestion event counters that will be saved out
5542 * to the registered congestion buffer every minute.
5543 **/
5544 void
5545 lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
5546 {
5547 struct lpfc_cgn_info *cp;
5548 struct tm broken;
5549 struct timespec64 cur_time;
5550 uint32_t i, index;
5551 uint16_t value, mvalue;
5552 uint64_t bps;
5553 uint32_t mbps;
5554 uint32_t dvalue, wvalue, lvalue, avalue;
5555 uint64_t latsum;
5556 __le16 *ptr;
5557 __le32 *lptr;
5558 __le16 *mptr;
5560 /* Make sure we have a congestion info buffer */
5561 if (!phba->cgn_i)
5562 return;
5563 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5565 if (time_before(jiffies, phba->cgn_evt_timestamp))
5566 return;
5567 phba->cgn_evt_timestamp = jiffies +
5568 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
5569 phba->cgn_evt_minute++;
5571 /* We should get to this point in the routine on 1 minute intervals */
5573 ktime_get_real_ts64(&cur_time);
5574 time64_to_tm(cur_time.tv_sec, 0, &broken);
5576 if (phba->cgn_fpin_frequency &&
5577 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5578 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5579 cp->cgn_stat_npm = value;
5580 }
5582 /* Read and clear the latency counters for this minute */
5583 lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
5584 latsum = atomic64_read(&phba->cgn_latency_evt);
5585 atomic_set(&phba->cgn_latency_evt_cnt, 0);
5586 atomic64_set(&phba->cgn_latency_evt, 0);
5588 /* We need to store MB/sec bandwidth in the congestion information.
5589 * block_cnt is count of 512 byte blocks for the entire minute,
5590 * bps will get bytes per sec before finally converting to MB/sec.
5591 */
5592 bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
5593 phba->rx_block_cnt = 0;
5594 mvalue = bps / (1024 * 1024); /* convert to MB/sec */
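/*
 * Worked example: 12,288,000 blocks counted over the minute gives
 * bps = (12288000 / 60) * 512 = 104857600 bytes/sec, and
 * mvalue = 104857600 / (1024 * 1024) = 100 MB/sec.
 */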
5597 /* cgn parameters */
5598 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
5599 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
5600 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
5601 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
5603 /* Fill in default LUN qdepth */
5604 value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
5605 cp->cgn_lunq = cpu_to_le16(value);
5607 /* Record congestion buffer info - every minute
5608 * cgn_driver_evt_cnt (Driver events)
5609 * cgn_fabric_warn_cnt (Congestion Warnings)
5610 * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency)
5611 * cgn_fabric_alarm_cnt (Congestion Alarms)
5612 */
5613 index = ++cp->cgn_index_minute;
5614 if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
5615 cp->cgn_index_minute = 0;
5616 index = 0;
5617 }
5619 /* Get the number of driver events in this sample and reset counter */
5620 dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
5621 atomic_set(&phba->cgn_driver_evt_cnt, 0);
5623 /* Get the number of warning events - FPIN and Signal for this minute */
5624 wvalue = 0;
5625 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
5626 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
5627 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5628 wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
5629 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
5631 /* Get the number of alarm events - FPIN and Signal for this minute */
5632 avalue = 0;
5633 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
5634 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5635 avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
5636 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
5638 /* Collect the driver, warning, alarm and latency counts for this
5639 * minute into the driver congestion buffer.
5640 */
5641 ptr = &cp->cgn_drvr_min[index];
5642 value = (uint16_t)dvalue;
5643 *ptr = cpu_to_le16(value);
5645 ptr = &cp->cgn_warn_min[index];
5646 value = (uint16_t)wvalue;
5647 *ptr = cpu_to_le16(value);
5649 ptr = &cp->cgn_alarm_min[index];
5650 value = (uint16_t)avalue;
5651 *ptr = cpu_to_le16(value);
5653 lptr = &cp->cgn_latency_min[index];
5654 if (lvalue) {
5655 lvalue = (uint32_t)div_u64(latsum, lvalue);
5656 *lptr = cpu_to_le32(lvalue);
5657 } else {
5658 *lptr = 0;
5659 }
5661 /* Collect the bandwidth value into the driver's congestion buffer. */
5662 mptr = &cp->cgn_bw_min[index];
5663 *mptr = cpu_to_le16(mvalue);
5665 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5666 "2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
5667 index, dvalue, wvalue, *lptr, mvalue, avalue);
5670 if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
5671 /* Record congestion buffer info - every hour
5672 * Collapse all minutes into an hour
5673 */
5674 index = ++cp->cgn_index_hour;
5675 if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
5676 cp->cgn_index_hour = 0;
5677 index = 0;
5678 }
5679 dvalue = 0;
5680 wvalue = 0;
5681 lvalue = 0;
5682 avalue = 0;
5683 mbps = 0;
5685 /* Sum up all the minutes */
5686 for (i = 0; i < LPFC_MIN_HOUR; i++) {
5687 dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
5688 wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
5689 lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
5690 mbps += le16_to_cpu(cp->cgn_bw_min[i]);
5691 avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
5692 }
5693 if (lvalue) /* Avg of latency averages */
5694 lvalue /= LPFC_MIN_HOUR;
5695 if (mbps) /* Avg of Bandwidth averages */
5696 mvalue = mbps / LPFC_MIN_HOUR;
5698 lptr = &cp->cgn_drvr_hr[index];
5699 *lptr = cpu_to_le32(dvalue);
5700 lptr = &cp->cgn_warn_hr[index];
5701 *lptr = cpu_to_le32(wvalue);
5702 lptr = &cp->cgn_latency_hr[index];
5703 *lptr = cpu_to_le32(lvalue);
5704 mptr = &cp->cgn_bw_hr[index];
5705 *mptr = cpu_to_le16(mvalue);
5706 lptr = &cp->cgn_alarm_hr[index];
5707 *lptr = cpu_to_le32(avalue);
5709 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5710 "2419 Congestion Info - hour "
5711 "(%d): %d %d %d %d %d\n",
5712 index, dvalue, wvalue, lvalue, mvalue, avalue);
5713 }
5715 /* Every day */
5716 if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
5717 /* Record congestion buffer info - every day
5718 * Collapse all hours into a day. Rotate days
5719 * after LPFC_MAX_CGN_DAYS.
5720 */
5721 index = ++cp->cgn_index_day;
5722 if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
5723 cp->cgn_index_day = 0;
5724 index = 0;
5725 }
5727 /* Anytime we overwrite daily index 0, after we wrap,
5728 * we will be overwriting the oldest day, so we must
5729 * update the congestion data start time for that day.
5730 * That start time should have previously been saved after
5731 * we wrote the last day's worth of data.
5732 */
5733 if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) {
5734 time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken);
5736 cp->cgn_info_month = broken.tm_mon + 1;
5737 cp->cgn_info_day = broken.tm_mday;
5738 cp->cgn_info_year = broken.tm_year - 100;
5739 cp->cgn_info_hour = broken.tm_hour;
5740 cp->cgn_info_minute = broken.tm_min;
5741 cp->cgn_info_second = broken.tm_sec;
5743 lpfc_printf_log
5744 (phba, KERN_INFO, LOG_CGN_MGMT,
5745 "2646 CGNInfo idx0 Start Time: "
5746 "%d/%d/%d %d:%d:%d\n",
5747 cp->cgn_info_day, cp->cgn_info_month,
5748 cp->cgn_info_year, cp->cgn_info_hour,
5749 cp->cgn_info_minute, cp->cgn_info_second);
5750 }
5751 dvalue = 0;
5752 wvalue = 0;
5753 lvalue = 0;
5754 mvalue = 0;
5755 avalue = 0;
5756 mbps = 0;
5757 /* Sum up all the hours */
5758 for (i = 0; i < LPFC_HOUR_DAY; i++) {
5759 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
5760 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
5761 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
5762 mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
5763 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
5764 }
5765 if (lvalue) /* Avg of latency averages */
5766 lvalue /= LPFC_HOUR_DAY;
5767 if (mbps) /* Avg of Bandwidth averages */
5768 mvalue = mbps / LPFC_HOUR_DAY;
5770 lptr = &cp->cgn_drvr_day[index];
5771 *lptr = cpu_to_le32(dvalue);
5772 lptr = &cp->cgn_warn_day[index];
5773 *lptr = cpu_to_le32(wvalue);
5774 lptr = &cp->cgn_latency_day[index];
5775 *lptr = cpu_to_le32(lvalue);
5776 mptr = &cp->cgn_bw_day[index];
5777 *mptr = cpu_to_le16(mvalue);
5778 lptr = &cp->cgn_alarm_day[index];
5779 *lptr = cpu_to_le32(avalue);
5781 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5782 "2420 Congestion Info - daily (%d): "
5783 "%d %d %d %d %d\n",
5784 index, dvalue, wvalue, lvalue, mvalue, avalue);
5786 /* We just wrote LPFC_MAX_CGN_DAYS of data,
5787 * so we are wrapped on any data after this.
5788 * Save this as the start time for the next day.
5789 */
5790 if (index == (LPFC_MAX_CGN_DAYS - 1)) {
5791 phba->hba_flag |= HBA_CGN_DAY_WRAP;
5792 ktime_get_real_ts64(&phba->cgn_daily_ts);
5793 }
5794 }
5796 /* Use the frequency found in the last rcv'ed FPIN */
5797 value = phba->cgn_fpin_frequency;
5798 cp->cgn_warn_freq = cpu_to_le16(value);
5799 cp->cgn_alarm_freq = cpu_to_le16(value);
5801 lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5802 LPFC_CGN_CRC32_SEED);
5803 cp->cgn_info_crc = cpu_to_le32(lvalue);
5804 }
5806 /**
5807 * lpfc_calc_cmf_latency - latency from start of rxate timer interval
5808 * @phba: The Hba for which this call is being executed.
5810 * The routine calculates the latency from the beginning of the CMF timer
5811 * interval to the current point in time. It is called from IO completion
5812 * when we exceed our Bandwidth limitation for the time interval.
5813 **/
5814 static uint32_t
5815 lpfc_calc_cmf_latency(struct lpfc_hba *phba)
5816 {
5817 struct timespec64 cmpl_time;
5818 uint32_t msec = 0;
5820 ktime_get_real_ts64(&cmpl_time);
5822 /* This routine works on a ms granularity so sec and usec are
5823 * converted accordingly.
5824 */
5825 if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
5826 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
5827 NSEC_PER_MSEC;
5828 } else {
5829 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
5830 msec = (cmpl_time.tv_sec -
5831 phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
5832 msec += ((cmpl_time.tv_nsec -
5833 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
5834 } else {
5835 msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
5836 1) * MSEC_PER_SEC;
5837 msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
5838 cmpl_time.tv_nsec) / NSEC_PER_MSEC);
5839 }
5840 }
5841 return msec;
5842 }
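/*
 * Example of the wrap case above: interval start 10.9s and completion
 * 12.1s take the final branch: (12 - 10 - 1) * 1000 = 1000 ms, plus
 * ((1000000000 - 900000000) + 100000000) / 1000000 = 200 ms, i.e. 1200 ms.
 */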
5844 /**
5845 * lpfc_cmf_timer - This is the timer function for one congestion
5846 * rate interval.
5847 * @timer: Pointer to the high resolution timer that expired
5848 **/
5849 static enum hrtimer_restart
5850 lpfc_cmf_timer(struct hrtimer *timer)
5851 {
5852 struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
5853 cmf_timer);
5854 struct rxtable_entry *entry;
5855 uint32_t io_cnt;
5856 uint32_t head, tail;
5857 uint32_t busy, max_read;
5858 uint64_t total, rcv, lat, mbpi;
5859 int timer_interval = LPFC_CMF_INTERVAL;
5860 uint32_t ms;
5861 struct lpfc_cgn_stat *cgs;
5862 int cpu;
5864 /* Only restart the timer if congestion mgmt is on */
5865 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
5866 !phba->cmf_latency.tv_sec) {
5867 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5868 "6224 CMF timer exit: %d %lld\n",
5869 phba->cmf_active_mode,
5870 (uint64_t)phba->cmf_latency.tv_sec);
5871 return HRTIMER_NORESTART;
5872 }
5874 /* If pport is not ready yet, just exit and wait for
5875 * the next timer cycle to hit.
5876 */
5877 if (!phba->pport)
5878 goto skip;
5880 /* Do not block SCSI IO while in the timer routine since
5881 * total_bytes will be cleared
5882 */
5883 atomic_set(&phba->cmf_stop_io, 1);
5885 /* First we need to calculate the actual ms between
5886 * the last timer interrupt and this one. We ask for
5887 * LPFC_CMF_INTERVAL, however the actual time may
5888 * vary depending on system overhead.
5889 */
5890 ms = lpfc_calc_cmf_latency(phba);
5893 /* Immediately after we calculate the time since the last
5894 * timer interrupt, set the start time for the next
5895 * interval
5896 */
5897 ktime_get_real_ts64(&phba->cmf_latency);
5899 phba->cmf_link_byte_count =
5900 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
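/*
 * cmf_max_line_rate is in bytes per second; scaling it by the timer
 * interval (in ms) over 1000 yields the byte budget for one interval.
 */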
5902 /* Collect all the stats from the prior timer interval */
5903 total = 0;
5904 io_cnt = 0;
5905 lat = 0;
5906 rcv = 0;
5907 for_each_present_cpu(cpu) {
5908 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
5909 total += atomic64_xchg(&cgs->total_bytes, 0);
5910 io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
5911 lat += atomic64_xchg(&cgs->rx_latency, 0);
5912 rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
5913 }
5915 /* Before we issue another CMF_SYNC_WQE, retrieve the BW
5916 * returned from the last CMF_SYNC_WQE issued, from
5917 * cmf_last_sync_bw. This will be the target BW for
5918 * this next timer interval.
5919 */
5920 if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
5921 phba->link_state != LPFC_LINK_DOWN &&
5922 phba->hba_flag & HBA_SETUP) {
5923 mbpi = phba->cmf_last_sync_bw;
5924 phba->cmf_last_sync_bw = 0;
5925 lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total);
5926 } else {
5927 /* For Monitor mode or link down we want mbpi
5928 * to be the full link speed
5929 */
5930 mbpi = phba->cmf_link_byte_count;
5931 }
5932 phba->cmf_timer_cnt++;
5934 if (io_cnt) {
5935 /* Update congestion info buffer latency in us */
5936 atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
5937 atomic64_add(lat, &phba->cgn_latency_evt);
5938 }
5939 busy = atomic_xchg(&phba->cmf_busy, 0);
5940 max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
5942 /* Calculate MBPI for the next timer interval */
5943 if (mbpi) {
5944 if (mbpi > phba->cmf_link_byte_count ||
5945 phba->cmf_active_mode == LPFC_CFG_MONITOR)
5946 mbpi = phba->cmf_link_byte_count;
5948 /* Change max_bytes_per_interval to what the prior
5949 * CMF_SYNC_WQE cmpl indicated.
5950 */
5951 if (mbpi != phba->cmf_max_bytes_per_interval)
5952 phba->cmf_max_bytes_per_interval = mbpi;
5953 }
5955 /* Save rxmonitor information for debug */
5956 if (phba->rxtable) {
5957 head = atomic_xchg(&phba->rxtable_idx_head,
5958 LPFC_RXMONITOR_TABLE_IN_USE);
5959 entry = &phba->rxtable[head];
5960 entry->total_bytes = total;
5961 entry->rcv_bytes = rcv;
5962 entry->cmf_busy = busy;
5963 entry->cmf_info = phba->cmf_active_info;
5964 if (io_cnt) {
5965 entry->avg_io_latency = div_u64(lat, io_cnt);
5966 entry->avg_io_size = div_u64(rcv, io_cnt);
5967 } else {
5968 entry->avg_io_latency = 0;
5969 entry->avg_io_size = 0;
5970 }
5971 entry->max_read_cnt = max_read;
5972 entry->io_cnt = io_cnt;
5973 entry->max_bytes_per_interval = mbpi;
5974 if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
5975 entry->timer_utilization = phba->cmf_last_ts;
5977 entry->timer_utilization = ms;
5978 entry->timer_interval = ms;
5979 phba->cmf_last_ts = 0;
5981 /* Increment rxtable index */
5982 head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
5983 tail = atomic_read(&phba->rxtable_idx_tail);
5984 if (head == tail) {
5985 tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
5986 atomic_set(&phba->rxtable_idx_tail, tail);
5987 }
5988 atomic_set(&phba->rxtable_idx_head, head);
5989 }
5991 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
5992 /* If Monitor mode, check if we are oversubscribed
5993 * against the full line rate.
5994 */
5995 if (mbpi && total > mbpi)
5996 atomic_inc(&phba->cgn_driver_evt_cnt);
5997 }
5998 phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */
6000 /* Each minute save Fabric and Driver congestion information */
6001 lpfc_cgn_save_evt_cnt(phba);
6003 /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the
6004 * minute, adjust our next timer interval, if needed, to ensure a
6005 * 1 minute granularity when we get the next timer interrupt.
6006 */
6007 if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL),
6008 phba->cgn_evt_timestamp)) {
6009 timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp -
6010 jiffies);
6011 if (timer_interval <= 0)
6012 timer_interval = LPFC_CMF_INTERVAL;
6014 /* If we adjust timer_interval, max_bytes_per_interval
6015 * needs to be adjusted as well.
6016 */
6017 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
6018 timer_interval, 1000);
6019 if (phba->cmf_active_mode == LPFC_CFG_MONITOR)
6020 phba->cmf_max_bytes_per_interval =
6021 phba->cmf_link_byte_count;
6022 }
6024 /* Since total_bytes has already been zeroed, it's okay to unblock
6025 * after max_bytes_per_interval is setup.
6026 */
6027 if (atomic_xchg(&phba->cmf_bw_wait, 0))
6028 queue_work(phba->wq, &phba->unblock_request_work);
6030 /* SCSI IO is now unblocked */
6031 atomic_set(&phba->cmf_stop_io, 0);
6033 skip:
6034 hrtimer_forward_now(timer,
6035 ktime_set(0, timer_interval * NSEC_PER_MSEC));
6036 return HRTIMER_RESTART;
6037 }
6039 #define trunk_link_status(__idx)\
6040 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6041 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
6042 "Link up" : "Link down") : "NA"
6043 /* Did port __idx report an error */
6044 #define trunk_port_fault(__idx)\
6045 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6046 (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
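/*
 * Both helpers above report "NA" for ports that are not part of the
 * trunk configuration; trunk_port_fault() additionally decodes the
 * per-port bit in port_fault.
 */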
6048 static void
6049 lpfc_update_trunk_link_status(struct lpfc_hba *phba,
6050 struct lpfc_acqe_fc_la *acqe_fc)
6051 {
6052 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
6053 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
6055 phba->sli4_hba.link_state.speed =
6056 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6057 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6059 phba->sli4_hba.link_state.logical_speed =
6060 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6061 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
6062 phba->fc_linkspeed =
6063 lpfc_async_link_speed_to_read_top(
6065 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6067 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
6068 phba->trunk_link.link0.state =
6069 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
6070 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6071 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
6073 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
6074 phba->trunk_link.link1.state =
6075 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
6076 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6077 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
6079 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
6080 phba->trunk_link.link2.state =
6081 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
6082 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6083 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
6085 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
6086 phba->trunk_link.link3.state =
6087 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
6088 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6089 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
6090 }
6092 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6093 "2910 Async FC Trunking Event - Speed:%d\n"
6094 "\tLogical speed:%d "
6095 "port0: %s port1: %s port2: %s port3: %s\n",
6096 phba->sli4_hba.link_state.speed,
6097 phba->sli4_hba.link_state.logical_speed,
6098 trunk_link_status(0), trunk_link_status(1),
6099 trunk_link_status(2), trunk_link_status(3));
6101 if (phba->cmf_active_mode != LPFC_CFG_OFF)
6102 lpfc_cmf_signal_init(phba);
6105 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6106 "3202 trunk error:0x%x (%s) seen on port0:%s "
6108 * SLI-4: Only error codes up to 0xA are
6109 * defined as of now. Print an appropriate
6110 * message in case the driver needs to be updated.
6112 "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
6113 "UNDEFINED. update driver." : trunk_errmsg[err],
6114 trunk_port_fault(0), trunk_port_fault(1),
6115 trunk_port_fault(2), trunk_port_fault(3));
6120 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
6121 * @phba: pointer to lpfc hba data structure.
6122 * @acqe_fc: pointer to the async fc completion queue entry.
6124 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
6125 * that the event was received and then issue a read_topology mailbox command so
6126 * that the rest of the driver will treat it the same as SLI3.
6129 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
6131 struct lpfc_dmabuf *mp;
6134 struct lpfc_mbx_read_top *la;
6137 if (bf_get(lpfc_trailer_type, acqe_fc) !=
6138 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
6139 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6140 "2895 Non FC link Event detected.(%d)\n",
6141 bf_get(lpfc_trailer_type, acqe_fc));
6145 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6146 LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
6147 lpfc_update_trunk_link_status(phba, acqe_fc);
6151 /* Keep the link status for extra SLI4 state machine reference */
6152 phba->sli4_hba.link_state.speed =
6153 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6154 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6155 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
6156 phba->sli4_hba.link_state.topology =
6157 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
6158 phba->sli4_hba.link_state.status =
6159 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
6160 phba->sli4_hba.link_state.type =
6161 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
6162 phba->sli4_hba.link_state.number =
6163 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
6164 phba->sli4_hba.link_state.fault =
6165 bf_get(lpfc_acqe_link_fault, acqe_fc);
6167 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6168 LPFC_FC_LA_TYPE_LINK_DOWN)
6169 phba->sli4_hba.link_state.logical_speed = 0;
6170 else if (!phba->sli4_hba.conf_trunk)
6171 phba->sli4_hba.link_state.logical_speed =
6172 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
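/*
 * Worked example: the ACQE reports logical link speed in units of
 * 10 Mbps, so an lpfc_acqe_fc_la_llink_spd value of 3200 yields a
 * logical_speed of 32000 Mbps.
 */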
6174 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6175 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
6176 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
6177 "%dMbps Fault:%d\n",
6178 phba->sli4_hba.link_state.speed,
6179 phba->sli4_hba.link_state.topology,
6180 phba->sli4_hba.link_state.status,
6181 phba->sli4_hba.link_state.type,
6182 phba->sli4_hba.link_state.number,
6183 phba->sli4_hba.link_state.logical_speed,
6184 phba->sli4_hba.link_state.fault);
6185 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6187 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6188 "2897 The mboxq allocation failed\n");
6191 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6193 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6194 "2898 The lpfc_dmabuf allocation failed\n");
6197 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
6199 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6200 "2899 The mbuf allocation failed\n");
6201 goto out_free_dmabuf;
6204 /* Cleanup any outstanding ELS commands */
6205 lpfc_els_flush_all_cmd(phba);
6207 /* Block ELS IOCBs until we have done process link event */
6208 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
6210 /* Update link event statistics */
6211 phba->sli.slistat.link_event++;
6213 /* Create lpfc_handle_latt mailbox command from link ACQE */
6214 lpfc_read_topology(phba, pmb, mp);
6215 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
6216 pmb->vport = phba->pport;
6218 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
6219 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
6221 switch (phba->sli4_hba.link_state.status) {
6222 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
6223 phba->link_flag |= LS_MDS_LINK_DOWN;
6225 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
6226 phba->link_flag |= LS_MDS_LOOPBACK;
6232 /* Initialize completion status */
6234 mb->mbxStatus = MBX_SUCCESS;
6236 /* Parse port fault information field */
6237 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
6239 /* Parse and translate link attention fields */
6240 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
6241 la->eventTag = acqe_fc->event_tag;
6243 if (phba->sli4_hba.link_state.status ==
6244 LPFC_FC_LA_TYPE_UNEXP_WWPN) {
6245 bf_set(lpfc_mbx_read_top_att_type, la,
6246 LPFC_FC_LA_TYPE_UNEXP_WWPN);
6248 bf_set(lpfc_mbx_read_top_att_type, la,
6249 LPFC_FC_LA_TYPE_LINK_DOWN);
6251 /* Invoke the mailbox command callback function */
6252 lpfc_mbx_cmpl_read_topology(phba, pmb);
6257 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
6258 if (rc == MBX_NOT_FINISHED) {
6259 lpfc_mbuf_free(phba, mp->virt, mp->phys);
6260 goto out_free_dmabuf;
6267 mempool_free(pmb, phba->mbox_mem_pool);
6271 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
6272 * @phba: pointer to lpfc hba data structure.
6273 * @acqe_sli: pointer to the async SLI completion queue entry.
6275 * This routine is to handle the SLI4 asynchronous SLI events.
6278 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
6284 uint8_t operational = 0;
6285 struct temp_event temp_event_data;
6286 struct lpfc_acqe_misconfigured_event *misconfigured;
6287 struct lpfc_acqe_cgn_signal *cgn_signal;
6288 struct Scsi_Host *shost;
6289 struct lpfc_vport **vports;
6292 evt_type = bf_get(lpfc_trailer_type, acqe_sli);
6294 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6295 "2901 Async SLI event - Type:%d, Event Data: x%08x "
6296 "x%08x x%08x x%08x\n", evt_type,
6297 acqe_sli->event_data1, acqe_sli->event_data2,
6298 acqe_sli->reserved, acqe_sli->trailer);
6300 port_name = phba->Port[0];
6301 if (port_name == 0x00)
6302 port_name = '?'; /* port name is empty */
6305 case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
6306 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6307 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
6308 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6310 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6311 "3190 Over Temperature:%d Celsius- Port Name %c\n",
6312 acqe_sli->event_data1, port_name);
6314 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
6315 shost = lpfc_shost_from_vport(phba->pport);
6316 fc_host_post_vendor_event(shost, fc_get_event_number(),
6317 sizeof(temp_event_data),
6318 (char *)&temp_event_data,
6319 SCSI_NL_VID_TYPE_PCI
6320 | PCI_VENDOR_ID_EMULEX);
6322 case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
6323 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6324 temp_event_data.event_code = LPFC_NORMAL_TEMP;
6325 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6327 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6328 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
6329 acqe_sli->event_data1, port_name);
6331 shost = lpfc_shost_from_vport(phba->pport);
6332 fc_host_post_vendor_event(shost, fc_get_event_number(),
6333 sizeof(temp_event_data),
6334 (char *)&temp_event_data,
6335 SCSI_NL_VID_TYPE_PCI
6336 | PCI_VENDOR_ID_EMULEX);
6338 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
6339 misconfigured = (struct lpfc_acqe_misconfigured_event *)
6340 &acqe_sli->event_data1;
6342 /* fetch the status for this port */
6343 switch (phba->sli4_hba.lnk_info.lnk_no) {
6344 case LPFC_LINK_NUMBER_0:
6345 status = bf_get(lpfc_sli_misconfigured_port0_state,
6346 &misconfigured->theEvent);
6347 operational = bf_get(lpfc_sli_misconfigured_port0_op,
6348 &misconfigured->theEvent);
6350 case LPFC_LINK_NUMBER_1:
6351 status = bf_get(lpfc_sli_misconfigured_port1_state,
6352 &misconfigured->theEvent);
6353 operational = bf_get(lpfc_sli_misconfigured_port1_op,
6354 &misconfigured->theEvent);
6356 case LPFC_LINK_NUMBER_2:
6357 status = bf_get(lpfc_sli_misconfigured_port2_state,
6358 &misconfigured->theEvent);
6359 operational = bf_get(lpfc_sli_misconfigured_port2_op,
6360 &misconfigured->theEvent);
6362 case LPFC_LINK_NUMBER_3:
6363 status = bf_get(lpfc_sli_misconfigured_port3_state,
6364 &misconfigured->theEvent);
6365 operational = bf_get(lpfc_sli_misconfigured_port3_op,
6366 &misconfigured->theEvent);
6369 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6371 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
6372 "event: Invalid link %d",
6373 phba->sli4_hba.lnk_info.lnk_no);
6377 /* Skip if optic state unchanged */
6378 if (phba->sli4_hba.lnk_info.optic_state == status)
6382 case LPFC_SLI_EVENT_STATUS_VALID:
6383 sprintf(message, "Physical Link is functional");
6385 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
6386 sprintf(message, "Optics faulted/incorrectly "
6387 "installed/not installed - Reseat optics, "
6388 "if issue not resolved, replace.");
6390 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
6392 "Optics of two types installed - Remove one "
6393 "optic or install matching pair of optics.");
6395 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
6396 sprintf(message, "Incompatible optics - Replace with "
6397 "compatible optics for card to function.");
6399 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
6400 sprintf(message, "Unqualified optics - Replace with "
6401 "Avago optics for Warranty and Technical "
6402 "Support - Link is%s operational",
6403 (operational) ? " not" : "");
6405 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
6406 sprintf(message, "Uncertified optics - Replace with "
6407 "Avago-certified optics to enable link "
6408 "operation - Link is%s operational",
6409 (operational) ? " not" : "");
6412 /* firmware is reporting a status we don't know about */
6413 sprintf(message, "Unknown event status x%02x", status);
6417 /* Issue READ_CONFIG mbox command to refresh supported speeds */
6418 rc = lpfc_sli4_read_config(phba);
6421 lpfc_printf_log(phba, KERN_ERR,
6423 "3194 Unable to retrieve supported "
6424 "speeds, rc = 0x%x\n", rc);
6426 vports = lpfc_create_vport_work_array(phba);
6427 if (vports != NULL) {
6428 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6430 shost = lpfc_shost_from_vport(vports[i]);
6431 lpfc_host_supported_speeds_set(shost);
6434 lpfc_destroy_vport_work_array(phba, vports);
6436 phba->sli4_hba.lnk_info.optic_state = status;
6437 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6438 "3176 Port Name %c %s\n", port_name, message);
6440 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
6441 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6442 "3192 Remote DPort Test Initiated - "
6443 "Event Data1:x%08x Event Data2: x%08x\n",
6444 acqe_sli->event_data1, acqe_sli->event_data2);
6446 case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
6447 /* Call FW to obtain active parms */
6448 lpfc_sli4_cgn_parm_chg_evt(phba);
6450 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
6451 /* Misconfigured WWN. Reports that the SLI Port is configured
6452 * to use FA-WWN, but the attached device doesn't support it.
6453 * No driver action is required.
6454 * Event Data1 - N.A, Event Data2 - N.A
6456 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
6457 "2699 Misconfigured FA-WWN - Attached device does "
6458 "not support FA-WWN\n");
6460 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
6461 /* EEPROM failure. No driver action is required */
6462 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6463 "2518 EEPROM failure - "
6464 "Event Data1: x%08x Event Data2: x%08x\n",
6465 acqe_sli->event_data1, acqe_sli->event_data2);
6467 case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
6468 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6470 cgn_signal = (struct lpfc_acqe_cgn_signal *)
6471 &acqe_sli->event_data1;
6472 phba->cgn_acqe_cnt++;
6474 cnt = bf_get(lpfc_warn_acqe, cgn_signal);
6475 atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
6476 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
6478 /* no threshold for CMF, even 1 signal will trigger an event */
6480 /* Alarm overrides warning, so check that first */
6481 if (cgn_signal->alarm_cnt) {
6482 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6483 /* Keep track of alarm cnt for CMF_SYNC_WQE */
6484 atomic_add(cgn_signal->alarm_cnt,
6485 &phba->cgn_sync_alarm_cnt);
6488 /* signal action needs to be taken */
6489 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
6490 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6491 /* Keep track of warning cnt for CMF_SYNC_WQE */
6492 atomic_add(cnt, &phba->cgn_sync_warn_cnt);
6497 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6498 "3193 Unrecognized SLI event, type: 0x%x",
6505 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
6506 * @vport: pointer to vport data structure.
6508 * This routine is to perform Clear Virtual Link (CVL) on a vport in
6509 * response to a CVL event.
6511 * Return the pointer to the ndlp with the vport if successful, otherwise NULL.
6514 static struct lpfc_nodelist *
6515 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
6517 struct lpfc_nodelist *ndlp;
6518 struct Scsi_Host *shost;
6519 struct lpfc_hba *phba;
6526 ndlp = lpfc_findnode_did(vport, Fabric_DID);
6528 /* Cannot find existing Fabric ndlp, so allocate a new one */
6529 ndlp = lpfc_nlp_init(vport, Fabric_DID);
6532 /* Set the node type */
6533 ndlp->nlp_type |= NLP_FABRIC;
6534 /* Put ndlp onto node list */
6535 lpfc_enqueue_node(vport, ndlp);
6537 if ((phba->pport->port_state < LPFC_FLOGI) &&
6538 (phba->pport->port_state != LPFC_VPORT_FAILED))
6540 /* If virtual link is not yet instantiated ignore CVL */
6541 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
6542 && (vport->port_state != LPFC_VPORT_FAILED))
6544 shost = lpfc_shost_from_vport(vport);
6547 lpfc_linkdown_port(vport);
6548 lpfc_cleanup_pending_mbox(vport);
6549 spin_lock_irq(shost->host_lock);
6550 vport->fc_flag |= FC_VPORT_CVL_RCVD;
6551 spin_unlock_irq(shost->host_lock);
6557 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
6558 * @phba: pointer to lpfc hba data structure.
6560 * This routine is to perform Clear Virtual Link (CVL) on all vports in
6561 * response to a FCF dead event.
6564 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
6566 struct lpfc_vport **vports;
6569 vports = lpfc_create_vport_work_array(phba);
6571 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
6572 lpfc_sli4_perform_vport_cvl(vports[i]);
6573 lpfc_destroy_vport_work_array(phba, vports);
6577 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
6578 * @phba: pointer to lpfc hba data structure.
6579 * @acqe_fip: pointer to the async fcoe completion queue entry.
6581 * This routine is to handle the SLI4 asynchronous fcoe event.
6584 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
6585 struct lpfc_acqe_fip *acqe_fip)
6587 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
6589 struct lpfc_vport *vport;
6590 struct lpfc_nodelist *ndlp;
6591 int active_vlink_present;
6592 struct lpfc_vport **vports;
6595 phba->fc_eventTag = acqe_fip->event_tag;
6596 phba->fcoe_eventtag = acqe_fip->event_tag;
6597 switch (event_type) {
6598 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
6599 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
6600 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
6601 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6602 "2546 New FCF event, evt_tag:x%x, "
6604 acqe_fip->event_tag,
6607 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
6609 "2788 FCF param modified event, "
6610 "evt_tag:x%x, index:x%x\n",
6611 acqe_fip->event_tag,
6613 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6615 * During period of FCF discovery, read the FCF
6616 * table record indexed by the event to update
6617 * FCF roundrobin failover eligible FCF bmask.
6619 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6621 "2779 Read FCF (x%x) for updating "
6622 "roundrobin FCF failover bmask\n",
6624 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
6627 /* If the FCF discovery is in progress, do nothing. */
6628 spin_lock_irq(&phba->hbalock);
6629 if (phba->hba_flag & FCF_TS_INPROG) {
6630 spin_unlock_irq(&phba->hbalock);
6633 /* If fast FCF failover rescan event is pending, do nothing */
6634 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
6635 spin_unlock_irq(&phba->hbalock);
6639 /* If the FCF has been in discovered state, do nothing. */
6640 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
6641 spin_unlock_irq(&phba->hbalock);
6644 spin_unlock_irq(&phba->hbalock);
6646 /* Otherwise, scan the entire FCF table and re-discover SAN */
6647 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6648 "2770 Start FCF table scan per async FCF "
6649 "event, evt_tag:x%x, index:x%x\n",
6650 acqe_fip->event_tag, acqe_fip->index);
6651 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
6652 LPFC_FCOE_FCF_GET_FIRST);
6654 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6655 "2547 Issue FCF scan read FCF mailbox "
6656 "command failed (x%x)\n", rc);
6659 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
6660 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6661 "2548 FCF Table full count 0x%x tag 0x%x\n",
6662 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
6663 acqe_fip->event_tag);
6666 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
6667 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6668 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6669 "2549 FCF (x%x) disconnected from network, "
6670 "tag:x%x\n", acqe_fip->index,
6671 acqe_fip->event_tag);
6673 * If we are in the middle of FCF failover process, clear
6674 * the corresponding FCF bit in the roundrobin bitmap.
6676 spin_lock_irq(&phba->hbalock);
6677 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
6678 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
6679 spin_unlock_irq(&phba->hbalock);
6680 /* Update FLOGI FCF failover eligible FCF bmask */
6681 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
6684 spin_unlock_irq(&phba->hbalock);
6686 /* If the event is not for currently used fcf do nothing */
6687 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
6691 * Otherwise, request the port to rediscover the entire FCF
6692 * table for a fast recovery in case the current FCF is no
6693 * longer valid, since we are not already in the middle of
6694 * the FCF failover process.
6696 spin_lock_irq(&phba->hbalock);
6697 /* Mark the fast failover process in progress */
6698 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
6699 spin_unlock_irq(&phba->hbalock);
6701 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6702 "2771 Start FCF fast failover process due to "
6703 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
6704 "\n", acqe_fip->event_tag, acqe_fip->index);
6705 rc = lpfc_sli4_redisc_fcf_table(phba);
6707 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6709 "2772 Issue FCF rediscover mailbox "
6710 "command failed, fail through to FCF "
6712 spin_lock_irq(&phba->hbalock);
6713 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
6714 spin_unlock_irq(&phba->hbalock);
6716 * Last resort will fail over by treating this
6717 * as a link down to FCF registration.
6719 lpfc_sli4_fcf_dead_failthrough(phba);
6721 /* Reset FCF roundrobin bmask for new discovery */
6722 lpfc_sli4_clear_fcf_rr_bmask(phba);
6724 * Handling fast FCF failover to a DEAD FCF event is
6725 * considered equivalent to receiving a CVL on all vports.
6727 lpfc_sli4_perform_all_vport_cvl(phba);
6730 case LPFC_FIP_EVENT_TYPE_CVL:
6731 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6732 lpfc_printf_log(phba, KERN_ERR,
6734 "2718 Clear Virtual Link Received for VPI 0x%x"
6735 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
6737 vport = lpfc_find_vport_by_vpid(phba,
6739 ndlp = lpfc_sli4_perform_vport_cvl(vport);
6742 active_vlink_present = 0;
6744 vports = lpfc_create_vport_work_array(phba);
6746 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6748 if ((!(vports[i]->fc_flag &
6749 FC_VPORT_CVL_RCVD)) &&
6750 (vports[i]->port_state > LPFC_FDISC)) {
6751 active_vlink_present = 1;
6755 lpfc_destroy_vport_work_array(phba, vports);
6759 * Don't re-instantiate if vport is marked for deletion.
6760 * If we are here first then vport_delete is going to wait
6761 * for discovery to complete.
6763 if (!(vport->load_flag & FC_UNLOADING) &&
6764 active_vlink_present) {
6766 * If there are other active VLinks present,
6767 * re-instantiate the Vlink using FDISC.
6769 mod_timer(&ndlp->nlp_delayfunc,
6770 jiffies + msecs_to_jiffies(1000));
6771 spin_lock_irq(&ndlp->lock);
6772 ndlp->nlp_flag |= NLP_DELAY_TMO;
6773 spin_unlock_irq(&ndlp->lock);
6774 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
6775 vport->port_state = LPFC_FDISC;
6778 * Otherwise, request the port to rediscover
6779 * the entire FCF table for a fast recovery,
6780 * in case the current FCF is no longer valid,
6781 * if we are not already in the FCF failover
6782 * process.
6784 spin_lock_irq(&phba->hbalock);
6785 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6786 spin_unlock_irq(&phba->hbalock);
6789 /* Mark the fast failover process in progress */
6790 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
6791 spin_unlock_irq(&phba->hbalock);
6792 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6794 "2773 Start FCF failover per CVL, "
6795 "evt_tag:x%x\n", acqe_fip->event_tag);
6796 rc = lpfc_sli4_redisc_fcf_table(phba);
6798 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6800 "2774 Issue FCF rediscover "
6801 "mailbox command failed, "
6802 "through to CVL event\n");
6803 spin_lock_irq(&phba->hbalock);
6804 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
6805 spin_unlock_irq(&phba->hbalock);
6807 * Last resort will be a retry on the
6808 * currently registered FCF entry.
6810 lpfc_retry_pport_discovery(phba);
6813 * Reset FCF roundrobin bmask for new discovery.
6816 lpfc_sli4_clear_fcf_rr_bmask(phba);
6820 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6821 "0288 Unknown FCoE event type 0x%x event tag "
6822 "0x%x\n", event_type, acqe_fip->event_tag);
6828 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
6829 * @phba: pointer to lpfc hba data structure.
6830 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
6832 * This routine is to handle the SLI4 asynchronous dcbx event.
6835 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
6836 struct lpfc_acqe_dcbx *acqe_dcbx)
6838 phba->fc_eventTag = acqe_dcbx->event_tag;
6839 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6840 "0290 The SLI4 DCBX asynchronous event is not "
6845 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
6846 * @phba: pointer to lpfc hba data structure.
6847 * @acqe_grp5: pointer to the async grp5 completion queue entry.
6849 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
6850 * is an asynchronous notification of a logical link speed change. The Port
6851 * reports the logical link speed in units of 10Mbps.
6854 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
6855 struct lpfc_acqe_grp5 *acqe_grp5)
6857 uint16_t prev_ll_spd;
6859 phba->fc_eventTag = acqe_grp5->event_tag;
6860 phba->fcoe_eventtag = acqe_grp5->event_tag;
6861 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
6862 phba->sli4_hba.link_state.logical_speed =
6863 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
6864 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6865 "2789 GRP5 Async Event: Updating logical link speed "
6866 "from %dMbps to %dMbps\n", prev_ll_spd,
6867 phba->sli4_hba.link_state.logical_speed);
6871 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
6872 * @phba: pointer to lpfc hba data structure.
6874 * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event
6875 * is an asynchronous notification of a request to reset CM stats.
6878 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
6882 lpfc_init_congestion_stat(phba);
6886 * lpfc_cgn_params_val - Validate FW congestion parameters.
6887 * @phba: pointer to lpfc hba data structure.
6888 * @p_cfg_param: pointer to FW provided congestion parameters.
6890 * This routine validates the congestion parameters passed
6891 * by the FW to the driver via an ACQE event.
6894 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
6896 spin_lock_irq(&phba->hbalock);
6898 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
6899 LPFC_CFG_MONITOR)) {
6900 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
6901 "6225 CMF mode param out of range: %d\n",
6902 p_cfg_param->cgn_param_mode);
6903 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
6906 spin_unlock_irq(&phba->hbalock);
6910 * lpfc_cgn_params_parse - Process a FW cong parm change event
6911 * @phba: pointer to lpfc hba data structure.
6912 * @p_cgn_param: pointer to a data buffer with the FW cong params.
6913 * @len: the size of pdata in bytes.
6915 * This routine validates the congestion management buffer signature
6916 * from the FW, validates the contents, and corrects out-of-range
6917 * values to valid defaults. If the signature magic is correct and
6918 * after parameter validation, the contents are copied to the driver's
6919 * @phba structure. If the magic is incorrect, an error message is logged.
6923 lpfc_cgn_params_parse(struct lpfc_hba *phba,
6924 struct lpfc_cgn_param *p_cgn_param, uint32_t len)
6926 struct lpfc_cgn_info *cp;
6927 uint32_t crc, oldmode;
6929 /* Make sure the FW has encoded the correct magic number to
6930 * validate the congestion parameter in FW memory.
6932 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
6933 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
6934 "4668 FW cgn parm buffer data: "
6935 "magic 0x%x version %d mode %d "
6936 "level0 %d level1 %d "
6937 "level2 %d byte13 %d "
6938 "byte14 %d byte15 %d "
6939 "byte11 %d byte12 %d activeMode %d\n",
6940 p_cgn_param->cgn_param_magic,
6941 p_cgn_param->cgn_param_version,
6942 p_cgn_param->cgn_param_mode,
6943 p_cgn_param->cgn_param_level0,
6944 p_cgn_param->cgn_param_level1,
6945 p_cgn_param->cgn_param_level2,
6946 p_cgn_param->byte13,
6947 p_cgn_param->byte14,
6948 p_cgn_param->byte15,
6949 p_cgn_param->byte11,
6950 p_cgn_param->byte12,
6951 phba->cmf_active_mode);
6953 oldmode = phba->cmf_active_mode;
6955 /* Any parameters out of range are corrected to defaults
6956 * by this routine. No need to fail.
6958 lpfc_cgn_params_val(phba, p_cgn_param);
6960 /* Parameters are verified, move them into driver storage */
6961 spin_lock_irq(&phba->hbalock);
6962 memcpy(&phba->cgn_p, p_cgn_param,
6963 sizeof(struct lpfc_cgn_param));
6965 /* Update parameters in congestion info buffer now */
6967 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
6968 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
6969 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
6970 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
6971 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
6972 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
6973 LPFC_CGN_CRC32_SEED);
6974 cp->cgn_info_crc = cpu_to_le32(crc);
6976 spin_unlock_irq(&phba->hbalock);
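/*
 * Note on the CRC above: it is computed by lpfc_cgn_calc_crc32() over
 * LPFC_CGN_INFO_SZ bytes with the fixed LPFC_CGN_CRC32_SEED and stored
 * little-endian. A consumer validating the buffer would presumably
 * mirror the same steps (sketch, assuming identical helper semantics):
 *
 *	u32 chk = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
 *				      LPFC_CGN_CRC32_SEED);
 *	bool valid = (cp->cgn_info_crc == cpu_to_le32(chk));
 */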
6978 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
6982 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
6983 /* Turning CMF on */
6984 lpfc_cmf_start(phba);
6986 if (phba->link_state >= LPFC_LINK_UP) {
6987 phba->cgn_reg_fpin =
6988 phba->cgn_init_reg_fpin;
6989 phba->cgn_reg_signal =
6990 phba->cgn_init_reg_signal;
6991 lpfc_issue_els_edc(phba->pport, 0);
6995 case LPFC_CFG_MANAGED:
6996 switch (phba->cgn_p.cgn_param_mode) {
6998 /* Turning CMF off */
6999 lpfc_cmf_stop(phba);
7000 if (phba->link_state >= LPFC_LINK_UP)
7001 lpfc_issue_els_edc(phba->pport, 0);
7003 case LPFC_CFG_MONITOR:
7004 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7005 "4661 Switch from MANAGED to "
7007 phba->cmf_max_bytes_per_interval =
7008 phba->cmf_link_byte_count;
7010 /* Resume blocked IO - unblock on workqueue */
7011 queue_work(phba->wq,
7012 &phba->unblock_request_work);
7016 case LPFC_CFG_MONITOR:
7017 switch (phba->cgn_p.cgn_param_mode) {
7019 /* Turning CMF off */
7020 lpfc_cmf_stop(phba);
7021 if (phba->link_state >= LPFC_LINK_UP)
7022 lpfc_issue_els_edc(phba->pport, 0);
7024 case LPFC_CFG_MANAGED:
7025 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7026 "4662 Switch from MONITOR to "
7028 lpfc_cmf_signal_init(phba);
7034 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7035 "4669 FW cgn parm buf wrong magic 0x%x "
7036 "version %d\n", p_cgn_param->cgn_param_magic,
7037 p_cgn_param->cgn_param_version);
7042 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
7043 * @phba: pointer to lpfc hba data structure.
7045 * This routine issues a read_object mailbox command to
7046 * get the congestion management parameters from the FW,
7047 * parses them, and updates the driver-maintained values.
7050 * 0 if the object was empty
7051 * A negative errno if an error was encountered
7052 * Count if bytes were read from object
7055 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
7058 struct lpfc_cgn_param *p_cgn_param = NULL;
7062 /* Find out if the FW has a new set of congestion parameters. */
7063 len = sizeof(struct lpfc_cgn_param);
7064 pdata = kzalloc(len, GFP_KERNEL);
if (!pdata)
return -ENOMEM;
7065 ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
7068 /* 0 means no data. A negative means error. A positive means
7069 * bytes were copied.
7072 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7073 "4670 CGN RD OBJ returns no data\n");
7075 } else if (ret < 0) {
7076 /* Some error. Just exit and return it to the caller.*/
7080 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7081 "6234 READ CGN PARAMS Successful %d\n", len);
7083 /* Parse data pointer over len and update the phba congestion
7084 * parameters with values passed back. The receive rate values
7085 * may have been altered in FW, but take no action here.
7087 p_cgn_param = (struct lpfc_cgn_param *)pdata;
7088 lpfc_cgn_params_parse(phba, p_cgn_param, len);
7096 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
7097 * @phba: pointer to lpfc hba data structure.
7099 * The FW generated Async ACQE SLI event calls this routine when
7100 * the event type is an SLI Internal Port Event and the Event Code
7101 * indicates a change to the FW maintained congestion parameters.
7103 * This routine executes a Read_Object mailbox call to obtain the
7104 * current congestion parameters maintained in FW and corrects
7105 * the driver's active congestion parameters.
7107 * The acqe event is not passed because there is no further data required.
7110 * Returns nonzero error if event processing encountered an error.
7111 * Zero otherwise for success.
7114 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
7118 if (!phba->sli4_hba.pc_sli4_params.cmf) {
7119 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7120 "4664 Cgn Evt when E2E off. Drop event\n");
7124 /* If the event is claiming an empty object, it's ok. A write
7125 * could have cleared it. The only error is a negative return status.
7128 ret = lpfc_sli4_cgn_params_read(phba);
7130 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7131 "4667 Error reading Cgn Params (%d)\n",
7134 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7135 "4673 CGN Event empty object.\n");
7141 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
7142 * @phba: pointer to lpfc hba data structure.
7144 * This routine is invoked by the worker thread to process all the pending
7145 * SLI4 asynchronous events.
7147 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
7149 struct lpfc_cq_event *cq_event;
7150 unsigned long iflags;
7152 /* First, declare the async event has been handled */
7153 spin_lock_irqsave(&phba->hbalock, iflags);
7154 phba->hba_flag &= ~ASYNC_EVENT;
7155 spin_unlock_irqrestore(&phba->hbalock, iflags);
7157 /* Now, handle all the async events */
7158 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7159 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
7160 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
7161 cq_event, struct lpfc_cq_event, list);
7162 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
7165 /* Process the asynchronous event */
7166 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
7167 case LPFC_TRAILER_CODE_LINK:
7168 lpfc_sli4_async_link_evt(phba,
7169 &cq_event->cqe.acqe_link);
7171 case LPFC_TRAILER_CODE_FCOE:
7172 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
7174 case LPFC_TRAILER_CODE_DCBX:
7175 lpfc_sli4_async_dcbx_evt(phba,
7176 &cq_event->cqe.acqe_dcbx);
7178 case LPFC_TRAILER_CODE_GRP5:
7179 lpfc_sli4_async_grp5_evt(phba,
7180 &cq_event->cqe.acqe_grp5);
7182 case LPFC_TRAILER_CODE_FC:
7183 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
7185 case LPFC_TRAILER_CODE_SLI:
7186 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
7188 case LPFC_TRAILER_CODE_CMSTAT:
7189 lpfc_sli4_async_cmstat_evt(phba);
7192 lpfc_printf_log(phba, KERN_ERR,
7194 "1804 Invalid asynchronous event code: "
7195 "x%x\n", bf_get(lpfc_trailer_code,
7196 &cq_event->cqe.mcqe_cmpl));
7200 /* Free the completion event processed to the free pool */
7201 lpfc_sli4_cq_event_release(phba, cq_event);
7202 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7204 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
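/*
 * The loop above uses the usual "lock, detach one node, unlock,
 * process, relock" drain pattern so each handler runs without the
 * list lock held. Schematically (handle_event is hypothetical):
 *
 *	spin_lock_irqsave(&lock, iflags);
 *	while (!list_empty(&queue)) {
 *		list_remove_head(&queue, ev, struct lpfc_cq_event, list);
 *		spin_unlock_irqrestore(&lock, iflags);
 *		handle_event(ev);	// may block on other locks
 *		spin_lock_irqsave(&lock, iflags);
 *	}
 *	spin_unlock_irqrestore(&lock, iflags);
 */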
7208 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
7209 * @phba: pointer to lpfc hba data structure.
7211 * This routine is invoked by the worker thread to process the pending
7212 * FCF table rediscovery completion event.
7214 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
7218 spin_lock_irq(&phba->hbalock);
7219 /* Clear FCF rediscovery timeout event */
7220 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
7221 /* Clear driver fast failover FCF record flag */
7222 phba->fcf.failover_rec.flag = 0;
7223 /* Set state for FCF fast failover */
7224 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
7225 spin_unlock_irq(&phba->hbalock);
7227 /* Scan FCF table from the first entry to re-discover SAN */
7228 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
7229 "2777 Start post-quiescent FCF table scan\n");
7230 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
7232 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7233 "2747 Issue FCF scan read FCF mailbox "
7234 "command failed 0x%x\n", rc);
7238 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
7239 * @phba: pointer to lpfc hba data structure.
7240 * @dev_grp: The HBA PCI-Device group number.
7242 * This routine is invoked to set up the per HBA PCI-Device group function
7243 * API jump table entries.
7245 * Return: 0 if success, otherwise -ENODEV
7248 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7252 /* Set up lpfc PCI-device group */
7253 phba->pci_dev_grp = dev_grp;
7255 /* The LPFC_PCI_DEV_OC uses SLI4 */
7256 if (dev_grp == LPFC_PCI_DEV_OC)
7257 phba->sli_rev = LPFC_SLI_REV4;
7259 /* Set up device INIT API function jump table */
7260 rc = lpfc_init_api_table_setup(phba, dev_grp);
7263 /* Set up SCSI API function jump table */
7264 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
7267 /* Set up SLI API function jump table */
7268 rc = lpfc_sli_api_table_setup(phba, dev_grp);
7271 /* Set up MBOX API function jump table */
7272 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
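/*
 * Each *_api_table_setup() call above fills a per-revision ops table so
 * hot paths dispatch through function pointers rather than branching on
 * sli_rev everywhere. A sketch of the idea (member name hypothetical):
 *
 *	phba->lpfc_hba_down_post = (phba->sli_rev == LPFC_SLI_REV4) ?
 *			lpfc_hba_down_post_s4 : lpfc_hba_down_post_s3;
 */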
7280 * lpfc_log_intr_mode - Log the active interrupt mode
7281 * @phba: pointer to lpfc hba data structure.
7282 * @intr_mode: active interrupt mode adopted.
7284 * This routine is invoked to log the currently used active interrupt mode
7287 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
7289 switch (intr_mode) {
7291 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7292 "0470 Enable INTx interrupt mode.\n");
7295 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7296 "0481 Enabled MSI interrupt mode.\n");
7299 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7300 "0480 Enabled MSI-X interrupt mode.\n");
7303 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7304 "0482 Illegal interrupt mode.\n");
7311 * lpfc_enable_pci_dev - Enable a generic PCI device.
7312 * @phba: pointer to lpfc hba data structure.
7314 * This routine is invoked to enable the PCI device that is common to all PCI devices.
7319 * other values - error
7322 lpfc_enable_pci_dev(struct lpfc_hba *phba)
7324 struct pci_dev *pdev;
7326 /* Obtain PCI device reference */
7330 pdev = phba->pcidev;
7331 /* Enable PCI device */
7332 if (pci_enable_device_mem(pdev))
7334 /* Request PCI resource for the device */
7335 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
7336 goto out_disable_device;
7337 /* Set up device as PCI master and save state for EEH */
7338 pci_set_master(pdev);
7339 pci_try_set_mwi(pdev);
7340 pci_save_state(pdev);
7342 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
7343 if (pci_is_pcie(pdev))
7344 pdev->needs_freset = 1;
7349 pci_disable_device(pdev);
7351 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7352 "1401 Failed to enable pci device\n");
7357 * lpfc_disable_pci_dev - Disable a generic PCI device.
7358 * @phba: pointer to lpfc hba data structure.
7360 * This routine is invoked to disable the PCI device that is common to all PCI devices.
7364 lpfc_disable_pci_dev(struct lpfc_hba *phba)
7366 struct pci_dev *pdev;
7368 /* Obtain PCI device reference */
7372 pdev = phba->pcidev;
7373 /* Release PCI resource and disable PCI device */
7374 pci_release_mem_regions(pdev);
7375 pci_disable_device(pdev);
7381 * lpfc_reset_hba - Reset a hba
7382 * @phba: pointer to lpfc hba data structure.
7384 * This routine is invoked to reset a hba device. It brings the HBA
7385 * offline, performs a board restart, and then brings the board back
7386 * online. The lpfc_offline call invokes lpfc_sli_hba_down, which cleans
7387 * up any outstanding mailbox commands.
7390 lpfc_reset_hba(struct lpfc_hba *phba)
7392 /* If resets are disabled then set error state and return. */
7393 if (!phba->cfg_enable_hba_reset) {
7394 phba->link_state = LPFC_HBA_ERROR;
7398 /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
7399 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7400 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7402 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7403 lpfc_sli_flush_io_rings(phba);
7406 lpfc_sli_brdrestart(phba);
7408 lpfc_unblock_mgmt_io(phba);
7412 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7413 * @phba: pointer to lpfc hba data structure.
7415 * This function reads the PCI SR-IOV extended capability of the physical
7416 * function to obtain the number of virtual functions the device supports
7417 * (the TotalVF field). It returns 0 if the device does not expose an
7418 * SR-IOV capability.
7422 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
7424 struct pci_dev *pdev = phba->pcidev;
7428 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
7432 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
7437 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
7438 * @phba: pointer to lpfc hba data structure.
7439 * @nr_vfn: number of virtual functions to be enabled.
7441 * This function enables the PCI SR-IOV virtual functions to a physical
7442 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7443 * enable the number of virtual functions to the physical function. As
7444 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
7445 * API call is not considered an error condition for most devices.
7448 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
7450 struct pci_dev *pdev = phba->pcidev;
7451 uint16_t max_nr_vfn;
7454 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
7455 if (nr_vfn > max_nr_vfn) {
7456 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7457 "3057 Requested vfs (%d) greater than "
7458 "supported vfs (%d)", nr_vfn, max_nr_vfn);
7462 rc = pci_enable_sriov(pdev, nr_vfn);
7464 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7465 "2806 Failed to enable sriov on this device "
7466 "with vfn number nr_vf:%d, rc:%d\n",
7469 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7470 "2807 Successful enable sriov on this device "
7471 "with vfn number nr_vf:%d\n", nr_vfn);
7476 lpfc_unblock_requests_work(struct work_struct *work)
7478 struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
7479 unblock_request_work);
7481 lpfc_unblock_requests(phba);
7485 * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources.
7486 * @phba: pointer to lpfc hba data structure.
7488 * This routine is invoked to set up the driver internal resources before the
7489 * device specific resource setup to support the HBA device it is attached to.
7493 * other values - error
7496 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
7498 struct lpfc_sli *psli = &phba->sli;
7501 * Driver resources common to all SLI revisions
7503 atomic_set(&phba->fast_event_count, 0);
7504 atomic_set(&phba->dbg_log_idx, 0);
7505 atomic_set(&phba->dbg_log_cnt, 0);
7506 atomic_set(&phba->dbg_log_dmping, 0);
7507 spin_lock_init(&phba->hbalock);
7509 /* Initialize port_list spinlock */
7510 spin_lock_init(&phba->port_list_lock);
7511 INIT_LIST_HEAD(&phba->port_list);
7513 INIT_LIST_HEAD(&phba->work_list);
7514 init_waitqueue_head(&phba->wait_4_mlo_m_q);
7516 /* Initialize the wait queue head for the kernel thread */
7517 init_waitqueue_head(&phba->work_waitq);
7519 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7520 "1403 Protocols supported %s %s %s\n",
7521 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
7523 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
7525 (phba->nvmet_support ? "NVMET" : " "));
7527 /* Initialize the IO buffer list used by driver for SLI3 SCSI */
7528 spin_lock_init(&phba->scsi_buf_list_get_lock);
7529 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
7530 spin_lock_init(&phba->scsi_buf_list_put_lock);
7531 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
7533 /* Initialize the fabric iocb list */
7534 INIT_LIST_HEAD(&phba->fabric_iocb_list);
7536 /* Initialize list to save ELS buffers */
7537 INIT_LIST_HEAD(&phba->elsbuf);
7539 /* Initialize FCF connection rec list */
7540 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
7542 /* Initialize OAS configuration list */
7543 spin_lock_init(&phba->devicelock);
7544 INIT_LIST_HEAD(&phba->luns);
7546 /* MBOX heartbeat timer */
7547 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
7548 /* Fabric block timer */
7549 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
7550 /* EA polling mode timer */
7551 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
7552 /* Heartbeat timer */
7553 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
7555 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
7557 INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
7558 lpfc_idle_stat_delay_work);
7559 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
7564 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
7565 * @phba: pointer to lpfc hba data structure.
7567 * This routine is invoked to set up the driver internal resources specific to
7568 * support the SLI-3 HBA device it is attached to.
7572 * other values - error
7575 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
7580 * Initialize timers used by driver
7583 /* FCP polling mode timer */
7584 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
7586 /* Host attention work mask setup */
7587 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
7588 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
7590 /* Get all the module params for configuring this host */
7591 lpfc_get_cfgparam(phba);
7592 /* Set up phase-1 common device driver resources */
7594 rc = lpfc_setup_driver_resource_phase1(phba);
7598 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
7599 phba->menlo_flag |= HBA_MENLO_SUPPORT;
7600 /* check for menlo minimum sg count */
7601 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
7602 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
7605 if (!phba->sli.sli3_ring)
7606 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
7607 sizeof(struct lpfc_sli_ring),
7609 if (!phba->sli.sli3_ring)
7613 * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
7614 * used to create the sg_dma_buf_pool must be dynamically calculated.
7617 if (phba->sli_rev == LPFC_SLI_REV4)
7618 entry_sz = sizeof(struct sli4_sge);
7620 entry_sz = sizeof(struct ulp_bde64);
7622 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
7623 if (phba->cfg_enable_bg) {
7625 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
7626 * the FCP rsp, and a BDE for each. Since we have no control
7627 * over how many protection data segments the SCSI Layer
7628 * will hand us (i.e., there could be one for every block
7629 * in the IO), we just allocate enough BDEs to accommodate
7630 * our max amount, and we need to limit lpfc_sg_seg_cnt to
7631 * minimize the risk of running out.
7633 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7634 sizeof(struct fcp_rsp) +
7635 (LPFC_MAX_SG_SEG_CNT * entry_sz);
7637 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7638 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
7640 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
7641 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7644 * The scsi_buf for a regular I/O will hold the FCP cmnd,
7645 * the FCP rsp, a BDE for each, and a BDE for up to
7646 * cfg_sg_seg_cnt data segments.
7648 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7649 sizeof(struct fcp_rsp) +
7650 ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
7652 /* Total BDEs in BPL for scsi_sg_list */
7653 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
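/*
 * Worked example (non-BG path): with cfg_sg_seg_cnt = 64, the pool
 * element size is sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
 * (64 + 2) * entry_sz, i.e. the two reserved BDEs plus one BDE per
 * data segment, and cfg_total_seg_cnt becomes 66.
 */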
7656 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7657 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
7658 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7659 phba->cfg_total_seg_cnt);
7661 phba->max_vpi = LPFC_MAX_VPI;
7662 /* This will be set to correct value after config_port mbox */
7663 phba->max_vports = 0;
7666 * Initialize the SLI Layer to run with lpfc HBAs.
7668 lpfc_sli_setup(phba);
7669 lpfc_sli_queue_init(phba);
7671 /* Allocate device driver memory */
7672 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
7675 phba->lpfc_sg_dma_buf_pool =
7676 dma_pool_create("lpfc_sg_dma_buf_pool",
7677 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7680 if (!phba->lpfc_sg_dma_buf_pool)
7683 phba->lpfc_cmd_rsp_buf_pool =
7684 dma_pool_create("lpfc_cmd_rsp_buf_pool",
7686 sizeof(struct fcp_cmnd) +
7687 sizeof(struct fcp_rsp),
7690 if (!phba->lpfc_cmd_rsp_buf_pool)
7691 goto fail_free_dma_buf_pool;
7694 * Enable sr-iov virtual functions if supported and configured
7695 * through the module parameter.
7697 if (phba->cfg_sriov_nr_virtfn > 0) {
7698 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7699 phba->cfg_sriov_nr_virtfn);
7701 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7702 "2808 Requested number of SR-IOV "
7703 "virtual functions (%d) is not "
7705 phba->cfg_sriov_nr_virtfn);
7706 phba->cfg_sriov_nr_virtfn = 0;
7712 fail_free_dma_buf_pool:
7713 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7714 phba->lpfc_sg_dma_buf_pool = NULL;
7716 lpfc_mem_free(phba);
7721 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
7722 * @phba: pointer to lpfc hba data structure.
7724 * This routine is invoked to unset the driver internal resources set up
7725 * specifically to support the SLI-3 HBA device it is attached to.
7728 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
7730 /* Free device driver memory allocated */
7731 lpfc_mem_free_all(phba);
7737 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
7738 * @phba: pointer to lpfc hba data structure.
7740 * This routine is invoked to set up the driver internal resources specific to
7741 * support the SLI-4 HBA device it is attached to.
7745 * other values - error
7748 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
7750 LPFC_MBOXQ_t *mboxq;
7752 int rc, i, max_buf_size;
7759 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7760 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
7761 phba->sli4_hba.curr_disp_cpu = 0;
7763 /* Get all the module params for configuring this host */
7764 lpfc_get_cfgparam(phba);
7766 /* Set up phase-1 common device driver resources */
7767 rc = lpfc_setup_driver_resource_phase1(phba);
7771 /* Before proceed, wait for POST done and device ready */
7772 rc = lpfc_sli4_post_status_check(phba);
7776 /* Allocate all driver workqueues here */
7778 /* The lpfc_wq workqueue for deferred irq use */
7779 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
7782 * Initialize timers used by driver
7785 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
7787 /* FCF rediscover timer */
7788 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
7790 /* CMF congestion timer */
7791 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7792 phba->cmf_timer.function = lpfc_cmf_timer;
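/*
 * The CMF hrtimer is only initialized here; it is presumably armed
 * later (when CMF monitoring starts) with a relative-mode start along
 * the lines of:
 *
 *	hrtimer_start(&phba->cmf_timer,
 *		      ktime_set(0, LPFC_CMF_INTERVAL * NSEC_PER_MSEC),
 *		      HRTIMER_MODE_REL);
 */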
7795 * Control structure for handling external multi-buffer mailbox
7796 * command pass-through.
7798 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
7799 sizeof(struct lpfc_mbox_ext_buf_ctx));
7800 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
7802 phba->max_vpi = LPFC_MAX_VPI;
7804 /* This will be set to correct value after the read_config mbox */
7805 phba->max_vports = 0;
7807 /* Program the default value of vlan_id and fc_map */
7808 phba->valid_vlan = 0;
7809 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
7810 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
7811 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
7814 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
7815 * we will associate a new ring, for each EQ/CQ/WQ tuple.
7816 * The WQ create will allocate the ring.
7819 /* Initialize buffer queue management fields */
7820 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
7821 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
7822 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
7824 /* for VMID idle timeout if VMID is enabled */
7825 if (lpfc_is_vmid_enabled(phba))
7826 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
7829 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
7831 /* Initialize the Abort buffer list used by driver */
7832 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
7833 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
7835 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
7836 /* Initialize the Abort nvme buffer list used by driver */
7837 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
7838 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
7839 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
7840 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
7841 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
7844 /* This abort list used by worker thread */
7845 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
7846 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
7847 spin_lock_init(&phba->sli4_hba.asynce_list_lock);
7848 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
7851 * Initialize driver internal slow-path work queues
7854 /* Driver internal slow-path CQ Event pool */
7855 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
7856 /* Response IOCB work queue list */
7857 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
7858 /* Asynchronous event CQ Event work queue list */
7859 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
7860 /* Slow-path XRI aborted CQ Event work queue list */
7861 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
7862 /* Receive queue CQ Event work queue list */
7863 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
7865 /* Initialize extent block lists. */
7866 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
7867 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
7868 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
7869 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
7871 /* Initialize mboxq lists. Even if the early init routines fail,
7872 * these lists need to be correctly initialized for later cleanup.
7874 INIT_LIST_HEAD(&phba->sli.mboxq);
7875 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
7877 /* initialize optic_state to 0xFF */
7878 phba->sli4_hba.lnk_info.optic_state = 0xff;
7880 /* Allocate device driver memory */
7881 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
7885 /* IF Type 2 ports get initialized now. */
7886 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
7887 LPFC_SLI_INTF_IF_TYPE_2) {
7888 rc = lpfc_pci_function_reset(phba);
7893 phba->temp_sensor_support = 1;
7896 /* Create the bootstrap mailbox command */
7897 rc = lpfc_create_bootstrap_mbox(phba);
7901 /* Set up the host's endian order with the device. */
7902 rc = lpfc_setup_endian_order(phba);
7904 goto out_free_bsmbx;
7906 /* Set up the hba's configuration parameters. */
7907 rc = lpfc_sli4_read_config(phba);
7909 goto out_free_bsmbx;
7910 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
7912 goto out_free_bsmbx;
7914 /* IF Type 0 ports get initialized now. */
7915 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7916 LPFC_SLI_INTF_IF_TYPE_0) {
7917 rc = lpfc_pci_function_reset(phba);
7919 goto out_free_bsmbx;
7922 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7926 goto out_free_bsmbx;
7929 /* Check for NVMET being configured */
7930 phba->nvmet_support = 0;
7931 if (lpfc_enable_nvmet_cnt) {
7933 /* First get WWN of HBA instance */
7934 lpfc_read_nv(phba, mboxq);
7935 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7936 if (rc != MBX_SUCCESS) {
7937 lpfc_printf_log(phba, KERN_ERR,
7939 "6016 Mailbox failed , mbxCmd x%x "
7940 "READ_NV, mbxStatus x%x\n",
7941 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7942 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
7943 mempool_free(mboxq, phba->mbox_mem_pool);
7945 goto out_free_bsmbx;
7948 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
7950 wwn = cpu_to_be64(wwn);
7951 phba->sli4_hba.wwnn.u.name = wwn;
7952 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
7954 /* wwn is WWPN of HBA instance */
7955 wwn = cpu_to_be64(wwn);
7956 phba->sli4_hba.wwpn.u.name = wwn;
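/*
 * Worked example (illustrative WWN): if the mailbox returns the
 * big-endian byte string 10:00:00:90:fa:12:34:56, the memcpy() on a
 * little-endian host yields the byte-reversed integer and
 * cpu_to_be64() swaps it back, so wwn holds 0x10000090fa123456 for
 * the module parameter comparison below. On a big-endian host
 * cpu_to_be64() is a no-op and the memcpy() already gives that value.
 */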
7958 /* Check to see if it matches any module parameter */
7959 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
7960 if (wwn == lpfc_enable_nvmet[i]) {
7961 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
7962 if (lpfc_nvmet_mem_alloc(phba))
7965 phba->nvmet_support = 1; /* a match */
7967 lpfc_printf_log(phba, KERN_ERR,
7969 "6017 NVME Target %016llx\n",
7972 lpfc_printf_log(phba, KERN_ERR,
7974 "6021 Can't enable NVME Target."
7975 " NVME_TARGET_FC infrastructure"
7976 " is not in kernel\n");
7978 /* Not supported for NVMET */
7979 phba->cfg_xri_rebalancing = 0;
7980 if (phba->irq_chann_mode == NHT_MODE) {
7981 phba->cfg_irq_chann =
7982 phba->sli4_hba.num_present_cpu;
7983 phba->cfg_hdw_queue =
7984 phba->sli4_hba.num_present_cpu;
7985 phba->irq_chann_mode = NORMAL_MODE;
7992 lpfc_nvme_mod_param_dep(phba);
7995 * Get sli4 parameters that override parameters from Port capabilities.
7996 * If this call fails, it isn't critical unless the SLI4 parameters come back in conflict.
7999 rc = lpfc_get_sli4_parameters(phba, mboxq);
8001 if_type = bf_get(lpfc_sli_intf_if_type,
8002 &phba->sli4_hba.sli_intf);
8003 if_fam = bf_get(lpfc_sli_intf_sli_family,
8004 &phba->sli4_hba.sli_intf);
8005 if (phba->sli4_hba.extents_in_use &&
8006 phba->sli4_hba.rpi_hdrs_in_use) {
8007 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8008 "2999 Unsupported SLI4 Parameters "
8009 "Extents and RPI headers enabled.\n");
8010 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8011 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
8012 mempool_free(mboxq, phba->mbox_mem_pool);
8014 goto out_free_bsmbx;
8017 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8018 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
8019 mempool_free(mboxq, phba->mbox_mem_pool);
8021 goto out_free_bsmbx;
8026 * 1 for cmd, 1 for rsp, NVME adds an extra one
8027 * for boundary conditions in its max_sgl_segment template.
8030 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
extra++;
8034 * It doesn't matter what family our adapter is in, we are
8035 * limited to 2 Pages, 512 SGEs, for our SGL.
8036 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
8038 max_buf_size = (2 * SLI4_PAGE_SIZE);
8041 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
8042 * used to create the sg_dma_buf_pool must be calculated.
8044 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8045 /* Both cfg_enable_bg and cfg_external_dif code paths */
8048 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
8049 * the FCP rsp, and a SGE for each. Since we have no control
8050 * over how many protection segments the SCSI Layer
8051 * will hand us (i.e., there could be one for every block
8052 * in the IO), just allocate enough SGEs to accommodate
8053 * our max amount, and we need to limit lpfc_sg_seg_cnt
8054 * to minimize the risk of running out.
8056 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8057 sizeof(struct fcp_rsp) + max_buf_size;
8059 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
8060 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8063 * If supporting DIF, reduce the seg count for scsi to
8064 * allow room for the DIF sges.
8066 if (phba->cfg_enable_bg &&
8067 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8068 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8070 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8074 * The scsi_buf for a regular I/O holds the FCP cmnd,
8075 * the FCP rsp, a SGE for each, and a SGE for up to
8076 * cfg_sg_seg_cnt data segments.
8078 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8079 sizeof(struct fcp_rsp) +
8080 ((phba->cfg_sg_seg_cnt + extra) *
8081 sizeof(struct sli4_sge));
8083 /* Total SGEs for scsi_sg_list */
8084 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8085 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8088 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8089 * need to post 1 page for the SGL.
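	/*
	 * Editor's note (illustrative sketch, not driver code): with the
	 * sizes typical of SLI-4 parts (a 16-byte SGE descriptor, 32-byte
	 * FCP_CMND and 96-byte FCP_RSP are representative; the exact values
	 * come from the driver headers), the non-DIF branch above works out
	 * roughly as:
	 *
	 *	cfg_sg_seg_cnt = 64, extra = 3 (NVME enabled)
	 *	buf = 32 + 96 + (64 + 3) * 16 = 1200 bytes
	 *
	 * which is then rounded up to SLI4_PAGE_SIZE alignment below, so one
	 * pool buffer per I/O carries the command, response and SGL together.
	 */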
	if (phba->cfg_xpsgl && !phba->nvmet_support)
		phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
	else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
	else
		phba->cfg_sg_dma_buf_size =
				SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);

	phba->border_sge_num = phba->cfg_sg_dma_buf_size /
			       sizeof(struct sli4_sge);

	/* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
					"6300 Reducing NVME sg segment "
					"cnt to %d\n",
					LPFC_MAX_NVME_SEG_CNT);
			phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
		} else
			phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9087 sg_seg_cnt:%d dmabuf_size:%d "
			"total:%d scsi:%d nvme:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
			phba->cfg_nvme_seg_cnt);

	if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
		i = phba->cfg_sg_dma_buf_size;
	else
		i = SLI4_PAGE_SIZE;

	phba->lpfc_sg_dma_buf_pool =
			dma_pool_create("lpfc_sg_dma_buf_pool",
					&phba->pcidev->dev,
					phba->cfg_sg_dma_buf_size,
					i, 0);
	if (!phba->lpfc_sg_dma_buf_pool)
		goto out_free_bsmbx;

	phba->lpfc_cmd_rsp_buf_pool =
			dma_pool_create("lpfc_cmd_rsp_buf_pool",
					&phba->pcidev->dev,
					sizeof(struct fcp_cmnd) +
					sizeof(struct fcp_rsp),
					i, 0);
	if (!phba->lpfc_cmd_rsp_buf_pool)
		goto out_free_sg_dma_buf;

	mempool_free(mboxq, phba->mbox_mem_pool);
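	/*
	 * Editor's note (illustrative sketch, not driver code):
	 * dma_pool_create() only builds the allocator; buffers are carved
	 * out later, one per I/O, with dma_pool_alloc()/dma_pool_free().
	 * A minimal usage sketch of the API used above:
	 *
	 *	dma_addr_t dma;
	 *	void *buf;
	 *
	 *	buf = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
	 *			     GFP_KERNEL, &dma);
	 *	if (buf)
	 *		dma_pool_free(phba->lpfc_sg_dma_buf_pool, buf, dma);
	 *
	 * The alignment argument ('i' above) is intended to keep each
	 * buffer from straddling a 4KB boundary once the buffer size
	 * reaches SLI4_PAGE_SIZE.
	 */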
	/* Verify OAS is supported */
	lpfc_sli4_oas_verify(phba);

	/* Verify RAS support on adapter */
	lpfc_sli4_ras_init(phba);

	/* Verify all the SLI4 queues */
	rc = lpfc_sli4_queue_verify(phba);
	if (rc)
		goto out_free_cmd_rsp_buf;

	/* Create driver internal CQE event pool */
	rc = lpfc_sli4_cq_event_pool_create(phba);
	if (rc)
		goto out_free_cmd_rsp_buf;

	/* Initialize sgl lists per host */
	lpfc_init_sgl_list(phba);

	/* Allocate and initialize active sgl array */
	rc = lpfc_init_active_sgl_array(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1430 Failed to initialize sgl list.\n");
		goto out_destroy_cq_event_pool;
	}
	rc = lpfc_sli4_init_rpi_hdrs(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1432 Failed to initialize rpi headers.\n");
		goto out_free_active_sgl;
	}

	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
	phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
					 GFP_KERNEL);
	if (!phba->fcf.fcf_rr_bmask) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2759 Failed allocate memory for FCF round "
				"robin failover bmask\n");
		rc = -ENOMEM;
		goto out_remove_rpi_hdrs;
	}
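	/*
	 * Editor's note (illustrative sketch, not driver code): the 'longs'
	 * computation above is the standard round-up of a bit count to a
	 * number of unsigned longs, i.e. DIV_ROUND_UP(nbits, BITS_PER_LONG),
	 * which the kernel also provides as BITS_TO_LONGS().  An equivalent
	 * spelling of the allocation:
	 *
	 *	longs = BITS_TO_LONGS(LPFC_SLI4_FCF_TBL_INDX_MAX);
	 *	phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
	 *					 GFP_KERNEL);
	 *
	 * On a 64-bit build any table-index maximum up to 64 fits in a
	 * single unsigned long.
	 */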
	phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
					    sizeof(struct lpfc_hba_eq_hdl),
					    GFP_KERNEL);
	if (!phba->sli4_hba.hba_eq_hdl) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2572 Failed allocate memory for "
				"fast-path per-EQ handle array\n");
		rc = -ENOMEM;
		goto out_free_fcf_rr_bmask;
	}

	phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
					 sizeof(struct lpfc_vector_map_info),
					 GFP_KERNEL);
	if (!phba->sli4_hba.cpu_map) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3327 Failed allocate memory for msi-x "
				"interrupt vector mapping\n");
		rc = -ENOMEM;
		goto out_free_hba_eq_hdl;
	}

	phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
	if (!phba->sli4_hba.eq_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3321 Failed allocation for per_cpu stats\n");
		rc = -ENOMEM;
		goto out_free_hba_cpu_map;
	}

	phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
					   sizeof(*phba->sli4_hba.idle_stat),
					   GFP_KERNEL);
	if (!phba->sli4_hba.idle_stat) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3390 Failed allocation for idle_stat\n");
		rc = -ENOMEM;
		goto out_free_hba_eq_info;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
	if (!phba->sli4_hba.c_stat) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3332 Failed allocating per cpu hdwq stats\n");
		rc = -ENOMEM;
		goto out_free_hba_idle_stat;
	}
#endif

	phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
	if (!phba->cmf_stat) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3331 Failed allocating per cpu cgn stats\n");
		rc = -ENOMEM;
		goto out_free_hba_hdwq_info;
	}
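	/*
	 * Editor's note (illustrative sketch, not driver code):
	 * alloc_percpu() returns one instance of the type per possible CPU;
	 * each CPU's copy is reached through per_cpu_ptr()/this_cpu_ptr(),
	 * and the whole set is released with a single free_percpu(), as in
	 * the unwind path below.  A minimal access sketch for the EQ stats
	 * allocated above:
	 *
	 *	struct lpfc_eq_intr_info *info;
	 *	int cpu;
	 *
	 *	for_each_possible_cpu(cpu) {
	 *		info = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
	 *		info->icnt = 0;
	 *	}
	 *
	 * (icnt is used as a stand-in member name for the sketch.)
	 */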
	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3020 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;
out_free_hba_hdwq_info:
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	free_percpu(phba->sli4_hba.c_stat);
out_free_hba_idle_stat:
#endif
	kfree(phba->sli4_hba.idle_stat);
out_free_hba_eq_info:
	free_percpu(phba->sli4_hba.eq_info);
out_free_hba_cpu_map:
	kfree(phba->sli4_hba.cpu_map);
out_free_hba_eq_hdl:
	kfree(phba->sli4_hba.hba_eq_hdl);
out_free_fcf_rr_bmask:
	kfree(phba->fcf.fcf_rr_bmask);
out_remove_rpi_hdrs:
	lpfc_sli4_remove_rpi_hdrs(phba);
out_free_active_sgl:
	lpfc_free_active_sgl(phba);
out_destroy_cq_event_pool:
	lpfc_sli4_cq_event_pool_destroy(phba);
out_free_cmd_rsp_buf:
	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
	phba->lpfc_cmd_rsp_buf_pool = NULL;
out_free_sg_dma_buf:
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;
out_free_bsmbx:
	lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
	lpfc_mem_free(phba);
	return rc;
}
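/*
 * Editor's note (illustrative sketch, not driver code): the label ladder
 * above is the usual kernel unwind idiom - each label releases exactly the
 * resources acquired *before* the failing step, in reverse order, so every
 * 'goto' target frees a strict suffix of the setup sequence.  The pattern
 * in miniature:
 *
 *	a = alloc_a();
 *	if (!a)
 *		return -ENOMEM;
 *	b = alloc_b();
 *	if (!b)
 *		goto err_a;
 *	c = alloc_c();
 *	if (!c)
 *		goto err_b;
 *	return 0;
 * err_b:
 *	free_b(b);
 * err_a:
 *	free_a(a);
 *	return -ENOMEM;
 */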
/**
 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-4 HBA device it attached to.
 **/
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	free_percpu(phba->sli4_hba.eq_info);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	free_percpu(phba->sli4_hba.c_stat);
#endif
	free_percpu(phba->cmf_stat);
	kfree(phba->sli4_hba.idle_stat);

	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
	kfree(phba->sli4_hba.cpu_map);
	phba->sli4_hba.num_possible_cpu = 0;
	phba->sli4_hba.num_present_cpu = 0;
	phba->sli4_hba.curr_disp_cpu = 0;
	cpumask_clear(&phba->sli4_hba.irq_aff_mask);

	/* Free memory allocated for fast-path work queue handles */
	kfree(phba->sli4_hba.hba_eq_hdl);

	/* Free the allocated rpi headers. */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free eligible FCF index bmask */
	kfree(phba->fcf.fcf_rr_bmask);

	/* Free the ELS sgl list */
	lpfc_free_active_sgl(phba);
	lpfc_free_els_sgl_list(phba);
	lpfc_free_nvmet_sgl_list(phba);

	/* Free the completion queue EQ event pool */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Release resource identifiers. */
	lpfc_sli4_dealloc_resource_identifiers(phba);

	/* Free the bsmbx region. */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI Layer memory with SLI4 HBAs */
	lpfc_mem_free_all(phba);

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	return;
}
/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	phba->lpfc_hba_init_link = lpfc_hba_init_link;
	phba->lpfc_hba_down_link = lpfc_hba_down_link;
	phba->lpfc_selective_reset = lpfc_selective_reset;
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		phba->lpfc_stop_port = lpfc_stop_port_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		phba->lpfc_stop_port = lpfc_stop_port_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1431 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}
/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
{
	int error;

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		return error;
	}

	return 0;
}
/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it
 * attached to.
 **/
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	if (phba->wq) {
		flush_workqueue(phba->wq);
		destroy_workqueue(phba->wq);
		phba->wq = NULL;
	}

	/* Stop kernel worker thread */
	if (phba->worker_thread)
		kthread_stop(phba->worker_thread);
}
/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
void
lpfc_free_iocb_list(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}
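/*
 * Editor's note (illustrative sketch, not driver code):
 * list_for_each_entry_safe() is required here (rather than
 * list_for_each_entry) because each node is unlinked and freed inside the
 * loop; the '_safe' variant caches the next pointer before the body runs.
 * The idiom in miniature:
 *
 *	struct item { struct list_head list; };
 *	struct item *pos, *tmp;
 *
 *	list_for_each_entry_safe(pos, tmp, &some_list, list) {
 *		list_del(&pos->list);
 *		kfree(pos);
 *	}
 */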
/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of requested iocbs
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
int
lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
{
	struct lpfc_iocbq *iocbq_entry = NULL;
	uint16_t iotag;
	int i;

	/* Initialize and populate the iocb list per host. */
	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
	for (i = 0; i < iocb_count; i++) {
		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
		if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count. Unloading driver.\n",
				__func__, i, iocb_count);
			goto out_free_iocbq;
		}

		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
				"Unloading driver.\n", __func__);
			goto out_free_iocbq;
		}
		iocbq_entry->sli4_lxritag = NO_XRI;
		iocbq_entry->sli4_xritag = NO_XRI;

		spin_lock_irq(&phba->hbalock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}

	return 0;

out_free_iocbq:
	lpfc_free_iocb_list(phba);

	return -ENOMEM;
}
/**
 * lpfc_free_sgl_list - Free a given sgl list.
 * @phba: pointer to lpfc hba data structure.
 * @sglq_list: pointer to the head of sgl list.
 *
 * This routine is invoked to free a given sgl list and memory.
 **/
void
lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;

	list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}
}
/**
 * lpfc_free_els_sgl_list - Free els sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's els sgl list and memory.
 **/
static void
lpfc_free_els_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(sglq_list);

	/* Retrieve all els sgls from driver list */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* Now free the sgl list */
	lpfc_free_sgl_list(phba, &sglq_list);
}
/**
 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's nvmet sgl list and memory.
 **/
static void
lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	LIST_HEAD(sglq_list);

	/* Retrieve all nvmet sgls from driver list */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the sgl list */
	list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}

	/* Update the nvmet_xri_cnt to reflect no current sgls.
	 * The next initialization cycle sets the count and allocates
	 * the sgls over again.
	 */
	phba->sli4_hba.nvmet_xri_cnt = 0;
}
/**
 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the driver's active sgl memory.
 * This array will hold the sglq_entry's for active IOs.
 **/
static int
lpfc_init_active_sgl_array(struct lpfc_hba *phba)
{
	int size;

	size = sizeof(struct lpfc_sglq *);
	size *= phba->sli4_hba.max_cfg_param.max_xri;

	phba->sli4_hba.lpfc_sglq_active_list =
		kzalloc(size, GFP_KERNEL);
	if (!phba->sli4_hba.lpfc_sglq_active_list)
		return -ENOMEM;
	return 0;
}
/**
 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to walk through the array of active sglq entries
 * and free all of the resources.
 * This is just a place holder for now.
 **/
static void
lpfc_free_active_sgl(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.lpfc_sglq_active_list);
}
/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly.
 **/
static void
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);

	/* els xri-sgl book keeping */
	phba->sli4_hba.els_xri_cnt = 0;

	/* nvme xri-buffer book keeping */
	phba->sli4_hba.io_xri_cnt = 0;
}
/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * port for those SLI4 ports that do not support extents.  This routine
 * posts a PAGE_SIZE memory region to the port to hold up to
 * PAGE_SIZE / 64 rpi context headers.  This is an initialization routine
 * and should be called only when interrupts are disabled.
 *
 * Return codes
 *	0 - successful
 *	-ERROR - otherwise.
 **/
int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
	int rc = 0;
	struct lpfc_rpi_hdr *rpi_hdr;

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
	if (!rpi_hdr) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0391 Error during rpi post operation\n");
		lpfc_sli4_remove_rpis(phba);
		rc = -ENODEV;
	}

	return rc;
}
/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate a single 4KB memory region to
 * support rpis and stores them in the phba.  This single region
 * provides support for up to 64 rpis.  The region is used globally
 * by the driver.
 *
 * Returns:
 *	A valid rpi hdr on success.
 *	A NULL pointer on any failure.
 **/
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;

	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required.  Set the expected maximum count and let the actual value
	 * get set when extents are fully allocated.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;

	/* The limit on the logical index is just the max_rpi count. */
	rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;

	spin_lock_irq(&phba->hbalock);
	/*
	 * Establish the starting RPI in this header block.  The starting
	 * rpi is normalized to a zero base because the physical rpi is
	 * port based.
	 */
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/* Reached full RPI range */
	if (curr_rpi_range == rpi_limit)
		return NULL;

	/*
	 * First allocate the protocol header region for the port.  The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  LPFC_HDR_TEMPLATE_SIZE,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;
	spin_lock_irq(&phba->hbalock);

	/* The rpi_hdr stores the logical index only. */
	rpi_hdr->start_rpi = curr_rpi_range;
	rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

 err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
 err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}
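/*
 * Editor's note (illustrative sketch, not driver code): the port requires
 * the 4KB header region to be naturally aligned, and dma_alloc_coherent()
 * gives at least page alignment, so the IS_ALIGNED() test above is a cheap
 * sanity check rather than an expected failure path.  The general pattern:
 *
 *	void *virt;
 *	dma_addr_t phys;
 *
 *	virt = dma_alloc_coherent(&pdev->dev, SZ_4K, &phys, GFP_KERNEL);
 *	if (virt && !IS_ALIGNED(phys, SZ_4K)) {
 *		dma_free_coherent(&pdev->dev, SZ_4K, virt, phys);
 *		virt = NULL;
 *	}
 */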
/**
 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove all memory resources allocated
 * to support rpis for SLI4 ports not supporting extents.  This routine
 * presumes the caller has released all rpis consumed by fabric or port
 * logins and is prepared to have the header pages removed.
 **/
void
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;

	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;

	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		list_del(&rpi_hdr->list);
		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
		kfree(rpi_hdr->dmabuf);
		kfree(rpi_hdr);
	}
 exit:
	/* There are no rpis available to the port now. */
	phba->sli4_hba.next_rpi = 0;
}
/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * This routine is invoked to allocate the driver hba data structure for an
 * HBA device.  If the allocation is successful, the phba reference to the
 * PCI device data structure is set.
 *
 * Return codes
 *	pointer to @phba - successful
 *	NULL - error
 **/
static struct lpfc_hba *
lpfc_hba_alloc(struct pci_dev *pdev)
{
	struct lpfc_hba *phba;

	/* Allocate memory for HBA structure */
	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		dev_err(&pdev->dev, "failed to allocate hba struct\n");
		return NULL;
	}

	/* Set reference to PCI device in HBA structure */
	phba->pcidev = pdev;

	/* Assign an unused board number */
	phba->brd_no = lpfc_get_instance();
	if (phba->brd_no < 0) {
		kfree(phba);
		return NULL;
	}
	phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;

	spin_lock_init(&phba->ct_ev_lock);
	INIT_LIST_HEAD(&phba->ct_ev_waiters);

	return phba;
}
/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
 **/
static void
lpfc_hba_free(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		kfree(phba->sli4_hba.hdwq);
	phba->sli4_hba.hdwq = NULL;

	/* Release the driver assigned board number */
	idr_remove(&lpfc_hba_index, phba->brd_no);

	/* Free memory allocated with sli3 rings */
	kfree(phba->sli.sli3_ring);
	phba->sli.sli3_ring = NULL;

	kfree(phba);
	return;
}
/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create HBA physical port and associate a SCSI
 * host with it.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_create_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;

	/* Initialize HBA FC structure */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	atomic_set(&phba->sdev_cnt, 0);
	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;

	if (phba->nvmet_support) {
		/* Only 1 vport (pport) will support NVME target */
		phba->targetport = NULL;
		phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
				"6076 NVME Target Found\n");
	}

	lpfc_debugfs_initialize(vport);
	/* Put reference to SCSI host to driver's device private data */
	pci_set_drvdata(phba->pcidev, shost);

	/*
	 * At this point we are fully registered with PSA. In addition,
	 * any initial discovery should be completed.
	 */
	vport->load_flag |= FC_ALLOW_FDMI;
	if (phba->cfg_enable_SmartSAN ||
	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {

		/* Setup appropriate attribute masks */
		vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
		if (phba->cfg_enable_SmartSAN)
			vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
		else
			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
	}
	return 0;
}
/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to destroy HBA physical port and the associated
 * SCSI host.
 **/
static void
lpfc_destroy_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	/* Destroy physical port that associated with the SCSI host */
	destroy_port(vport);

	return;
}
/**
 * lpfc_setup_bg - Setup Block guard structures and debug areas.
 * @phba: pointer to lpfc hba data structure.
 * @shost: the shost to be used to detect Block guard settings.
 *
 * This routine sets up the local Block guard protocol settings for @shost.
 * This routine also allocates memory for debugging bg buffers.
 **/
static void
lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
{
	uint32_t old_mask;
	uint32_t old_guard;

	if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1478 Registering BlockGuard with the "
				"SCSI layer\n");

		old_mask = phba->cfg_prot_mask;
		old_guard = phba->cfg_prot_guard;

		/* Only allow supported values */
		phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
					SHOST_DIX_TYPE0_PROTECTION |
					SHOST_DIX_TYPE1_PROTECTION);
		phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
					 SHOST_DIX_GUARD_CRC);

		/* DIF Type 1 protection for profiles AST1/C1 is end to end */
		if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
			phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;

		if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
			if ((old_mask != phba->cfg_prot_mask) ||
			    (old_guard != phba->cfg_prot_guard))
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1475 Registering BlockGuard with the "
					"SCSI layer: mask %d guard %d\n",
					phba->cfg_prot_mask,
					phba->cfg_prot_guard);

			scsi_host_set_prot(shost, phba->cfg_prot_mask);
			scsi_host_set_guard(shost, phba->cfg_prot_guard);
		} else
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1479 Not Registering BlockGuard with the SCSI "
				"layer, Bad protection parameters: %d %d\n",
				old_mask, old_guard);
	}
}
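/*
 * Editor's note (illustrative sketch, not driver code): scsi_host_set_prot()
 * and scsi_host_set_guard() simply publish the HBA's DIF/DIX capabilities to
 * the SCSI midlayer; the midlayer then decides per-command whether to attach
 * protection information.  A minimal registration sketch:
 *
 *	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *				  SHOST_DIX_TYPE1_PROTECTION);
 *	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
 *
 * After this, scsi_get_prot_op(cmd) tells the LLD which protection
 * operation, if any, applies to a given command.
 */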
/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to perform all the necessary post initialization
 * setup for the device.
 **/
static void
lpfc_post_init_setup(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.
	 */
	shost = pci_get_drvdata(phba->pcidev);
	shost->can_queue = phba->cfg_hba_queue_depth - 10;

	lpfc_host_attrib_init(shost);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0428 Perform SCSI scan\n");
	/* Send board arrival event to upper layer */
	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
	return;
}
/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-3 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error;

	if (!pdev)
		return -ENODEV;

	/* Set the device DMA mask size */
	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error)
		return error;
	error = -ENODEV;

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
					       &phba->slim2p.phys, GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->mbox_ext = (phba->slim2p.virt +
			  offsetof(struct lpfc_sli2_slim, mbx_ext_words));
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}
/**
 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-3 interface spec.
 **/
static void
lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* I/O memory unmap */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	return;
}
/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr_reg.word0) ||
			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}
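	/*
	 * Editor's note (illustrative sketch, not driver code): the loop
	 * above is a bounded poll - 3000 iterations with a 10 ms sleep
	 * gives the 30 second POST budget mentioned in the comment.  The
	 * general shape:
	 *
	 *	for (i = 0; i < attempts; i++) {
	 *		if (read_status() == READY)
	 *			break;
	 *		msleep(poll_interval_ms);
	 *	}
	 *	if (i == attempts)
	 *		return -ETIMEDOUT;
	 *
	 * Here the "ready" and "fatal error" conditions are both folded into
	 * the same loop so a hard POST failure exits early.
	 */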
	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid.  Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process.  Fail the load if the port did not come up
		 * correctly.
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			phba->sli4_hba.ue_mask_lo =
			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
			      readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
		case LPFC_SLI_INTF_IF_TYPE_6:
			/* Final checks.  The port status should be clean. */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				&reg_data.word0) ||
				(bf_get(lpfc_sliport_status_err, &reg_data) &&
				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2888 Unrecoverable port error "
					"following POST: port status reg "
					"0x%x, port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			break;
		}
	}
	return port_error;
}
/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.u.if_type0.UERRLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
		phba->sli4_hba.u.if_type0.UERRHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.RQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
		phba->sli4_hba.MQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}
/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: sli if type to operate on.
 *
 * This routine is invoked to set up SLI4 BAR1 register memory map.
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_SLIPORT_IF0_SMPHR;
		phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISR0;
		phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_IMR0;
		phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
			LPFC_HST_ISCR0;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_WQ_DOORBELL;
		phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_CQ_DOORBELL;
		phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_EQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
			LPFC_IF6_MQ_DOORBELL;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_err(&phba->pcidev->dev,
			"FATAL - unsupported SLI4 interface type - %d\n",
			if_type);
		break;
	}
}
/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_WQ_DOORBELL);
	phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}
/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec.  This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs.  No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later.  The mailbox dma address is required
	 * to be 16-byte aligned.  Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now.  The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses.  The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}
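/*
 * Editor's note (illustrative sketch, not driver code): the BMBX register
 * encodes a 16-byte-aligned 64-bit physical address as two 30-bit halves,
 * each shifted left by 2 with bit 1 flagging high vs. low.  addr_hi carries
 * physical bits 63:34 and addr_lo bits 33:4 (bits 3:0 are zero by
 * alignment).  A sketch of the reverse mapping, useful for checking the
 * math above:
 *
 *	static u64 bmbx_regs_to_phys(u32 addr_hi, u32 addr_lo)
 *	{
 *		u64 hi = (addr_hi >> 2) & 0x3fffffff;
 *		u64 lo = (addr_lo >> 2) & 0x3fffffff;
 *
 *		return (hi << 34) | (lo << 4);
 *	}
 */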
/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to teardown the bootstrap mailbox
 * region and release all host resources.  This routine requires
 * the caller to ensure all mailbox commands recovered, no
 * additional mailbox commands are sent, and interrupts are disabled
 * before calling this routine.
 **/
static void
lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
{
	dma_free_coherent(&phba->pcidev->dev,
			  phba->sli4_hba.bmbx.bmbx_size,
			  phba->sli4_hba.bmbx.dmabuf->virt,
			  phba->sli4_hba.bmbx.dmabuf->phys);

	kfree(phba->sli4_hba.bmbx.dmabuf);
	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}
static const char * const lpfc_topo_to_str[] = {
	"Loop then P2P",
	"Loopback",
	"P2P Only",
	"Unsupported",
	"Loop Only",
	"Unsupported",
	"P2P then Loop",
};

#define	LINK_FLAGS_DEF	0x0
#define	LINK_FLAGS_P2P	0x1
#define	LINK_FLAGS_LOOP	0x2
/**
 * lpfc_map_topology - Map the topology read from READ_CONFIG
 * @phba: pointer to lpfc hba data structure.
 * @rd_config: pointer to read config data
 *
 * This routine is invoked to map the topology values as read
 * from the read config mailbox command.  If the persistent
 * topology feature is supported, the firmware will provide the
 * saved topology information to be used in INIT_LINK
 **/
static void
lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
{
	u8 ptv, tf, pt;

	ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
	tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
	pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
			ptv, tf, pt);
	if (!ptv) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2019 FW does not support persistent topology "
				"Using driver parameter defined value [%s]",
				lpfc_topo_to_str[phba->cfg_topology]);
		return;
	}
	/* FW supports persistent topology - override module parameter value */
	phba->hba_flag |= HBA_PERSISTENT_TOPO;

	/* if ASIC_GEN_NUM >= 0xC */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_6) ||
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_FAMILY_G6)) {
		if (!tf) {
			phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
					? FLAGS_TOPOLOGY_MODE_LOOP
					: FLAGS_TOPOLOGY_MODE_PT_PT);
		} else {
			phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
		}
	} else { /* G5 */
		if (tf) {
			/* If topology failover set - pt is '0' or '1' */
			phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
					      FLAGS_TOPOLOGY_MODE_LOOP_PT);
		} else {
			phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
					? FLAGS_TOPOLOGY_MODE_PT_PT
					: FLAGS_TOPOLOGY_MODE_LOOP);
		}
	}
	if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2020 Using persistent topology value [%s]",
				lpfc_topo_to_str[phba->cfg_topology]);
	} else {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2021 Invalid topology values from FW "
				"Using driver parameter defined value [%s]",
				lpfc_topo_to_str[phba->cfg_topology]);
	}
}
/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPI's XRI's VPI's VFI's and FCFIs.  These values also affect the
 * resource allocation for the port.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_mbx_get_func_cfg *get_func_cfg;
	struct lpfc_rsrc_desc_fcfcoe *desc;
	char *pdesc_0;
	uint16_t forced_link_speed;
	uint32_t if_type, qmin;
	int length, i, rc = 0, rc2;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2012 Mailbox failed, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		rd_config = &pmb->u.mqe.un.rd_config;
		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
			phba->sli4_hba.lnk_info.lnk_tp =
				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
			phba->sli4_hba.lnk_info.lnk_no =
				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"3081 lnk_type:%d, lnk_numb:%d\n",
					phba->sli4_hba.lnk_info.lnk_tp,
					phba->sli4_hba.lnk_info.lnk_no);
		} else
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3082 Mailbox (x%x) returned ldv:x0\n",
					bf_get(lpfc_mqe_command, &pmb->u.mqe));
		if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
			phba->bbcredit_support = 1;
			phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
		}

		phba->sli4_hba.conf_trunk =
			bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
		phba->sli4_hba.extents_in_use =
			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		/* Reduce resource usage in kdump environment */
		if (is_kdump_kernel() &&
		    phba->sli4_hba.max_cfg_param.max_xri > 512)
			phba->sli4_hba.max_cfg_param.max_xri = 512;
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		/* Limit the max we support */
		if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
			phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;

		/* Next decide on FPIN or Signal E2E CGN support
		 * For congestion alarms and warnings valid combination are:
		 * 1. FPIN alarms / FPIN warnings
		 * 2. Signal alarms / Signal warnings
		 * 3. FPIN alarms / Signal warnings
		 * 4. Signal alarms / FPIN warnings
		 *
		 * Initialize the adapter frequency to 100 mSecs
		 */
		phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
		phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
		phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;

		if (lpfc_use_cgn_signal) {
			if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
				phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
				phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
			}
			if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
				/* MUST support both alarm and warning
				 * because EDC does not support alarm alone.
				 */
				if (phba->cgn_reg_signal !=
				    EDC_CG_SIG_WARN_ONLY) {
					/* Must support both or none */
					phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
					phba->cgn_reg_signal =
						EDC_CG_SIG_NOTSUPPORTED;
				} else {
					phba->cgn_reg_signal =
						EDC_CG_SIG_WARN_ALARM;
					phba->cgn_reg_fpin =
						LPFC_CGN_FPIN_NONE;
				}
			}
		}

		/* Set the congestion initial signal and fpin values. */
		phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
		phba->cgn_init_reg_signal = phba->cgn_reg_signal;

		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
				phba->cgn_reg_signal, phba->cgn_reg_fpin);

		lpfc_map_topology(phba, rd_config);
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi,
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->sli4_hba.max_cfg_param.max_cq,
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->sli4_hba.max_cfg_param.max_rq,
				phba->lmt);

		/*
		 * Calculate queue resources based on how
		 * many WQ/CQ/EQs are available.
		 */
		qmin = phba->sli4_hba.max_cfg_param.max_wq;
		if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_cq;
		if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
			qmin = phba->sli4_hba.max_cfg_param.max_eq;
		/*
		 * What's left after this can go toward NVME / FCP.
		 * The minus 4 accounts for ELS, NVME LS, MBOX
		 * plus one extra.  When configured for
		 * NVMET, FCP io channel WQs are not created.
		 */
		qmin -= 4;

		/* Check to see if there is enough for NVME */
		if ((phba->cfg_irq_chann > qmin) ||
		    (phba->cfg_hdw_queue > qmin)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2005 Reducing Queues - "
					"FW resource limitation: "
					"WQ %d CQ %d EQ %d: min %d: "
					"IRQ %d HDWQ %d\n",
					phba->sli4_hba.max_cfg_param.max_wq,
					phba->sli4_hba.max_cfg_param.max_cq,
					phba->sli4_hba.max_cfg_param.max_eq,
					qmin, phba->cfg_irq_chann,
					phba->cfg_hdw_queue);

			if (phba->cfg_irq_chann > qmin)
				phba->cfg_irq_chann = qmin;
			if (phba->cfg_hdw_queue > qmin)
				phba->cfg_hdw_queue = qmin;
		}
	}

	if (rc)
		goto read_cfg_out;
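	/*
	 * Editor's note (illustrative sketch, not driver code): qmin is the
	 * minimum of the WQ/CQ/EQ counts the firmware reported, minus 4 for
	 * the always-present special queues (ELS, NVME LS, MBOX, plus one
	 * extra).  In miniature, using the helpers from <linux/minmax.h>:
	 *
	 *	qmin = min3(max_wq, max_cq, max_eq) - 4;
	 *	irq_chann = min(irq_chann, qmin);
	 *	hdw_queue = min(hdw_queue, qmin);
	 *
	 * The driver open-codes the comparisons above instead.
	 */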
	/* Update link speed if forced link speed is supported */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
		forced_link_speed =
			bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
		if (forced_link_speed) {
			phba->hba_flag |= HBA_FORCED_LINK_SPEED;

			switch (forced_link_speed) {
			case LINK_SPEED_1G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_1G;
				break;
			case LINK_SPEED_2G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_2G;
				break;
			case LINK_SPEED_4G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_4G;
				break;
			case LINK_SPEED_8G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_8G;
				break;
			case LINK_SPEED_10G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_10G;
				break;
			case LINK_SPEED_16G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_16G;
				break;
			case LINK_SPEED_32G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_32G;
				break;
			case LINK_SPEED_64G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_64G;
				break;
			case 0xffff:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
				break;
			default:
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"0047 Unrecognized link "
						"speed : %d\n",
						forced_link_speed);
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
			}
		}
	}

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	length = phba->sli4_hba.max_cfg_param.max_xri -
			lpfc_sli4_get_els_iocb_cnt(phba);
	if (phba->cfg_hba_queue_depth > length) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3361 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, length);
		phba->cfg_hba_queue_depth = length;
	}

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* get the pf# and vf# for SLI4 if_type 2 port */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc2 || shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3026 Mailbox failed, mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		goto read_cfg_out;
	}

	/* search for fc_fcoe resource descriptor */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;

	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
		goto read_cfg_out;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resource Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
10035 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
10036 * @phba: pointer to lpfc hba data structure.
10038 * This routine is invoked to set up the port-side endian order when
10039 * the port if_type is 0. This routine has no function for other
10040 * if_types.
10041 *
10042 * Return codes
10043 * 0 - successful
10044 * -ENOMEM - No available memory
10045 * -EIO - The mailbox failed to complete successfully.
10048 lpfc_setup_endian_order(struct lpfc_hba *phba)
10050 LPFC_MBOXQ_t *mboxq;
10051 uint32_t if_type, rc = 0;
10052 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
10053 HOST_ENDIAN_HIGH_WORD1};
10055 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10057 case LPFC_SLI_INTF_IF_TYPE_0:
10058 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10061 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10062 "0492 Unable to allocate memory for "
10063 "issuing SLI_CONFIG_SPECIAL mailbox "
10069 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
10070 * two words to contain special data values and no other data.
10072 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
10073 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
10074 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10075 if (rc != MBX_SUCCESS) {
10076 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10077 "0493 SLI_CONFIG_SPECIAL mailbox "
10078 "failed with status x%x\n",
10082 mempool_free(mboxq, phba->mbox_mem_pool);
10084 case LPFC_SLI_INTF_IF_TYPE_6:
10085 case LPFC_SLI_INTF_IF_TYPE_2:
10086 case LPFC_SLI_INTF_IF_TYPE_1:
10094 * lpfc_sli4_queue_verify - Verify and update EQ counts
10095 * @phba: pointer to lpfc hba data structure.
10097 * This routine is invoked to check the user settable queue counts for EQs.
10098 * After this routine is called the counts will be set to valid values that
10099 * adhere to the constraints of the system's interrupt vectors and the port's
10100 * queue resources.
10101 *
10102 * Return codes
10103 * 0 - successful
10104 * -ENOMEM - No available memory
10107 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
10110 * Sanity check for configured queue parameters against the run-time
10111 * device parameters
10114 if (phba->nvmet_support) {
10115 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
10116 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
10117 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
10118 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
10121 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10122 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
10123 phba->cfg_hdw_queue, phba->cfg_irq_chann,
10124 phba->cfg_nvmet_mrq);
10126 /* Get EQ depth from module parameter, fake the default for now */
10127 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10128 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10130 /* Get CQ depth from module parameter, fake the default for now */
10131 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10132 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
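/*
 * Worked example (editorial note; counts assumed): with nvmet_support
 * set, cfg_hdw_queue = 4 and cfg_nvmet_mrq = 16, the clamps above
 * first reduce cfg_nvmet_mrq to 4; the LPFC_NVMET_MRQ_MAX clamp then
 * has no further effect. The EQ/CQ entry size and count defaults set
 * here stand in for module parameters, as the comments note.
 */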
10137 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
10139 struct lpfc_queue *qdesc;
10143 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
10144 /* Create Fast Path IO CQs */
10145 if (phba->enab_exp_wqcq_pages)
10146 /* Increase the CQ size when WQEs contain an embedded cdb */
10147 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10148 phba->sli4_hba.cq_esize,
10149 LPFC_CQE_EXP_COUNT, cpu);
10152 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10153 phba->sli4_hba.cq_esize,
10154 phba->sli4_hba.cq_ecount, cpu);
10156 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10157 "0499 Failed allocate fast-path IO CQ (%d)\n",
10161 qdesc->qe_valid = 1;
10163 qdesc->chann = cpu;
10164 phba->sli4_hba.hdwq[idx].io_cq = qdesc;
10166 /* Create Fast Path IO WQs */
10167 if (phba->enab_exp_wqcq_pages) {
10168 /* Increase the WQ size when WQEs contain an embedded cdb */
10169 wqesize = (phba->fcp_embed_io) ?
10170 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10171 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10173 LPFC_WQE_EXP_COUNT, cpu);
10175 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10176 phba->sli4_hba.wq_esize,
10177 phba->sli4_hba.wq_ecount, cpu);
10180 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10181 "0503 Failed allocate fast-path IO WQ (%d)\n",
10186 qdesc->chann = cpu;
10187 phba->sli4_hba.hdwq[idx].io_wq = qdesc;
10188 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
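/*
 * Usage sketch (editorial note): callers allocate one CQ/WQ pair per
 * hardware queue, e.g.
 *
 *	for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
 *		if (lpfc_alloc_io_wq_cq(phba, idx))
 *			goto out_error;
 *
 * which is how lpfc_sli4_queue_create() below drives it.
 */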
10193 * lpfc_sli4_queue_create - Create all the SLI4 queues
10194 * @phba: pointer to lpfc hba data structure.
10196 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
10197 * operation. For each SLI4 queue type, the parameters such as queue entry
10198 * count (queue depth) shall be taken from the module parameter. For now,
10199 * we just use some constant number as place holder.
10203 * -ENOMEM - No available memory
10204 * -EIO - The mailbox failed to complete successfully.
10207 lpfc_sli4_queue_create(struct lpfc_hba *phba)
10209 struct lpfc_queue *qdesc;
10210 int idx, cpu, eqcpu;
10211 struct lpfc_sli4_hdw_queue *qp;
10212 struct lpfc_vector_map_info *cpup;
10213 struct lpfc_vector_map_info *eqcpup;
10214 struct lpfc_eq_intr_info *eqi;
10217 * Create HBA Record arrays.
10218 * Both NVME and FCP will share the same vectors / EQs.
10220 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
10221 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
10222 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
10223 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
10224 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
10225 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
10226 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10227 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10228 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10229 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10231 if (!phba->sli4_hba.hdwq) {
10232 phba->sli4_hba.hdwq = kcalloc(
10233 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
10235 if (!phba->sli4_hba.hdwq) {
10236 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10237 "6427 Failed allocate memory for "
10238 "fast-path Hardware Queue array\n");
10241 /* Prepare hardware queues to take IO buffers */
10242 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10243 qp = &phba->sli4_hba.hdwq[idx];
10244 spin_lock_init(&qp->io_buf_list_get_lock);
10245 spin_lock_init(&qp->io_buf_list_put_lock);
10246 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
10247 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
10248 qp->get_io_bufs = 0;
10249 qp->put_io_bufs = 0;
10250 qp->total_io_bufs = 0;
10251 spin_lock_init(&qp->abts_io_buf_list_lock);
10252 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
10253 qp->abts_scsi_io_bufs = 0;
10254 qp->abts_nvme_io_bufs = 0;
10255 INIT_LIST_HEAD(&qp->sgl_list);
10256 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
10257 spin_lock_init(&qp->hdwq_lock);
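/*
 * Design note (editorial): each hardware queue keeps two IO buffer
 * free lists with separate locks. Completions return buffers to the
 * "put" list while submitters consume from the "get" list; when the
 * get list empties, the two lists are swapped under both locks. This
 * keeps the hot allocate and free paths from contending on one lock.
 */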
10261 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10262 if (phba->nvmet_support) {
10263 phba->sli4_hba.nvmet_cqset = kcalloc(
10264 phba->cfg_nvmet_mrq,
10265 sizeof(struct lpfc_queue *),
10267 if (!phba->sli4_hba.nvmet_cqset) {
10268 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10269 "3121 Fail allocate memory for "
10270 "fast-path CQ set array\n");
10273 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
10274 phba->cfg_nvmet_mrq,
10275 sizeof(struct lpfc_queue *),
10277 if (!phba->sli4_hba.nvmet_mrq_hdr) {
10278 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10279 "3122 Fail allocate memory for "
10280 "fast-path RQ set hdr array\n");
10283 phba->sli4_hba.nvmet_mrq_data = kcalloc(
10284 phba->cfg_nvmet_mrq,
10285 sizeof(struct lpfc_queue *),
10287 if (!phba->sli4_hba.nvmet_mrq_data) {
10288 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10289 "3124 Fail allocate memory for "
10290 "fast-path RQ set data array\n");
10296 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10298 /* Create HBA Event Queues (EQs) */
10299 for_each_present_cpu(cpu) {
10300 /* We only want to create 1 EQ per vector, even though
10301 * multiple CPUs might be using that vector, so only
10302 * select the CPUs that are LPFC_CPU_FIRST_IRQ.
10304 cpup = &phba->sli4_hba.cpu_map[cpu];
10305 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10306 continue;
10308 /* Get a ptr to the Hardware Queue associated with this CPU */
10309 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10311 /* Allocate an EQ */
10312 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10313 phba->sli4_hba.eq_esize,
10314 phba->sli4_hba.eq_ecount, cpu);
10316 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10317 "0497 Failed allocate EQ (%d)\n",
10321 qdesc->qe_valid = 1;
10322 qdesc->hdwq = cpup->hdwq;
10323 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
10324 qdesc->last_cpu = qdesc->chann;
10326 /* Save the allocated EQ in the Hardware Queue */
10327 qp->hba_eq = qdesc;
10329 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
10330 list_add(&qdesc->cpu_list, &eqi->list);
10333 /* Now we need to populate the other Hardware Queues, that share
10334 * an IRQ vector, with the associated EQ ptr.
10336 for_each_present_cpu(cpu) {
10337 cpup = &phba->sli4_hba.cpu_map[cpu];
10339 /* Check for EQ already allocated in previous loop */
10340 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10341 continue;
10343 /* Check for multiple CPUs per hdwq */
10344 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10348 /* We need to share an EQ for this hdwq */
10349 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
10350 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
10351 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
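/*
 * Example mapping (editorial note; counts assumed): with 8 present
 * CPUs, 4 IRQ vectors and 8 hardware queues, only the 4 CPUs flagged
 * LPFC_CPU_FIRST_IRQ allocate an EQ in the first loop; the second
 * loop then points each remaining hdwq at the EQ owned by the first
 * CPU on the same vector, so EQs are shared rather than duplicated.
 */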
10354 /* Allocate IO Path SLI4 CQ/WQs */
10355 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10356 if (lpfc_alloc_io_wq_cq(phba, idx))
10360 if (phba->nvmet_support) {
10361 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10362 cpu = lpfc_find_cpu_handle(phba, idx,
10363 LPFC_FIND_BY_HDWQ);
10364 qdesc = lpfc_sli4_queue_alloc(phba,
10365 LPFC_DEFAULT_PAGE_SIZE,
10366 phba->sli4_hba.cq_esize,
10367 phba->sli4_hba.cq_ecount,
10370 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10371 "3142 Failed allocate NVME "
10372 "CQ Set (%d)\n", idx);
10375 qdesc->qe_valid = 1;
10377 qdesc->chann = cpu;
10378 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
10383 * Create Slow Path Completion Queues (CQs)
10386 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
10387 /* Create slow-path Mailbox Command Complete Queue */
10388 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10389 phba->sli4_hba.cq_esize,
10390 phba->sli4_hba.cq_ecount, cpu);
10392 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10393 "0500 Failed allocate slow-path mailbox CQ\n");
10396 qdesc->qe_valid = 1;
10397 phba->sli4_hba.mbx_cq = qdesc;
10399 /* Create slow-path ELS Complete Queue */
10400 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10401 phba->sli4_hba.cq_esize,
10402 phba->sli4_hba.cq_ecount, cpu);
10404 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10405 "0501 Failed allocate slow-path ELS CQ\n");
10408 qdesc->qe_valid = 1;
10409 qdesc->chann = cpu;
10410 phba->sli4_hba.els_cq = qdesc;
10414 * Create Slow Path Work Queues (WQs)
10417 /* Create Mailbox Command Queue */
10419 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10420 phba->sli4_hba.mq_esize,
10421 phba->sli4_hba.mq_ecount, cpu);
10423 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10424 "0505 Failed allocate slow-path MQ\n");
10427 qdesc->chann = cpu;
10428 phba->sli4_hba.mbx_wq = qdesc;
10431 * Create ELS Work Queues
10434 /* Create slow-path ELS Work Queue */
10435 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10436 phba->sli4_hba.wq_esize,
10437 phba->sli4_hba.wq_ecount, cpu);
10439 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10440 "0504 Failed allocate slow-path ELS WQ\n");
10443 qdesc->chann = cpu;
10444 phba->sli4_hba.els_wq = qdesc;
10445 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10447 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10448 /* Create NVME LS Complete Queue */
10449 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10450 phba->sli4_hba.cq_esize,
10451 phba->sli4_hba.cq_ecount, cpu);
10453 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10454 "6079 Failed allocate NVME LS CQ\n");
10457 qdesc->chann = cpu;
10458 qdesc->qe_valid = 1;
10459 phba->sli4_hba.nvmels_cq = qdesc;
10461 /* Create NVME LS Work Queue */
10462 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10463 phba->sli4_hba.wq_esize,
10464 phba->sli4_hba.wq_ecount, cpu);
10466 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10467 "6080 Failed allocate NVME LS WQ\n");
10470 qdesc->chann = cpu;
10471 phba->sli4_hba.nvmels_wq = qdesc;
10472 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10476 * Create Receive Queue (RQ)
10479 /* Create Receive Queue for header */
10480 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10481 phba->sli4_hba.rq_esize,
10482 phba->sli4_hba.rq_ecount, cpu);
10484 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10485 "0506 Failed allocate receive HRQ\n");
10488 phba->sli4_hba.hdr_rq = qdesc;
10490 /* Create Receive Queue for data */
10491 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10492 phba->sli4_hba.rq_esize,
10493 phba->sli4_hba.rq_ecount, cpu);
10495 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10496 "0507 Failed allocate receive DRQ\n");
10499 phba->sli4_hba.dat_rq = qdesc;
10501 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
10502 phba->nvmet_support) {
10503 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10504 cpu = lpfc_find_cpu_handle(phba, idx,
10505 LPFC_FIND_BY_HDWQ);
10506 /* Create NVMET Receive Queue for header */
10507 qdesc = lpfc_sli4_queue_alloc(phba,
10508 LPFC_DEFAULT_PAGE_SIZE,
10509 phba->sli4_hba.rq_esize,
10510 LPFC_NVMET_RQE_DEF_COUNT,
10513 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10514 "3146 Failed allocate "
10519 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
10521 /* Only needed for header of RQ pair */
10522 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
10525 if (qdesc->rqbp == NULL) {
10526 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10527 "6131 Failed allocate "
10532 /* Put list in known state in case driver load fails. */
10533 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
10535 /* Create NVMET Receive Queue for data */
10536 qdesc = lpfc_sli4_queue_alloc(phba,
10537 LPFC_DEFAULT_PAGE_SIZE,
10538 phba->sli4_hba.rq_esize,
10539 LPFC_NVMET_RQE_DEF_COUNT,
10542 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10543 "3156 Failed allocate "
10548 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
10552 /* Clear NVME stats */
10553 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10554 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10555 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
10556 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
10560 /* Clear SCSI stats */
10561 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
10562 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10563 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
10564 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
10571 lpfc_sli4_queue_destroy(phba);
10576 __lpfc_sli4_release_queue(struct lpfc_queue **qp)
10579 lpfc_sli4_queue_free(*qp);
10585 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
10592 for (idx = 0; idx < max; idx++)
10593 __lpfc_sli4_release_queue(&(*qs)[idx]);
10600 lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
10602 struct lpfc_sli4_hdw_queue *hdwq;
10603 struct lpfc_queue *eq;
10606 hdwq = phba->sli4_hba.hdwq;
10608 /* Loop thru all Hardware Queues */
10609 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10610 /* Free the CQ/WQ corresponding to the Hardware Queue */
10611 lpfc_sli4_queue_free(hdwq[idx].io_cq);
10612 lpfc_sli4_queue_free(hdwq[idx].io_wq);
10613 hdwq[idx].hba_eq = NULL;
10614 hdwq[idx].io_cq = NULL;
10615 hdwq[idx].io_wq = NULL;
10616 if (phba->cfg_xpsgl && !phba->nvmet_support)
10617 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
10618 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
10620 /* Loop thru all IRQ vectors */
10621 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10622 /* Free the EQ corresponding to the IRQ vector */
10623 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
10624 lpfc_sli4_queue_free(eq);
10625 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
10630 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
10631 * @phba: pointer to lpfc hba data structure.
10633 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
10634 * operation.
10638 * -ENOMEM - No available memory
10639 * -EIO - The mailbox failed to complete successfully.
10642 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
10645 * Set FREE_INIT before beginning to free the queues.
10646 * Wait until all users of the queues have acknowledged the
10647 * release by clearing FREE_WAIT.
10649 spin_lock_irq(&phba->hbalock);
10650 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
10651 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
10652 spin_unlock_irq(&phba->hbalock);
10654 spin_lock_irq(&phba->hbalock);
10656 spin_unlock_irq(&phba->hbalock);
10658 lpfc_sli4_cleanup_poll_list(phba);
10660 /* Release HBA eqs */
10661 if (phba->sli4_hba.hdwq)
10662 lpfc_sli4_release_hdwq(phba);
10664 if (phba->nvmet_support) {
10665 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
10666 phba->cfg_nvmet_mrq);
10668 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
10669 phba->cfg_nvmet_mrq);
10670 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
10671 phba->cfg_nvmet_mrq);
10674 /* Release mailbox command work queue */
10675 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
10677 /* Release ELS work queue */
10678 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
10680 /* Release ELS work queue */
10681 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
10683 /* Release unsolicited receive queue */
10684 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
10685 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
10687 /* Release ELS complete queue */
10688 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
10690 /* Release NVME LS complete queue */
10691 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
10693 /* Release mailbox command complete queue */
10694 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
10696 /* Everything on this list has been freed */
10697 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10699 /* Done with freeing the queues */
10700 spin_lock_irq(&phba->hbalock);
10701 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
10702 spin_unlock_irq(&phba->hbalock);
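/*
 * Concurrency sketch (editorial note): queue users that must briefly
 * hold off teardown set LPFC_QUEUE_FREE_WAIT under hbalock; the loop
 * at the top of this routine keeps dropping and retaking hbalock
 * until that flag clears, and LPFC_QUEUE_FREE_INIT brackets the whole
 * teardown so new waiters can see that a free is in progress.
 */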
10706 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
10708 struct lpfc_rqb *rqbp;
10709 struct lpfc_dmabuf *h_buf;
10710 struct rqb_dmabuf *rqb_buffer;
10713 while (!list_empty(&rqbp->rqb_buffer_list)) {
10714 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
10715 struct lpfc_dmabuf, list);
10717 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
10718 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
10719 rqbp->buffer_count--;
10725 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
10726 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
10727 int qidx, uint32_t qtype)
10729 struct lpfc_sli_ring *pring;
10732 if (!eq || !cq || !wq) {
10733 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10734 "6085 Fast-path %s (%d) not allocated\n",
10735 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
10739 /* create the CQ first */
10740 rc = lpfc_cq_create(phba, cq, eq,
10741 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
10743 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10744 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
10745 qidx, (uint32_t)rc);
10749 if (qtype != LPFC_MBOX) {
10750 /* Setup cq_map for fast lookup */
10751 if (cq_map)
10752 *cq_map = cq->queue_id;
10754 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10755 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
10756 qidx, cq->queue_id, qidx, eq->queue_id);
10758 /* create the wq */
10759 rc = lpfc_wq_create(phba, wq, cq, qtype);
10761 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10762 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
10763 qidx, (uint32_t)rc);
10764 /* no need to tear down cq - caller will do so */
10768 /* Bind this CQ/WQ to the NVME ring */
10769 pring = wq->pring;
10770 pring->sli.sli4.wqp = (void *)wq;
10773 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10774 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
10775 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
10777 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
10779 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10780 "0539 Failed setup of slow-path MQ: "
10781 "rc = 0x%x\n", rc);
10782 /* no need to tear down cq - caller will do so */
10786 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10787 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
10788 phba->sli4_hba.mbx_wq->queue_id,
10789 phba->sli4_hba.mbx_cq->queue_id);
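/*
 * Editorial note: lpfc_create_wq_cq() is shared by the fast path and
 * the mailbox path. For qtype == LPFC_MBOX it creates an MQ on the CQ
 * via lpfc_mq_create() instead of a WQ, and callers pass a NULL
 * cq_map, so the cq_map update above is skipped for the mailbox queue.
 */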
10796 * lpfc_setup_cq_lookup - Setup the CQ lookup table
10797 * @phba: pointer to lpfc hba data structure.
10799 * This routine will populate the cq_lookup table with all
10800 * available CQ queue_id's.
10803 lpfc_setup_cq_lookup(struct lpfc_hba *phba)
10805 struct lpfc_queue *eq, *childq;
10808 memset(phba->sli4_hba.cq_lookup, 0,
10809 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
10810 /* Loop thru all IRQ vectors */
10811 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
10812 /* Get the EQ corresponding to the IRQ vector */
10813 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
10814 if (!eq)
10815 continue;
10816 /* Loop through all CQs associated with that EQ */
10817 list_for_each_entry(childq, &eq->child_list, list) {
10818 if (childq->queue_id > phba->sli4_hba.cq_max)
10819 continue;
10820 if (childq->subtype == LPFC_IO)
10821 phba->sli4_hba.cq_lookup[childq->queue_id] =
10822 childq;
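/*
 * Usage sketch (editorial note): the table built here lets the
 * fast-path interrupt handler map a CQID taken from an EQE straight
 * to its queue, roughly:
 *
 *	cq = phba->sli4_hba.cq_lookup[cqid];
 *
 * instead of walking the EQ's child list on every event.
 */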
10828 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
10829 * @phba: pointer to lpfc hba data structure.
10831 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
10832 * operation.
10836 * -ENOMEM - No available memory
10837 * -EIO - The mailbox failed to complete successfully.
10840 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
10842 uint32_t shdr_status, shdr_add_status;
10843 union lpfc_sli4_cfg_shdr *shdr;
10844 struct lpfc_vector_map_info *cpup;
10845 struct lpfc_sli4_hdw_queue *qp;
10846 LPFC_MBOXQ_t *mboxq;
10848 uint32_t length, usdelay;
10851 /* Check for dual-ULP support */
10852 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10854 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10855 "3249 Unable to allocate memory for "
10856 "QUERY_FW_CFG mailbox command\n");
10859 length = (sizeof(struct lpfc_mbx_query_fw_config) -
10860 sizeof(struct lpfc_sli4_cfg_mhdr));
10861 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10862 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
10863 length, LPFC_SLI4_MBX_EMBED);
10865 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10867 shdr = (union lpfc_sli4_cfg_shdr *)
10868 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
10869 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10870 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10871 if (shdr_status || shdr_add_status || rc) {
10872 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10873 "3250 QUERY_FW_CFG mailbox failed with status "
10874 "x%x add_status x%x, mbx status x%x\n",
10875 shdr_status, shdr_add_status, rc);
10876 mempool_free(mboxq, phba->mbox_mem_pool);
10881 phba->sli4_hba.fw_func_mode =
10882 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
10883 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
10884 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
10885 phba->sli4_hba.physical_port =
10886 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
10887 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10888 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
10889 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
10890 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
10892 mempool_free(mboxq, phba->mbox_mem_pool);
10895 * Set up HBA Event Queues (EQs)
10897 qp = phba->sli4_hba.hdwq;
10899 /* Set up HBA event queue */
10900 if (!qp) {
10901 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10902 "3147 Fast-path EQs not allocated\n");
10907 /* Loop thru all IRQ vectors */
10908 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
10909 /* Create HBA Event Queues (EQs) in order */
10910 for_each_present_cpu(cpu) {
10911 cpup = &phba->sli4_hba.cpu_map[cpu];
10913 /* Look for the CPU that's using that vector with
10914 * LPFC_CPU_FIRST_IRQ set.
10916 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10917 continue;
10918 if (qidx != cpup->eq)
10919 continue;
10921 /* Create an EQ for that vector */
10922 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
10923 phba->cfg_fcp_imax);
10925 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10926 "0523 Failed setup of fast-path"
10927 " EQ (%d), rc = 0x%x\n",
10928 cpup->eq, (uint32_t)rc);
10932 /* Save the EQ for that vector in the hba_eq_hdl */
10933 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
10934 qp[cpup->hdwq].hba_eq;
10936 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10937 "2584 HBA EQ setup: queue[%d]-id=%d\n",
10939 qp[cpup->hdwq].hba_eq->queue_id);
10943 /* Loop thru all Hardware Queues */
10944 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
10945 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
10946 cpup = &phba->sli4_hba.cpu_map[cpu];
10948 /* Create the CQ/WQ corresponding to the Hardware Queue */
10949 rc = lpfc_create_wq_cq(phba,
10950 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
10953 &phba->sli4_hba.hdwq[qidx].io_cq_map,
10957 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10958 "0535 Failed to setup fastpath "
10959 "IO WQ/CQ (%d), rc = 0x%x\n",
10960 qidx, (uint32_t)rc);
10966 * Set up Slow Path Complete Queues (CQs)
10969 /* Set up slow-path MBOX CQ/MQ */
10971 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
10972 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10973 "0528 %s not allocated\n",
10974 phba->sli4_hba.mbx_cq ?
10975 "Mailbox WQ" : "Mailbox CQ");
10980 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
10981 phba->sli4_hba.mbx_cq,
10982 phba->sli4_hba.mbx_wq,
10983 NULL, 0, LPFC_MBOX);
10985 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10986 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
10990 if (phba->nvmet_support) {
10991 if (!phba->sli4_hba.nvmet_cqset) {
10992 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10993 "3165 Fast-path NVME CQ Set "
10994 "array not allocated\n");
10998 if (phba->cfg_nvmet_mrq > 1) {
10999 rc = lpfc_cq_create_set(phba,
11000 phba->sli4_hba.nvmet_cqset,
11002 LPFC_WCQ, LPFC_NVMET);
11004 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11005 "3164 Failed setup of NVME CQ "
11006 "Set, rc = 0x%x\n",
11011 /* Set up NVMET Receive Complete Queue */
11012 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
11014 LPFC_WCQ, LPFC_NVMET);
11016 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11017 "6089 Failed setup NVMET CQ: "
11018 "rc = 0x%x\n", (uint32_t)rc);
11021 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
11023 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11024 "6090 NVMET CQ setup: cq-id=%d, "
11025 "parent eq-id=%d\n",
11026 phba->sli4_hba.nvmet_cqset[0]->queue_id,
11027 qp[0].hba_eq->queue_id);
11031 /* Set up slow-path ELS WQ/CQ */
11032 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
11033 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11034 "0530 ELS %s not allocated\n",
11035 phba->sli4_hba.els_cq ? "WQ" : "CQ");
11039 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11040 phba->sli4_hba.els_cq,
11041 phba->sli4_hba.els_wq,
11042 NULL, 0, LPFC_ELS);
11044 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11045 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
11049 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11050 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
11051 phba->sli4_hba.els_wq->queue_id,
11052 phba->sli4_hba.els_cq->queue_id);
11054 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11055 /* Set up NVME LS Complete Queue */
11056 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
11057 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11058 "6091 LS %s not allocated\n",
11059 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
11063 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11064 phba->sli4_hba.nvmels_cq,
11065 phba->sli4_hba.nvmels_wq,
11066 NULL, 0, LPFC_NVME_LS);
11068 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11069 "0526 Failed setup of NVVME LS WQ/CQ: "
11070 "rc = 0x%x\n", (uint32_t)rc);
11074 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11075 "6096 ELS WQ setup: wq-id=%d, "
11076 "parent cq-id=%d\n",
11077 phba->sli4_hba.nvmels_wq->queue_id,
11078 phba->sli4_hba.nvmels_cq->queue_id);
11082 * Create NVMET Receive Queue (RQ)
11084 if (phba->nvmet_support) {
11085 if ((!phba->sli4_hba.nvmet_cqset) ||
11086 (!phba->sli4_hba.nvmet_mrq_hdr) ||
11087 (!phba->sli4_hba.nvmet_mrq_data)) {
11088 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11089 "6130 MRQ CQ Queues not "
11094 if (phba->cfg_nvmet_mrq > 1) {
11095 rc = lpfc_mrq_create(phba,
11096 phba->sli4_hba.nvmet_mrq_hdr,
11097 phba->sli4_hba.nvmet_mrq_data,
11098 phba->sli4_hba.nvmet_cqset,
11101 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11102 "6098 Failed setup of NVMET "
11103 "MRQ: rc = 0x%x\n",
11109 rc = lpfc_rq_create(phba,
11110 phba->sli4_hba.nvmet_mrq_hdr[0],
11111 phba->sli4_hba.nvmet_mrq_data[0],
11112 phba->sli4_hba.nvmet_cqset[0],
11115 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11116 "6057 Failed setup of NVMET "
11117 "Receive Queue: rc = 0x%x\n",
11123 phba, KERN_INFO, LOG_INIT,
11124 "6099 NVMET RQ setup: hdr-rq-id=%d, "
11125 "dat-rq-id=%d parent cq-id=%d\n",
11126 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
11127 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
11128 phba->sli4_hba.nvmet_cqset[0]->queue_id);
11133 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
11134 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11135 "0540 Receive Queue not allocated\n");
11140 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
11141 phba->sli4_hba.els_cq, LPFC_USOL);
11143 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11144 "0541 Failed setup of Receive Queue: "
11145 "rc = 0x%x\n", (uint32_t)rc);
11149 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11150 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
11151 "parent cq-id=%d\n",
11152 phba->sli4_hba.hdr_rq->queue_id,
11153 phba->sli4_hba.dat_rq->queue_id,
11154 phba->sli4_hba.els_cq->queue_id);
11156 if (phba->cfg_fcp_imax)
11157 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
11158 else
11159 usdelay = 0;
11161 for (qidx = 0; qidx < phba->cfg_irq_chann;
11162 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
11163 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
11164 usdelay);
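/*
 * Worked example (editorial note): with cfg_fcp_imax = 50000
 * interrupts per second, usdelay = LPFC_SEC_TO_USEC / 50000 = 20us,
 * and the loop above programs that delay into the EQs in batches of
 * LPFC_MAX_EQ_DELAY_EQID_CNT at a time.
 */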
11166 if (phba->sli4_hba.cq_max) {
11167 kfree(phba->sli4_hba.cq_lookup);
11168 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
11169 sizeof(struct lpfc_queue *), GFP_KERNEL);
11170 if (!phba->sli4_hba.cq_lookup) {
11171 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11172 "0549 Failed setup of CQ Lookup table: "
11173 "size 0x%x\n", phba->sli4_hba.cq_max);
11177 lpfc_setup_cq_lookup(phba);
11182 lpfc_sli4_queue_unset(phba);
11188 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
11189 * @phba: pointer to lpfc hba data structure.
11191 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
11192 * operation.
11196 * -ENOMEM - No available memory
11197 * -EIO - The mailbox failed to complete successfully.
11200 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
11202 struct lpfc_sli4_hdw_queue *qp;
11203 struct lpfc_queue *eq;
11206 /* Unset mailbox command work queue */
11207 if (phba->sli4_hba.mbx_wq)
11208 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
11210 /* Unset NVME LS work queue */
11211 if (phba->sli4_hba.nvmels_wq)
11212 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
11214 /* Unset ELS work queue */
11215 if (phba->sli4_hba.els_wq)
11216 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
11218 /* Unset unsolicited receive queue */
11219 if (phba->sli4_hba.hdr_rq)
11220 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
11221 phba->sli4_hba.dat_rq);
11223 /* Unset mailbox command complete queue */
11224 if (phba->sli4_hba.mbx_cq)
11225 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
11227 /* Unset ELS complete queue */
11228 if (phba->sli4_hba.els_cq)
11229 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
11231 /* Unset NVME LS complete queue */
11232 if (phba->sli4_hba.nvmels_cq)
11233 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
11235 if (phba->nvmet_support) {
11236 /* Unset NVMET MRQ queue */
11237 if (phba->sli4_hba.nvmet_mrq_hdr) {
11238 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11241 phba->sli4_hba.nvmet_mrq_hdr[qidx],
11242 phba->sli4_hba.nvmet_mrq_data[qidx]);
11245 /* Unset NVMET CQ Set complete queue */
11246 if (phba->sli4_hba.nvmet_cqset) {
11247 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11249 phba, phba->sli4_hba.nvmet_cqset[qidx]);
11253 /* Unset fast-path SLI4 queues */
11254 if (phba->sli4_hba.hdwq) {
11255 /* Loop thru all Hardware Queues */
11256 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11257 /* Destroy the CQ/WQ corresponding to Hardware Queue */
11258 qp = &phba->sli4_hba.hdwq[qidx];
11259 lpfc_wq_destroy(phba, qp->io_wq);
11260 lpfc_cq_destroy(phba, qp->io_cq);
11262 /* Loop thru all IRQ vectors */
11263 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11264 /* Destroy the EQ corresponding to the IRQ vector */
11265 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11266 lpfc_eq_destroy(phba, eq);
11270 kfree(phba->sli4_hba.cq_lookup);
11271 phba->sli4_hba.cq_lookup = NULL;
11272 phba->sli4_hba.cq_max = 0;
11276 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
11277 * @phba: pointer to lpfc hba data structure.
11279 * This routine is invoked to allocate and set up a pool of completion queue
11280 * events. The body of the completion queue event is a completion queue entry
11281 * CQE. For now, this pool is used for the interrupt service routine to queue
11282 * the following HBA completion queue events for the worker thread to process:
11283 * - Mailbox asynchronous events
11284 * - Receive queue completion unsolicited events
11285 * Later, this can be used for all the slow-path events.
11289 * -ENOMEM - No available memory
11292 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
11294 struct lpfc_cq_event *cq_event;
11297 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
11298 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
11300 goto out_pool_create_fail;
11301 list_add_tail(&cq_event->list,
11302 &phba->sli4_hba.sp_cqe_event_pool);
11306 out_pool_create_fail:
11307 lpfc_sli4_cq_event_pool_destroy(phba);
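/*
 * Sizing note (editorial): the pool is provisioned at four times the
 * CQ depth (4 * cq_ecount) so that bursts of slow-path events queued
 * for the worker thread are unlikely to exhaust it; on any allocation
 * failure the partially built pool is torn down here.
 */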
11312 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
11313 * @phba: pointer to lpfc hba data structure.
11315 * This routine is invoked to free the pool of completion queue events at
11316 * driver unload time. Note that it is the responsibility of the driver
11317 * cleanup routine to free all the outstanding completion-queue events
11318 * allocated from this pool back into the pool before invoking this routine
11319 * to destroy the pool.
11322 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
11324 struct lpfc_cq_event *cq_event, *next_cq_event;
11326 list_for_each_entry_safe(cq_event, next_cq_event,
11327 &phba->sli4_hba.sp_cqe_event_pool, list) {
11328 list_del(&cq_event->list);
11334 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11335 * @phba: pointer to lpfc hba data structure.
11337 * This routine is the lock free version of the API invoked to allocate a
11338 * completion-queue event from the free pool.
11340 * Return: Pointer to the newly allocated completion-queue event if successful
11343 struct lpfc_cq_event *
11344 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11346 struct lpfc_cq_event *cq_event = NULL;
11348 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
11349 struct lpfc_cq_event, list);
11354 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11355 * @phba: pointer to lpfc hba data structure.
11357 * This routine is the lock version of the API invoked to allocate a
11358 * completion-queue event from the free pool.
11360 * Return: Pointer to the newly allocated completion-queue event if successful
11363 struct lpfc_cq_event *
11364 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11366 struct lpfc_cq_event *cq_event;
11367 unsigned long iflags;
11369 spin_lock_irqsave(&phba->hbalock, iflags);
11370 cq_event = __lpfc_sli4_cq_event_alloc(phba);
11371 spin_unlock_irqrestore(&phba->hbalock, iflags);
11376 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11377 * @phba: pointer to lpfc hba data structure.
11378 * @cq_event: pointer to the completion queue event to be freed.
11380 * This routine is the lock free version of the API invoked to release a
11381 * completion-queue event back into the free pool.
11384 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11385 struct lpfc_cq_event *cq_event)
11387 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
11391 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11392 * @phba: pointer to lpfc hba data structure.
11393 * @cq_event: pointer to the completion queue event to be freed.
11395 * This routine is the lock version of the API invoked to release a
11396 * completion-queue event back into the free pool.
11399 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11400 struct lpfc_cq_event *cq_event)
11402 unsigned long iflags;
11403 spin_lock_irqsave(&phba->hbalock, iflags);
11404 __lpfc_sli4_cq_event_release(phba, cq_event);
11405 spin_unlock_irqrestore(&phba->hbalock, iflags);
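/*
 * Pattern note (editorial): each cq_event operation comes in a
 * lock-free __lpfc_sli4_* form that assumes the caller already holds
 * hbalock, plus a locking wrapper that takes it, e.g.
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	cq_event = __lpfc_sli4_cq_event_alloc(phba);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *
 * so callers that already hold the lock can avoid a double acquire.
 */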
11409 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
11410 * @phba: pointer to lpfc hba data structure.
11412 * This routine frees all the pending completion-queue events back
11413 * into the free pool for device reset.
11416 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
11418 LIST_HEAD(cq_event_list);
11419 struct lpfc_cq_event *cq_event;
11420 unsigned long iflags;
11422 /* Retrieve all the pending WCQEs from pending WCQE lists */
11424 /* Pending ELS XRI abort events */
11425 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11426 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11428 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11430 /* Pending async events */
11431 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
11432 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
11434 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
11436 while (!list_empty(&cq_event_list)) {
11437 list_remove_head(&cq_event_list, cq_event,
11438 struct lpfc_cq_event, list);
11439 lpfc_sli4_cq_event_release(phba, cq_event);
11444 * lpfc_pci_function_reset - Reset pci function.
11445 * @phba: pointer to lpfc hba data structure.
11447 * This routine is invoked to request a PCI function reset. It destroys
11448 * all resources assigned to the PCI function which originates this request.
11452 * -ENOMEM - No available memory
11453 * -EIO - The mailbox failed to complete successfully.
11456 lpfc_pci_function_reset(struct lpfc_hba *phba)
11458 LPFC_MBOXQ_t *mboxq;
11459 uint32_t rc = 0, if_type;
11460 uint32_t shdr_status, shdr_add_status;
11462 uint32_t port_reset = 0;
11463 union lpfc_sli4_cfg_shdr *shdr;
11464 struct lpfc_register reg_data;
11467 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11469 case LPFC_SLI_INTF_IF_TYPE_0:
11470 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
11473 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11474 "0494 Unable to allocate memory for "
11475 "issuing SLI_FUNCTION_RESET mailbox "
11480 /* Setup PCI function reset mailbox-ioctl command */
11481 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11482 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
11483 LPFC_SLI4_MBX_EMBED);
11484 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11485 shdr = (union lpfc_sli4_cfg_shdr *)
11486 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11487 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11488 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
11490 mempool_free(mboxq, phba->mbox_mem_pool);
11491 if (shdr_status || shdr_add_status || rc) {
11492 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11493 "0495 SLI_FUNCTION_RESET mailbox "
11494 "failed with status x%x add_status x%x,"
11495 " mbx status x%x\n",
11496 shdr_status, shdr_add_status, rc);
11500 case LPFC_SLI_INTF_IF_TYPE_2:
11501 case LPFC_SLI_INTF_IF_TYPE_6:
11504 * Poll the Port Status Register and wait for RDY for
11505 * up to 30 seconds. If the port doesn't respond, treat
11508 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
11509 if (lpfc_readl(phba->sli4_hba.u.if_type2.
11510 STATUSregaddr, &reg_data.word0)) {
11514 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
11519 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
11520 phba->work_status[0] = readl(
11521 phba->sli4_hba.u.if_type2.ERR1regaddr);
11522 phba->work_status[1] = readl(
11523 phba->sli4_hba.u.if_type2.ERR2regaddr);
11524 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11525 "2890 Port not ready, port status reg "
11526 "0x%x error 1=0x%x, error 2=0x%x\n",
11528 phba->work_status[0],
11529 phba->work_status[1]);
11536 * Reset the port now
11538 reg_data.word0 = 0;
11539 bf_set(lpfc_sliport_ctrl_end, &reg_data,
11540 LPFC_SLIPORT_LITTLE_ENDIAN);
11541 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
11542 LPFC_SLIPORT_INIT_PORT);
11543 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
11546 pci_read_config_word(phba->pcidev,
11547 PCI_DEVICE_ID, &devid);
11552 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
11558 case LPFC_SLI_INTF_IF_TYPE_1:
11564 /* Catch the not-ready port failure after a port reset. */
11566 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11567 "3317 HBA not functional: IP Reset Failed "
11568 "try: echo fw_reset > board_mode\n");
11576 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
11577 * @phba: pointer to lpfc hba data structure.
11579 * This routine is invoked to set up the PCI device memory space for device
11580 * with SLI-4 interface spec.
11584 * other values - error
11587 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
11589 struct pci_dev *pdev = phba->pcidev;
11590 unsigned long bar0map_len, bar1map_len, bar2map_len;
11597 /* Set the device DMA mask size */
11598 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11600 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11605 * The BARs and register set definitions and offset locations are
11606 * dependent on the if_type.
11608 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
11609 &phba->sli4_hba.sli_intf.word0)) {
11613 /* There is no SLI3 failback for SLI4 devices. */
11614 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
11615 LPFC_SLI_INTF_VALID) {
11616 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11617 "2894 SLI_INTF reg contents invalid "
11618 "sli_intf reg 0x%x\n",
11619 phba->sli4_hba.sli_intf.word0);
11623 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11625 * Get the bus address of SLI4 device Bar regions and the
11626 * number of bytes required by each mapping. The mapping of the
11627 * particular PCI BARs regions is dependent on the type of
11630 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
11631 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
11632 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
11635 * Map SLI4 PCI Config Space Register base to a kernel virtual
11638 phba->sli4_hba.conf_regs_memmap_p =
11639 ioremap(phba->pci_bar0_map, bar0map_len);
11640 if (!phba->sli4_hba.conf_regs_memmap_p) {
11641 dev_printk(KERN_ERR, &pdev->dev,
11642 "ioremap failed for SLI4 PCI config "
11646 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
11647 /* Set up BAR0 PCI config space register memory map */
11648 lpfc_sli4_bar0_register_memmap(phba, if_type);
11650 phba->pci_bar0_map = pci_resource_start(pdev, 1);
11651 bar0map_len = pci_resource_len(pdev, 1);
11652 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
11653 dev_printk(KERN_ERR, &pdev->dev,
11654 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
11657 phba->sli4_hba.conf_regs_memmap_p =
11658 ioremap(phba->pci_bar0_map, bar0map_len);
11659 if (!phba->sli4_hba.conf_regs_memmap_p) {
11660 dev_printk(KERN_ERR, &pdev->dev,
11661 "ioremap failed for SLI4 PCI config "
11665 lpfc_sli4_bar0_register_memmap(phba, if_type);
11668 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11669 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
11671 * Map SLI4 if type 0 HBA Control Register base to a
11672 * kernel virtual address and setup the registers.
11674 phba->pci_bar1_map = pci_resource_start(pdev,
11676 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11677 phba->sli4_hba.ctrl_regs_memmap_p =
11678 ioremap(phba->pci_bar1_map,
11680 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
11681 dev_err(&pdev->dev,
11682 "ioremap failed for SLI4 HBA "
11683 "control registers.\n");
11685 goto out_iounmap_conf;
11687 phba->pci_bar2_memmap_p =
11688 phba->sli4_hba.ctrl_regs_memmap_p;
11689 lpfc_sli4_bar1_register_memmap(phba, if_type);
11692 goto out_iounmap_conf;
11696 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
11697 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
11699 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
11700 * virtual address and setup the registers.
11702 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
11703 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11704 phba->sli4_hba.drbl_regs_memmap_p =
11705 ioremap(phba->pci_bar1_map, bar1map_len);
11706 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11707 dev_err(&pdev->dev,
11708 "ioremap failed for SLI4 HBA doorbell registers.\n");
11710 goto out_iounmap_conf;
11712 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
11713 lpfc_sli4_bar1_register_memmap(phba, if_type);
11716 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11717 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11719 * Map SLI4 if type 0 HBA Doorbell Register base to
11720 * a kernel virtual address and setup the registers.
11722 phba->pci_bar2_map = pci_resource_start(pdev,
11724 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11725 phba->sli4_hba.drbl_regs_memmap_p =
11726 ioremap(phba->pci_bar2_map,
11728 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11729 dev_err(&pdev->dev,
11730 "ioremap failed for SLI4 HBA"
11731 " doorbell registers.\n");
11733 goto out_iounmap_ctrl;
11735 phba->pci_bar4_memmap_p =
11736 phba->sli4_hba.drbl_regs_memmap_p;
11737 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
11739 goto out_iounmap_all;
11742 goto out_iounmap_all;
11746 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
11747 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11749 * Map SLI4 if type 6 HBA DPP Register base to a kernel
11750 * virtual address and setup the registers.
11752 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
11753 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11754 phba->sli4_hba.dpp_regs_memmap_p =
11755 ioremap(phba->pci_bar2_map, bar2map_len);
11756 if (!phba->sli4_hba.dpp_regs_memmap_p) {
11757 dev_err(&pdev->dev,
11758 "ioremap failed for SLI4 HBA dpp registers.\n");
11760 goto out_iounmap_ctrl;
11762 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
11765 /* Set up the EQ/CQ register handling functions now */
11767 case LPFC_SLI_INTF_IF_TYPE_0:
11768 case LPFC_SLI_INTF_IF_TYPE_2:
11769 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
11770 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
11771 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
11773 case LPFC_SLI_INTF_IF_TYPE_6:
11774 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
11775 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
11776 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
11785 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
11787 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
11789 iounmap(phba->sli4_hba.conf_regs_memmap_p);
11795 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
11796 * @phba: pointer to lpfc hba data structure.
11798 * This routine is invoked to unset the PCI device memory space for device
11799 * with SLI-4 interface spec.
11802 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
11805 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11808 case LPFC_SLI_INTF_IF_TYPE_0:
11809 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
11810 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
11811 iounmap(phba->sli4_hba.conf_regs_memmap_p);
11813 case LPFC_SLI_INTF_IF_TYPE_2:
11814 iounmap(phba->sli4_hba.conf_regs_memmap_p);
11816 case LPFC_SLI_INTF_IF_TYPE_6:
11817 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
11818 iounmap(phba->sli4_hba.conf_regs_memmap_p);
11819 if (phba->sli4_hba.dpp_regs_memmap_p)
11820 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
11822 case LPFC_SLI_INTF_IF_TYPE_1:
11824 dev_printk(KERN_ERR, &phba->pcidev->dev,
11825 "FATAL - unsupported SLI4 interface type - %d\n",
11832 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
11833 * @phba: pointer to lpfc hba data structure.
11835 * This routine is invoked to enable the MSI-X interrupt vectors to device
11836 * with SLI-3 interface specs.
11840 * other values - error
11843 lpfc_sli_enable_msix(struct lpfc_hba *phba)
11848 /* Set up MSI-X multi-message vectors */
11849 rc = pci_alloc_irq_vectors(phba->pcidev,
11850 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
11852 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11853 "0420 PCI enable MSI-X failed (%d)\n", rc);
11858 * Assign MSI-X vectors to interrupt handlers
11861 /* vector-0 is associated to slow-path handler */
11862 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
11863 &lpfc_sli_sp_intr_handler, 0,
11864 LPFC_SP_DRIVER_HANDLER_NAME, phba);
11866 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11867 "0421 MSI-X slow-path request_irq failed "
11872 /* vector-1 is associated to fast-path handler */
11873 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
11874 &lpfc_sli_fp_intr_handler, 0,
11875 LPFC_FP_DRIVER_HANDLER_NAME, phba);
11878 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11879 "0429 MSI-X fast-path request_irq failed "
11885 * Configure HBA MSI-X attention conditions to messages
11887 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11891 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11892 "0474 Unable to allocate memory for issuing "
11893 "MBOX_CONFIG_MSI command\n");
11896 rc = lpfc_config_msi(phba, pmb);
11899 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
11900 if (rc != MBX_SUCCESS) {
11901 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
11902 "0351 Config MSI mailbox command failed, "
11903 "mbxCmd x%x, mbxStatus x%x\n",
11904 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
11908 /* Free memory allocated for mailbox command */
11909 mempool_free(pmb, phba->mbox_mem_pool);
11913 /* Free memory allocated for mailbox command */
11914 mempool_free(pmb, phba->mbox_mem_pool);
11917 /* free the irq already requested */
11918 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
11921 /* free the irq already requested */
11922 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
11925 /* Unconfigure MSI-X capability structure */
11926 pci_free_irq_vectors(phba->pcidev);
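/*
 * Unwind note (editorial): the error labels above release resources
 * in reverse order of acquisition - the fast-path vector-1 IRQ, then
 * the slow-path vector-0 IRQ, and finally the MSI-X vectors via
 * pci_free_irq_vectors().
 */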
11933 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
11934 * @phba: pointer to lpfc hba data structure.
11936 * This routine is invoked to enable the MSI interrupt mode to device with
11937 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
11938 * enable the MSI vector. The device driver is responsible for calling
11939 * request_irq() to register the MSI vector with an interrupt handler,
11940 * which is done in this function.
11944 * other values - error
11947 lpfc_sli_enable_msi(struct lpfc_hba *phba)
11951 rc = pci_enable_msi(phba->pcidev);
11953 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11954 "0462 PCI enable MSI mode success.\n");
11956 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11957 "0471 PCI enable MSI mode failed (%d)\n", rc);
11961 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
11962 0, LPFC_DRIVER_NAME, phba);
11964 pci_disable_msi(phba->pcidev);
11965 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
11966 "0478 MSI request_irq failed (%d)\n", rc);
11972 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
11973 * @phba: pointer to lpfc hba data structure.
11974 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
11976 * This routine is invoked to enable device interrupt and associate driver's
11977 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
11978 * spec. Depending on the interrupt mode configured for the driver, the
11979 * driver will try to fall back from the configured interrupt mode to an
11980 * interrupt mode which is supported by the platform, kernel, and device,
11981 * in the order of:
11982 * MSI-X -> MSI -> IRQ.
11986 * other values - error
11989 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
11991 uint32_t intr_mode = LPFC_INTR_ERROR;
11994 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
11995 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
11998 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
12000 if (cfg_mode == 2) {
12001 /* Now, try to enable MSI-X interrupt mode */
12002 retval = lpfc_sli_enable_msix(phba);
12004 /* Indicate initialization to MSI-X mode */
12005 phba->intr_type = MSIX;
12010 /* Fallback to MSI if MSI-X initialization failed */
12011 if (cfg_mode >= 1 && phba->intr_type == NONE) {
12012 retval = lpfc_sli_enable_msi(phba);
12014 /* Indicate initialization to MSI mode */
12015 phba->intr_type = MSI;
12020 /* Fallback to INTx if both MSI-X/MSI initalization failed */
12021 if (phba->intr_type == NONE) {
12022 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12023 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
12025 /* Indicate initialization to INTx mode */
12026 phba->intr_type = INTx;
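/*
 * Fallback summary (editorial): cfg_mode 2 attempts MSI-X first,
 * cfg_mode >= 1 falls back to MSI if MSI-X failed, and INTx with
 * IRQF_SHARED is the last resort; phba->intr_type records whichever
 * mode was successfully initialized.
 */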
12034 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
12035 * @phba: pointer to lpfc hba data structure.
12037 * This routine is invoked to disable device interrupt and disassociate the
12038 * driver's interrupt handler(s) from interrupt vector(s) to device with
12039 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
12040 * release the interrupt vector(s) for the message signaled interrupt.
12043 lpfc_sli_disable_intr(struct lpfc_hba *phba)
12047 if (phba->intr_type == MSIX)
12048 nr_irqs = LPFC_MSIX_VECTORS;
12052 for (i = 0; i < nr_irqs; i++)
12053 free_irq(pci_irq_vector(phba->pcidev, i), phba);
12054 pci_free_irq_vectors(phba->pcidev);
12056 /* Reset interrupt management states */
12057 phba->intr_type = NONE;
12058 phba->sli.slistat.sli_intr = 0;
12062 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
12063 * @phba: pointer to lpfc hba data structure.
12064 * @id: EQ vector index or Hardware Queue index
12065 * @match: LPFC_FIND_BY_EQ = match by EQ
12066 * LPFC_FIND_BY_HDWQ = match by Hardware Queue
12067 * Return the CPU that matches the selection criteria
12070 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
12072 struct lpfc_vector_map_info *cpup;
12075 /* Loop through all CPUs */
12076 for_each_present_cpu(cpu) {
12077 cpup = &phba->sli4_hba.cpu_map[cpu];
12079 /* If we are matching by EQ, there may be multiple CPUs
12080 * using the same vector, so select the one with
12081 * LPFC_CPU_FIRST_IRQ set.
12083 if ((match == LPFC_FIND_BY_EQ) &&
12084 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
12085 (cpup->eq == id))
12086 return cpu;
12088 /* If matching by HDWQ, select the first CPU that matches */
12089 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
12090 return cpu;
12097 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
12098 * @phba: pointer to lpfc hba data structure.
12099 * @cpu: CPU map index
12100 * @phys_id: CPU package physical id
12101 * @core_id: CPU core id
12104 lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
12105 uint16_t phys_id, uint16_t core_id)
12107 struct lpfc_vector_map_info *cpup;
12110 for_each_present_cpu(idx) {
12111 cpup = &phba->sli4_hba.cpu_map[idx];
12112 /* Does the cpup match the one we are looking for */
12113 if ((cpup->phys_id == phys_id) &&
12114 (cpup->core_id == core_id) &&
12123 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
12124 * @phba: pointer to lpfc hba data structure.
12125 * @eqidx: index for eq and irq vector
12126 * @flag: flags to set for vector_map structure
12127 * @cpu: cpu used to index vector_map structure
12129 * The routine assigns EQ info to the vector_map structure
12132 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
12135 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
12136 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
12139 cpup->flag |= flag;
12141 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12142 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
12143 cpu, eqhdl->irq, cpup->eq, cpup->flag);
12147 * lpfc_cpu_map_array_init - Initialize cpu_map structure
12148 * @phba: pointer to lpfc hba data structure.
12150 * The routine initializes the cpu_map array structure
12153 lpfc_cpu_map_array_init(struct lpfc_hba *phba)
12155 struct lpfc_vector_map_info *cpup;
12156 struct lpfc_eq_intr_info *eqi;
12159 for_each_possible_cpu(cpu) {
12160 cpup = &phba->sli4_hba.cpu_map[cpu];
12161 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
12162 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
12163 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
12164 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
12166 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
12167 INIT_LIST_HEAD(&eqi->list);
12173 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
12174 * @phba: pointer to lpfc hba data structure.
12176 * The routine initializes the hba_eq_hdl array structure
12179 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
12181 struct lpfc_hba_eq_hdl *eqhdl;
12184 for (i = 0; i < phba->cfg_irq_chann; i++) {
12185 eqhdl = lpfc_get_eq_hdl(i);
12186 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
12187 eqhdl->phba = phba;
12192 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
12193 * @phba: pointer to lpfc hba data structure.
12194 * @vectors: number of msix vectors allocated.
12196 * The routine will figure out the CPU affinity assignment for every
12197 * MSI-X vector allocated for the HBA.
12198 * In addition, the CPU to IO channel mapping will be calculated
12199 * and the phba->sli4_hba.cpu_map array will reflect this.
12202 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
12204 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
12205 int max_phys_id, min_phys_id;
12206 int max_core_id, min_core_id;
12207 struct lpfc_vector_map_info *cpup;
12208 struct lpfc_vector_map_info *new_cpup;
12210 struct cpuinfo_x86 *cpuinfo;
12212 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12213 struct lpfc_hdwq_stat *c_stat;
12217 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
12219 min_core_id = LPFC_VECTOR_MAP_EMPTY;
12221 /* Update CPU map with physical id and core id of each CPU */
12222 for_each_present_cpu(cpu) {
12223 cpup = &phba->sli4_hba.cpu_map[cpu];
12225 cpuinfo = &cpu_data(cpu);
12226 cpup->phys_id = cpuinfo->phys_proc_id;
12227 cpup->core_id = cpuinfo->cpu_core_id;
12228 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
12229 cpup->flag |= LPFC_CPU_MAP_HYPER;
12231 /* No distinction between CPUs for other platforms */
12233 cpup->core_id = cpu;
12236 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12237 "3328 CPU %d physid %d coreid %d flag x%x\n",
12238 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
12240 if (cpup->phys_id > max_phys_id)
12241 max_phys_id = cpup->phys_id;
12242 if (cpup->phys_id < min_phys_id)
12243 min_phys_id = cpup->phys_id;
12245 if (cpup->core_id > max_core_id)
12246 max_core_id = cpup->core_id;
12247 if (cpup->core_id < min_core_id)
12248 min_core_id = cpup->core_id;
12251 /* After looking at each irq vector assigned to this pcidev, it's
12252 * possible to see that not ALL CPUs have been accounted for.
12253 * Next we will set any unassigned (unaffinitized) cpu map
12254 * entries to an IRQ on the same phys_id.
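	/* Illustrative sketch (hypothetical numbers, not a real topology):
	 * on a 4-CPU, 2-socket system where only CPU0 (phys_id 0) and
	 * CPU2 (phys_id 1) received kernel-assigned vectors, this first
	 * pass would leave CPU1 sharing CPU0's eq and CPU3 sharing CPU2's
	 * eq, because each unassigned entry searches for a donor on its
	 * own phys_id before the any-phys_id pass below runs.
	 */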
12256 first_cpu = cpumask_first(cpu_present_mask);
12257 start_cpu = first_cpu;
12259 for_each_present_cpu(cpu) {
12260 cpup = &phba->sli4_hba.cpu_map[cpu];
12262 /* Is this CPU entry unassigned */
12263 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12264 /* Mark CPU as IRQ not assigned by the kernel */
12265 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12267 /* If so, find a new_cpup that's on the SAME
12268 * phys_id as cpup. start_cpu will start where we
12269 * left off so all unassigned entries don't get assigned
12270 * the IRQ of the first entry.
12272 new_cpu = start_cpu;
12273 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12274 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12275 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12276 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
12277 (new_cpup->phys_id == cpup->phys_id))
12279 new_cpu = cpumask_next(
12280 new_cpu, cpu_present_mask);
12281 if (new_cpu == nr_cpumask_bits)
12282 new_cpu = first_cpu;
12284 /* At this point, we leave the CPU as unassigned */
12287 /* We found a matching phys_id, so copy the IRQ info */
12288 cpup->eq = new_cpup->eq;
12290 /* Bump start_cpu to the next slot to minimize the
12291 * chance of having multiple unassigned CPU entries
12292 * selecting the same IRQ.
12294 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12295 if (start_cpu == nr_cpumask_bits)
12296 start_cpu = first_cpu;
12298 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12299 "3337 Set Affinity: CPU %d "
12300 "eq %d from peer cpu %d same "
12302 cpu, cpup->eq, new_cpu,
12307 /* Set any unassigned cpu map entries to an IRQ on any phys_id */
12308 start_cpu = first_cpu;
12310 for_each_present_cpu(cpu) {
12311 cpup = &phba->sli4_hba.cpu_map[cpu];
12313 /* Is this entry unassigned */
12314 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12315 /* Mark it as IRQ not assigned by the kernel */
12316 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12318 /* If so, find a new_cpup that's on ANY phys_id,
12319 * not just cpup's. start_cpu will start where we
12320 * left off so all unassigned entries don't get
12321 * assigned the IRQ of the first entry.
12323 new_cpu = start_cpu;
12324 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12325 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12326 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12327 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
12329 new_cpu = cpumask_next(
12330 new_cpu, cpu_present_mask);
12331 if (new_cpu == nr_cpumask_bits)
12332 new_cpu = first_cpu;
12334 /* We should never leave an entry unassigned */
12335 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12336 "3339 Set Affinity: CPU %d "
12337 "eq %d UNASSIGNED\n",
12338 cpu, cpup->eq);
12341 /* We found an available entry, copy the IRQ info */
12342 cpup->eq = new_cpup->eq;
12344 /* Bump start_cpu to the next slot to minimize the
12345 * chance of having multiple unassigned CPU entries
12346 * selecting the same IRQ.
12348 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12349 if (start_cpu == nr_cpumask_bits)
12350 start_cpu = first_cpu;
12352 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12353 "3338 Set Affinity: CPU %d "
12354 "eq %d from peer cpu %d (%d/%d)\n",
12355 cpu, cpup->eq, new_cpu,
12356 new_cpup->phys_id, new_cpup->core_id);
12360 /* Assign hdwq indices that are unique across all cpus in the map
12361 * that are also FIRST_CPUs.
12364 for_each_present_cpu(cpu) {
12365 cpup = &phba->sli4_hba.cpu_map[cpu];
12367 /* Only FIRST IRQs get a hdwq index assignment. */
12368 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12371 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
12374 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12375 "3333 Set Affinity: CPU %d (phys %d core %d): "
12376 "hdwq %d eq %d flg x%x\n",
12377 cpu, cpup->phys_id, cpup->core_id,
12378 cpup->hdwq, cpup->eq, cpup->flag);
12380 /* Associate a hdwq with each cpu_map entry.
12381 * This will be 1 to 1 - hdwq to cpu - unless there are fewer
12382 * hardware queues than CPUs. In that case we will just round-robin
12383 * the available hardware queues as they get assigned to CPUs.
12384 * The next_idx is the idx from the FIRST_CPU loop above to account
12385 * for irq_chann < hdwq. The idx is used for round-robin assignments
12386 * and needs to start at 0.
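	/* Shape of the result (hypothetical 8-CPU, 4-hdwq, 4-vector config,
	 * for illustration only): the four LPFC_CPU_FIRST_IRQ cpus take
	 * hdwq 0-3 in the loop above; each remaining cpu below either
	 * consumes a still-unused hdwq via next_idx, reuses the hdwq of a
	 * peer on the same phys_id/core_id (or, failing that, the same
	 * phys_id), or falls back to a plain idx % cfg_hdw_queue
	 * round-robin.
	 */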
12391 for_each_present_cpu(cpu) {
12392 cpup = &phba->sli4_hba.cpu_map[cpu];
12394 /* FIRST cpus are already mapped. */
12395 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
12398 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
12399 * of the unassigned cpus to the next idx so that all
12400 * hdw queues are fully utilized.
12402 if (next_idx < phba->cfg_hdw_queue) {
12403 cpup->hdwq = next_idx;
12408 /* Not a First CPU and all hdw_queues are used. Reuse a
12409 * Hardware Queue for another CPU, so be smart about it
12410 * and pick one that has its IRQ/EQ mapped to the same phys_id
12411 * (CPU package) and core_id.
12413 new_cpu = start_cpu;
12414 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12415 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12416 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12417 new_cpup->phys_id == cpup->phys_id &&
12418 new_cpup->core_id == cpup->core_id) {
12421 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12422 if (new_cpu == nr_cpumask_bits)
12423 new_cpu = first_cpu;
12426 /* If we can't match both phys_id and core_id,
12427 * settle for just a phys_id match.
12429 new_cpu = start_cpu;
12430 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12431 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12432 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12433 new_cpup->phys_id == cpup->phys_id)
12436 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12437 if (new_cpu == nr_cpumask_bits)
12438 new_cpu = first_cpu;
12441 /* Otherwise just round robin on cfg_hdw_queue */
12442 cpup->hdwq = idx % phba->cfg_hdw_queue;
12446 /* We found an available entry, copy the IRQ info */
12447 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12448 if (start_cpu == nr_cpumask_bits)
12449 start_cpu = first_cpu;
12450 cpup->hdwq = new_cpup->hdwq;
12452 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12453 "3335 Set Affinity: CPU %d (phys %d core %d): "
12454 "hdwq %d eq %d flg x%x\n",
12455 cpu, cpup->phys_id, cpup->core_id,
12456 cpup->hdwq, cpup->eq, cpup->flag);
12460 * Initialize the cpu_map slots for not-present cpus in case
12461 * a cpu is hot-added. Perform a simple hdwq round robin assignment.
12464 for_each_possible_cpu(cpu) {
12465 cpup = &phba->sli4_hba.cpu_map[cpu];
12466 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12467 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
12468 c_stat->hdwq_no = cpup->hdwq;
12470 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
12473 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
12474 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12475 c_stat->hdwq_no = cpup->hdwq;
12477 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12478 "3340 Set Affinity: not present "
12479 "CPU %d hdwq %d\n",
12483 /* The cpu_map array will be used later during initialization
12484 * when EQ / CQ / WQs are allocated and configured.
12490 * lpfc_cpuhp_get_eq
12492 * @phba: pointer to lpfc hba data structure.
12493 * @cpu: cpu going offline
12494 * @eqlist: eq list to append to
12497 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
12498 struct list_head *eqlist)
12500 const struct cpumask *maskp;
12501 struct lpfc_queue *eq;
12502 struct cpumask *tmp;
12505 tmp = kzalloc(cpumask_size(), GFP_KERNEL);
12509 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12510 maskp = pci_irq_get_affinity(phba->pcidev, idx);
12514 * if the irq is not affinitized to the cpu going offline,
12515 * then we don't need to poll the eq attached to it.
12518 if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
12520 /* get the cpus that are online and are affinitized
12521 * to this irq vector. If the count is more than 1
12522 * then cpuhp is not going to shut down this vector.
12523 * Since this cpu has not gone offline yet, we need
12524 * the count to be > 1.
12526 cpumask_and(tmp, maskp, cpu_online_mask);
12527 if (cpumask_weight(tmp) > 1)
12530 /* Now that we have an irq to shut down, get the eq
12531 * mapped to this irq. Note: multiple hdwq's in
12532 * the software can share an eq, but eventually
12533 * only one eq will be mapped to this vector
12535 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12536 list_add(&eq->_poll_list, eqlist);
12542 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
12544 if (phba->sli_rev != LPFC_SLI_REV4)
12547 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
12550 * unregistering the instance doesn't stop the polling
12551 * timer. Wait for the poll timer to retire.
12554 del_timer_sync(&phba->cpuhp_poll_timer);
12557 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
12559 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
12562 __lpfc_cpuhp_remove(phba);
12565 static void lpfc_cpuhp_add(struct lpfc_hba *phba)
12567 if (phba->sli_rev != LPFC_SLI_REV4)
12572 if (!list_empty(&phba->poll_list))
12573 mod_timer(&phba->cpuhp_poll_timer,
12574 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
12578 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
12582 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
12584 if (phba->pport->load_flag & FC_UNLOADING) {
12589 if (phba->sli_rev != LPFC_SLI_REV4) {
12594 /* proceed with the hotplug */
12599 * lpfc_irq_set_aff - set IRQ affinity
12600 * @eqhdl: EQ handle
12601 * @cpu: cpu to set affinity
12605 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
12607 cpumask_clear(&eqhdl->aff_mask);
12608 cpumask_set_cpu(cpu, &eqhdl->aff_mask);
12609 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12610 irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
12614 * lpfc_irq_clear_aff - clear IRQ affinity
12615 * @eqhdl: EQ handle
12619 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
12621 cpumask_clear(&eqhdl->aff_mask);
12622 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
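/* Note on the pair above: setting IRQ_NO_BALANCING keeps irqbalance from
 * overriding the affinity hint, so the driver, rather than the kernel's
 * PCI_IRQ_AFFINITY management, owns the vector-to-cpu mapping. That is
 * what lets lpfc_irq_rebalance() below migrate vectors deterministically
 * on cpu hotplug events.
 */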
12626 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
12627 * @phba: pointer to HBA context object.
12628 * @cpu: cpu going offline/online
12629 * @offline: true, cpu is going offline. false, cpu is coming online.
12631 * If cpu is going offline, we'll try our best effort to find the next
12632 * online cpu on the phba's original_mask and migrate all offlining IRQ
12633 * affinities.
12635 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
12637 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
12638 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
12642 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
12644 struct lpfc_vector_map_info *cpup;
12645 struct cpumask *aff_mask;
12646 unsigned int cpu_select, cpu_next, idx;
12647 const struct cpumask *orig_mask;
12649 if (phba->irq_chann_mode == NORMAL_MODE)
12652 orig_mask = &phba->sli4_hba.irq_aff_mask;
12654 if (!cpumask_test_cpu(cpu, orig_mask))
12657 cpup = &phba->sli4_hba.cpu_map[cpu];
12659 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12663 /* Find next online CPU on original mask */
12664 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
12665 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
12667 /* Found a valid CPU */
12668 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
12669 /* Go through each eqhdl and ensure offlining
12670 * cpu aff_mask is migrated
12672 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12673 aff_mask = lpfc_get_aff_mask(idx);
12675 /* Migrate affinity */
12676 if (cpumask_test_cpu(cpu, aff_mask))
12677 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
12681 /* Rely on irqbalance if no online CPUs left on NUMA */
12682 for (idx = 0; idx < phba->cfg_irq_chann; idx++)
12683 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
12686 /* Migrate affinity back to this CPU */
12687 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
12691 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
12693 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12694 struct lpfc_queue *eq, *next;
12699 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12703 if (__lpfc_cpuhp_checks(phba, &retval))
12706 lpfc_irq_rebalance(phba, cpu, true);
12708 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
12712 /* start polling on these eq's */
12713 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
12714 list_del_init(&eq->_poll_list);
12715 lpfc_sli4_start_polling(eq);
12721 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
12723 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12724 struct lpfc_queue *eq, *next;
12729 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12733 if (__lpfc_cpuhp_checks(phba, &retval))
12736 lpfc_irq_rebalance(phba, cpu, false);
12738 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
12739 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
12741 lpfc_sli4_stop_polling(eq);
12748 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
12749 * @phba: pointer to lpfc hba data structure.
12751 * This routine is invoked to enable the MSI-X interrupt vectors to device
12752 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
12753 * to cpus on the system.
12755 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
12756 * the number of cpus on the same numa node as this adapter. The vectors are
12757 * allocated without requesting OS affinity mapping. A vector will be
12758 * allocated and assigned to each online and offline cpu. If the cpu is
12759 * online, then affinity will be set to that cpu. If the cpu is offline, then
12760 * affinity will be set to the nearest peer cpu within the numa node that is
12761 * online. If there are no online cpus within the numa node, affinity is not
12762 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
12763 * is consistent with the way cpu online/offline is handled when
12764 * cfg_irq_numa is enabled.
12766 * If numa mode is not enabled and there is more than 1 vector allocated, then
12767 * the driver relies on the managed irq interface, where the OS assigns
12768 * vector-to-cpu affinity. The driver will then use that affinity mapping to
12769 * set up its cpu mapping table.
12773 * other values - error
12776 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
12778 int vectors, rc, index;
12780 const struct cpumask *aff_mask = NULL;
12781 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
12782 struct lpfc_vector_map_info *cpup;
12783 struct lpfc_hba_eq_hdl *eqhdl;
12784 const struct cpumask *maskp;
12785 unsigned int flags = PCI_IRQ_MSIX;
12787 /* Set up MSI-X multi-message vectors */
12788 vectors = phba->cfg_irq_chann;
12790 if (phba->irq_chann_mode != NORMAL_MODE)
12791 aff_mask = &phba->sli4_hba.irq_aff_mask;
12794 cpu_cnt = cpumask_weight(aff_mask);
12795 vectors = min(phba->cfg_irq_chann, cpu_cnt);
12797 /* cpu: iterates over aff_mask including offline or online
12798 * cpu_select: iterates over online aff_mask to set affinity
12800 cpu = cpumask_first(aff_mask);
12801 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
12803 flags |= PCI_IRQ_AFFINITY;
12806 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
12808 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12809 "0484 PCI enable MSI-X failed (%d)\n", rc);
12814 /* Assign MSI-X vectors to interrupt handlers */
12815 for (index = 0; index < vectors; index++) {
12816 eqhdl = lpfc_get_eq_hdl(index);
12817 name = eqhdl->handler_name;
12818 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
12819 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
12820 LPFC_DRIVER_HANDLER_NAME"%d", index);
12822 eqhdl->idx = index;
12823 rc = request_irq(pci_irq_vector(phba->pcidev, index),
12824 &lpfc_sli4_hba_intr_handler, 0,
12827 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12828 "0486 MSI-X fast-path (%d) "
12829 "request_irq failed (%d)\n", index, rc);
12833 eqhdl->irq = pci_irq_vector(phba->pcidev, index);
12836 /* If found a neighboring online cpu, set affinity */
12837 if (cpu_select < nr_cpu_ids)
12838 lpfc_irq_set_aff(eqhdl, cpu_select);
12840 /* Assign EQ to cpu_map */
12841 lpfc_assign_eq_map_info(phba, index,
12842 LPFC_CPU_FIRST_IRQ,
12845 /* Iterate to next offline or online cpu in aff_mask */
12846 cpu = cpumask_next(cpu, aff_mask);
12848 /* Find next online cpu in aff_mask to set affinity */
12849 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
12850 } else if (vectors == 1) {
12851 cpu = cpumask_first(cpu_present_mask);
12852 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
12855 maskp = pci_irq_get_affinity(phba->pcidev, index);
12857 /* Loop through all CPUs associated with vector index */
12858 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
12859 cpup = &phba->sli4_hba.cpu_map[cpu];
12861 /* If this is the first CPU that's assigned to
12862 * this vector, set LPFC_CPU_FIRST_IRQ.
12864 * With certain platforms it's possible that irq
12865 * vectors are affinitized to all the cpus.
12866 * This can result in each cpu_map.eq to be set
12867 * to the last vector, resulting in overwrite
12868 * of all the previous cpu_map.eq. Ensure that
12869 * each vector receives a place in cpu_map.
12870 * Later call to lpfc_cpu_affinity_check will
12871 * ensure we are nicely balanced out.
12873 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
12875 lpfc_assign_eq_map_info(phba, index,
12876 LPFC_CPU_FIRST_IRQ,
12883 if (vectors != phba->cfg_irq_chann) {
12884 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12885 "3238 Reducing IO channels to match number of "
12886 "MSI-X vectors, requested %d got %d\n",
12887 phba->cfg_irq_chann, vectors);
12888 if (phba->cfg_irq_chann > vectors)
12889 phba->cfg_irq_chann = vectors;
12895 /* free the irq already requested */
12896 for (--index; index >= 0; index--) {
12897 eqhdl = lpfc_get_eq_hdl(index);
12898 lpfc_irq_clear_aff(eqhdl);
12899 irq_set_affinity_hint(eqhdl->irq, NULL);
12900 free_irq(eqhdl->irq, eqhdl);
12903 /* Unconfigure MSI-X capability structure */
12904 pci_free_irq_vectors(phba->pcidev);
12911 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
12912 * @phba: pointer to lpfc hba data structure.
12914 * This routine is invoked to enable the MSI interrupt mode to device with
12915 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
12916 * called to enable the MSI vector. The device driver is responsible for
12917 * calling request_irq() to register the MSI vector with an interrupt
12918 * handler, which is done in this function.
12922 * other values - error
12925 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
12929 struct lpfc_hba_eq_hdl *eqhdl;
12931 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
12932 PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
12934 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12935 "0487 PCI enable MSI mode success.\n");
12937 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12938 "0488 PCI enable MSI mode failed (%d)\n", rc);
12939 return rc ? rc : -1;
12942 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
12943 0, LPFC_DRIVER_NAME, phba);
12945 pci_free_irq_vectors(phba->pcidev);
12946 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12947 "0490 MSI request_irq failed (%d)\n", rc);
12951 eqhdl = lpfc_get_eq_hdl(0);
12952 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
12954 cpu = cpumask_first(cpu_present_mask);
12955 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
12957 for (index = 0; index < phba->cfg_irq_chann; index++) {
12958 eqhdl = lpfc_get_eq_hdl(index);
12959 eqhdl->idx = index;
12966 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
12967 * @phba: pointer to lpfc hba data structure.
12968 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
12970 * This routine is invoked to enable device interrupt and associate driver's
12971 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
12972 * interface spec. Depending on the interrupt mode configured in the driver,
12973 * the driver will try to fall back from the configured interrupt mode to an
12974 * interrupt mode which is supported by the platform, kernel, and device, in
12976 * the order: MSI-X -> MSI -> IRQ.
12980 * other values - error
12983 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
12985 uint32_t intr_mode = LPFC_INTR_ERROR;
12988 if (cfg_mode == 2) {
12989 /* Preparation before conf_msi mbox cmd */
12992 /* Now, try to enable MSI-X interrupt mode */
12993 retval = lpfc_sli4_enable_msix(phba);
12995 /* Indicate initialization to MSI-X mode */
12996 phba->intr_type = MSIX;
13002 /* Fallback to MSI if MSI-X initialization failed */
13003 if (cfg_mode >= 1 && phba->intr_type == NONE) {
13004 retval = lpfc_sli4_enable_msi(phba);
13006 /* Indicate initialization to MSI mode */
13007 phba->intr_type = MSI;
13012 /* Fallback to INTx if both MSI-X/MSI initialization failed */
13013 if (phba->intr_type == NONE) {
13014 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13015 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
13017 struct lpfc_hba_eq_hdl *eqhdl;
13020 /* Indicate initialization to INTx mode */
13021 phba->intr_type = INTx;
13024 eqhdl = lpfc_get_eq_hdl(0);
13025 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
13027 cpu = cpumask_first(cpu_present_mask);
13028 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
13030 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
13031 eqhdl = lpfc_get_eq_hdl(idx);
13040 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
13041 * @phba: pointer to lpfc hba data structure.
13043 * This routine is invoked to disable the device interrupt and disassociate
13044 * the driver's interrupt handler(s) from the interrupt vector(s) of a device
13045 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
13046 * will release the interrupt vector(s) for the message signaled interrupt.
13049 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
13051 /* Disable the currently initialized interrupt mode */
13052 if (phba->intr_type == MSIX) {
13054 struct lpfc_hba_eq_hdl *eqhdl;
13056 /* Free up MSI-X multi-message vectors */
13057 for (index = 0; index < phba->cfg_irq_chann; index++) {
13058 eqhdl = lpfc_get_eq_hdl(index);
13059 lpfc_irq_clear_aff(eqhdl);
13060 irq_set_affinity_hint(eqhdl->irq, NULL);
13061 free_irq(eqhdl->irq, eqhdl);
13064 free_irq(phba->pcidev->irq, phba);
13067 pci_free_irq_vectors(phba->pcidev);
13069 /* Reset interrupt management states */
13070 phba->intr_type = NONE;
13071 phba->sli.slistat.sli_intr = 0;
13075 * lpfc_unset_hba - Unset SLI3 hba device initialization
13076 * @phba: pointer to lpfc hba data structure.
13078 * This routine is invoked to unset the HBA device initialization steps for
13079 * a device with SLI-3 interface spec.
13082 lpfc_unset_hba(struct lpfc_hba *phba)
13084 struct lpfc_vport *vport = phba->pport;
13085 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
13087 spin_lock_irq(shost->host_lock);
13088 vport->load_flag |= FC_UNLOADING;
13089 spin_unlock_irq(shost->host_lock);
13091 kfree(phba->vpi_bmask);
13092 kfree(phba->vpi_ids);
13094 lpfc_stop_hba_timers(phba);
13096 phba->pport->work_port_events = 0;
13098 lpfc_sli_hba_down(phba);
13100 lpfc_sli_brdrestart(phba);
13102 lpfc_sli_disable_intr(phba);
13108 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
13109 * @phba: Pointer to HBA context object.
13111 * This function is called in the SLI4 code path to wait for completion
13112 * of the device's XRI exchange busy. It will check the XRI exchange busy
13113 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
13114 * that, it will check the XRI exchange busy on outstanding FCP and ELS
13115 * I/Os every 30 seconds, log an error message, and wait forever. Only when
13116 * all XRI exchange busy conditions complete will the driver unload proceed
13117 * with invoking the function reset ioctl mailbox command to the CNA and
13118 * the rest of the driver unload resource release.
13121 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
13123 struct lpfc_sli4_hdw_queue *qp;
13126 int io_xri_cmpl = 1;
13127 int nvmet_xri_cmpl = 1;
13128 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13130 /* Driver just aborted IOs during the hba_unset process. Pause
13131 * here to give the HBA time to complete the IOs and get entries
13132 * into the abts lists.
13134 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
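	/* Per the timing in the header comment, T1 is the short (10 ms) poll
	 * interval and T2 the long (30 s) interval used once the total wait
	 * exceeds LPFC_XRI_EXCH_BUSY_WAIT_TMO, so the settle delay above is
	 * roughly 5 * 10 ms; the exact constants are assumed to be defined
	 * in the SLI4 header.
	 */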
13136 /* Wait for NVME pending IO to flush back to transport. */
13137 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13138 lpfc_nvme_wait_for_io_drain(phba);
13141 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13142 qp = &phba->sli4_hba.hdwq[idx];
13143 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
13144 if (!io_xri_cmpl) /* if list is NOT empty */
13150 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13152 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13155 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
13156 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
13157 if (!nvmet_xri_cmpl)
13158 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13159 "6424 NVMET XRI exchange busy "
13160 "wait time: %d seconds.\n",
13163 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13164 "6100 IO XRI exchange busy "
13165 "wait time: %d seconds.\n",
13168 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13169 "2878 ELS XRI exchange busy "
13170 "wait time: %d seconds.\n",
13172 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
13173 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
13175 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
13176 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
13180 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13181 qp = &phba->sli4_hba.hdwq[idx];
13182 io_xri_cmpl = list_empty(
13183 &qp->lpfc_abts_io_buf_list);
13184 if (!io_xri_cmpl) /* if list is NOT empty */
13190 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13191 nvmet_xri_cmpl = list_empty(
13192 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13195 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13201 * lpfc_sli4_hba_unset - Unset the fcoe hba
13202 * @phba: Pointer to HBA context object.
13204 * This function is called in the SLI4 code path to reset the HBA's FCoE
13205 * function. The caller is not required to hold any lock. This routine
13206 * issues PCI function reset mailbox command to reset the FCoE function.
13207 * At the end of the function, it calls lpfc_hba_down_post function to
13208 * free any pending commands.
13211 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
13214 LPFC_MBOXQ_t *mboxq;
13215 struct pci_dev *pdev = phba->pcidev;
13217 lpfc_stop_hba_timers(phba);
13218 hrtimer_cancel(&phba->cmf_timer);
13221 phba->sli4_hba.intr_enable = 0;
13224 * Gracefully wait out the potential current outstanding asynchronous
13225 * mbox command.
13228 /* First, block any pending async mailbox command from being posted */
13229 spin_lock_irq(&phba->hbalock);
13230 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13231 spin_unlock_irq(&phba->hbalock);
13232 /* Now, try to wait it out if we can */
13233 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13235 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
13238 /* Forcefully release the outstanding mailbox command if timed out */
13239 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13240 spin_lock_irq(&phba->hbalock);
13241 mboxq = phba->sli.mbox_active;
13242 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
13243 __lpfc_mbox_cmpl_put(phba, mboxq);
13244 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13245 phba->sli.mbox_active = NULL;
13246 spin_unlock_irq(&phba->hbalock);
13249 /* Abort all iocbs associated with the hba */
13250 lpfc_sli_hba_iocb_abort(phba);
13252 /* Wait for completion of device XRI exchange busy */
13253 lpfc_sli4_xri_exchange_busy_wait(phba);
13255 /* per-phba callback de-registration for hotplug event */
13257 lpfc_cpuhp_remove(phba);
13259 /* Disable PCI subsystem interrupt */
13260 lpfc_sli4_disable_intr(phba);
13262 /* Disable SR-IOV if enabled */
13263 if (phba->cfg_sriov_nr_virtfn)
13264 pci_disable_sriov(pdev);
13266 /* The kthread_stop signal shall trigger work_done one more time */
13267 kthread_stop(phba->worker_thread);
13269 /* Disable FW logging to host memory */
13270 lpfc_ras_stop_fwlog(phba);
13272 /* Unset the queues shared with the hardware then release all
13273 * allocated resources.
13275 lpfc_sli4_queue_unset(phba);
13276 lpfc_sli4_queue_destroy(phba);
13278 /* Reset SLI4 HBA FCoE function */
13279 lpfc_pci_function_reset(phba);
13281 /* Free RAS DMA memory */
13282 if (phba->ras_fwlog.ras_enabled)
13283 lpfc_sli4_ras_dma_free(phba);
13285 /* Stop the SLI4 device port */
13287 phba->pport->work_port_events = 0;
13291 lpfc_cgn_crc32(uint32_t crc, u8 byte)
13296 for (bit = 0; bit < 8; bit++) {
13297 msb = (crc >> 31) & 1;
13300 if (msb ^ (byte & 1)) {
13301 crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
13310 lpfc_cgn_reverse_bits(uint32_t wd)
13312 uint32_t result = 0;
13315 for (i = 0; i < 32; i++) {
13317 result |= (1 & (wd >> i));
13323 * The routine corresponds to the algorithm the HBA firmware
13324 * uses to validate data integrity.
13327 lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
13331 uint8_t *data = (uint8_t *)ptr;
13333 for (i = 0; i < byteLen; ++i)
13334 crc = lpfc_cgn_crc32(crc, data[i]);
13336 result = ~lpfc_cgn_reverse_bits(crc);
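/* Usage sketch (illustrative only): a caller validating a congestion
 * buffer the same way the firmware does would recompute the CRC over
 * the info area and compare it against the stored little-endian value,
 * e.g.:
 *
 *	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
 *				  LPFC_CGN_CRC32_SEED);
 *	if (cp->cgn_info_crc != cpu_to_le32(crc))
 *		(treat the buffer contents as stale or corrupt)
 *
 * This mirrors how lpfc_init_congestion_buf() seals the buffer below.
 */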
13341 lpfc_init_congestion_buf(struct lpfc_hba *phba)
13343 struct lpfc_cgn_info *cp;
13344 struct timespec64 cmpl_time;
13349 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13350 "6235 INIT Congestion Buffer %p\n", phba->cgn_i);
13354 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13356 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
13357 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
13358 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
13359 atomic_set(&phba->cgn_sync_warn_cnt, 0);
13361 atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
13362 atomic64_set(&phba->cgn_acqe_stat.warn, 0);
13363 atomic_set(&phba->cgn_driver_evt_cnt, 0);
13364 atomic_set(&phba->cgn_latency_evt_cnt, 0);
13365 atomic64_set(&phba->cgn_latency_evt, 0);
13366 phba->cgn_evt_minute = 0;
13367 phba->hba_flag &= ~HBA_CGN_DAY_WRAP;
13369 memset(cp, 0xff, LPFC_CGN_DATA_SIZE);
13370 cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
13371 cp->cgn_info_version = LPFC_CGN_INFO_V3;
13373 /* cgn parameters */
13374 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
13375 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
13376 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
13377 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
13379 ktime_get_real_ts64(&cmpl_time);
13380 time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13382 cp->cgn_info_month = broken.tm_mon + 1;
13383 cp->cgn_info_day = broken.tm_mday;
13384 cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */
13385 cp->cgn_info_hour = broken.tm_hour;
13386 cp->cgn_info_minute = broken.tm_min;
13387 cp->cgn_info_second = broken.tm_sec;
13389 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13390 "2643 CGNInfo Init: Start Time "
13391 "%d/%d/%d %d:%d:%d\n",
13392 cp->cgn_info_day, cp->cgn_info_month,
13393 cp->cgn_info_year, cp->cgn_info_hour,
13394 cp->cgn_info_minute, cp->cgn_info_second);
13396 /* Fill in default LUN qdepth */
13398 size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
13399 cp->cgn_lunq = cpu_to_le16(size);
13402 /* last used Index initialized to 0xff already */
13404 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13405 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13406 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13407 cp->cgn_info_crc = cpu_to_le32(crc);
13409 phba->cgn_evt_timestamp = jiffies +
13410 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
13414 lpfc_init_congestion_stat(struct lpfc_hba *phba)
13416 struct lpfc_cgn_info *cp;
13417 struct timespec64 cmpl_time;
13421 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13422 "6236 INIT Congestion Stat %p\n", phba->cgn_i);
13427 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13428 memset(&cp->cgn_stat_npm, 0, LPFC_CGN_STAT_SIZE);
13430 ktime_get_real_ts64(&cmpl_time);
13431 time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13433 cp->cgn_stat_month = broken.tm_mon + 1;
13434 cp->cgn_stat_day = broken.tm_mday;
13435 cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */
13436 cp->cgn_stat_hour = broken.tm_hour;
13437 cp->cgn_stat_minute = broken.tm_min;
13439 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13440 "2647 CGNstat Init: Start Time "
13441 "%d/%d/%d %d:%d\n",
13442 cp->cgn_stat_day, cp->cgn_stat_month,
13443 cp->cgn_stat_year, cp->cgn_stat_hour,
13444 cp->cgn_stat_minute);
13446 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13447 cp->cgn_info_crc = cpu_to_le32(crc);
13451 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
13452 * @phba: Pointer to hba context object.
13453 * @reg: flag to determine register or unregister.
13456 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
13458 struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
13459 union lpfc_sli4_cfg_shdr *shdr;
13460 uint32_t shdr_status, shdr_add_status;
13461 LPFC_MBOXQ_t *mboxq;
13467 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13469 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13470 "2641 REG_CONGESTION_BUF mbox allocation fail: "
13471 "HBA state x%x reg %d\n",
13472 phba->pport->port_state, reg);
13476 length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
13477 sizeof(struct lpfc_sli4_cfg_mhdr));
13478 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13479 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
13480 LPFC_SLI4_MBX_EMBED);
13481 reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
13482 bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
13484 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
13486 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
13487 reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
13488 reg_congestion_buf->addr_lo =
13489 putPaddrLow(phba->cgn_i->phys);
13490 reg_congestion_buf->addr_hi =
13491 putPaddrHigh(phba->cgn_i->phys);
13493 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13494 shdr = (union lpfc_sli4_cfg_shdr *)
13495 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
13496 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13497 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13499 mempool_free(mboxq, phba->mbox_mem_pool);
13500 if (shdr_status || shdr_add_status || rc) {
13501 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13502 "2642 REG_CONGESTION_BUF mailbox "
13503 "failed with status x%x add_status x%x,"
13504 " mbx status x%x reg %d\n",
13505 shdr_status, shdr_add_status, rc, reg);
13512 lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
13514 lpfc_cmf_stop(phba);
13515 return __lpfc_reg_congestion_buf(phba, 0);
13519 lpfc_reg_congestion_buf(struct lpfc_hba *phba)
13521 return __lpfc_reg_congestion_buf(phba, 1);
13525 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
13526 * @phba: Pointer to HBA context object.
13527 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
13529 * This function is called in the SLI4 code path to read the port's
13530 * sli4 capabilities.
13532 * This function may be called from any context that can block-wait
13533 * for the completion. The expectation is that this routine is called
13534 * typically from probe_one or from the online routine.
13537 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
13540 struct lpfc_mqe *mqe = &mboxq->u.mqe;
13541 struct lpfc_pc_sli4_params *sli4_params;
13544 bool exp_wqcq_pages = true;
13545 struct lpfc_sli4_parameters *mbx_sli4_parameters;
13548 * By default, the driver assumes the SLI4 port requires RPI
13549 * header postings. The SLI4_PARAM response will correct this
13550 * assumption.
13552 phba->sli4_hba.rpi_hdrs_in_use = 1;
13554 /* Read the port's SLI4 Config Parameters */
13555 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
13556 sizeof(struct lpfc_sli4_cfg_mhdr));
13557 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13558 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
13559 length, LPFC_SLI4_MBX_EMBED);
13560 if (!phba->sli4_hba.intr_enable)
13561 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13563 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
13564 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
13568 sli4_params = &phba->sli4_hba.pc_sli4_params;
13569 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
13570 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
13571 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
13572 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
13573 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
13574 mbx_sli4_parameters);
13575 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
13576 mbx_sli4_parameters);
13577 if (bf_get(cfg_phwq, mbx_sli4_parameters))
13578 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
13580 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
13581 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
13582 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
13583 mbx_sli4_parameters);
13584 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
13585 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
13586 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
13587 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
13588 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
13589 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
13590 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
13591 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
13592 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
13593 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
13594 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
13595 mbx_sli4_parameters);
13596 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
13597 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
13598 mbx_sli4_parameters);
13599 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
13600 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
13602 /* Check for Extended Pre-Registered SGL support */
13603 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
13605 /* Check for firmware nvme support */
13606 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
13607 bf_get(cfg_xib, mbx_sli4_parameters));
13610 /* Save this to indicate the Firmware supports NVME */
13611 sli4_params->nvme = 1;
13613 /* Firmware NVME support, check driver FC4 NVME support */
13614 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
13615 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13616 "6133 Disabling NVME support: "
13617 "FC4 type not supported: x%x\n",
13618 phba->cfg_enable_fc4_type);
13622 /* No firmware NVME support, check driver FC4 NVME support */
13623 sli4_params->nvme = 0;
13624 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13625 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
13626 "6101 Disabling NVME support: Not "
13627 "supported by firmware (%d %d) x%x\n",
13628 bf_get(cfg_nvme, mbx_sli4_parameters),
13629 bf_get(cfg_xib, mbx_sli4_parameters),
13630 phba->cfg_enable_fc4_type);
13632 phba->nvmet_support = 0;
13633 phba->cfg_nvmet_mrq = 0;
13634 phba->cfg_nvme_seg_cnt = 0;
13636 /* If no FC4 type support, move to just SCSI support */
13637 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
13639 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
13643 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
13644 * accommodate 512K and 1M IOs in a single nvme buf.
13646 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13647 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
13649 /* Enable embedded Payload BDE if support is indicated */
13650 if (bf_get(cfg_pbde, mbx_sli4_parameters))
13651 phba->cfg_enable_pbde = 1;
13653 phba->cfg_enable_pbde = 0;
13656 * To support the Suppress Response feature, we must satisfy 3 conditions:
13657 * the lpfc_suppress_rsp module parameter must be set (default), and,
13658 * in the SLI4-Parameters Descriptor,
13659 * Extended Inline Buffers (XIB) must be supported, while
13660 * Suppress Response IU Not Supported (SRIUNS) must NOT be set
13661 * (a double negative).
13663 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
13664 !(bf_get(cfg_nosr, mbx_sli4_parameters)))
13665 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
13667 phba->cfg_suppress_rsp = 0;
13669 if (bf_get(cfg_eqdr, mbx_sli4_parameters))
13670 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
13672 /* Make sure that sge_supp_len can be handled by the driver */
13673 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
13674 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
13677 * Check whether the adapter supports an embedded copy of the
13678 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
13679 * to use this option, 128-byte WQEs must be used.
13681 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
13682 phba->fcp_embed_io = 1;
13684 phba->fcp_embed_io = 0;
13686 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13687 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
13688 bf_get(cfg_xib, mbx_sli4_parameters),
13689 phba->cfg_enable_pbde,
13690 phba->fcp_embed_io, sli4_params->nvme,
13691 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
13693 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
13694 LPFC_SLI_INTF_IF_TYPE_2) &&
13695 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
13696 LPFC_SLI_INTF_FAMILY_LNCR_A0))
13697 exp_wqcq_pages = false;
13699 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
13700 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
13702 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
13703 phba->enab_exp_wqcq_pages = 1;
13705 phba->enab_exp_wqcq_pages = 0;
13707 * Check if the SLI port supports MDS Diagnostics
13709 if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
13710 phba->mds_diags_support = 1;
13712 phba->mds_diags_support = 0;
13715 * Check if the SLI port supports NSLER
13717 if (bf_get(cfg_nsler, mbx_sli4_parameters))
13726 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
13727 * @pdev: pointer to PCI device
13728 * @pid: pointer to PCI device identifier
13730 * This routine is to be called to attach a device with SLI-3 interface spec
13731 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
13732 * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
13733 * device-specific information of the device and driver to see if the
13734 * driver states that it can support this device. If the match is successful, the driver core
13735 * invokes this routine. If this routine determines it can claim the HBA, it
13736 * does all the initialization that it needs to do to handle the HBA properly.
13739 * 0 - driver can claim the device
13740 * negative value - driver can not claim the device
13743 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
13745 struct lpfc_hba *phba;
13746 struct lpfc_vport *vport = NULL;
13747 struct Scsi_Host *shost = NULL;
13749 uint32_t cfg_mode, intr_mode;
13751 /* Allocate memory for HBA structure */
13752 phba = lpfc_hba_alloc(pdev);
13756 /* Perform generic PCI device enabling operation */
13757 error = lpfc_enable_pci_dev(phba);
13759 goto out_free_phba;
13761 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
13762 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
13764 goto out_disable_pci_dev;
13766 /* Set up SLI-3 specific device PCI memory space */
13767 error = lpfc_sli_pci_mem_setup(phba);
13769 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13770 "1402 Failed to set up pci memory space.\n");
13771 goto out_disable_pci_dev;
13774 /* Set up SLI-3 specific device driver resources */
13775 error = lpfc_sli_driver_resource_setup(phba);
13777 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13778 "1404 Failed to set up driver resource.\n");
13779 goto out_unset_pci_mem_s3;
13782 /* Initialize and populate the iocb list per host */
13784 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
13786 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13787 "1405 Failed to initialize iocb list.\n");
13788 goto out_unset_driver_resource_s3;
13791 /* Set up common device driver resources */
13792 error = lpfc_setup_driver_resource_phase2(phba);
13794 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13795 "1406 Failed to set up driver resource.\n");
13796 goto out_free_iocb_list;
13799 /* Get the default values for Model Name and Description */
13800 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
13802 /* Create SCSI host to the physical port */
13803 error = lpfc_create_shost(phba);
13805 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13806 "1407 Failed to create scsi host.\n");
13807 goto out_unset_driver_resource;
13810 /* Configure sysfs attributes */
13811 vport = phba->pport;
13812 error = lpfc_alloc_sysfs_attr(vport);
13814 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13815 "1476 Failed to allocate sysfs attr\n");
13816 goto out_destroy_shost;
13819 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
13820 /* Now, try to enable interrupt and bring up the device */
13821 cfg_mode = phba->cfg_use_msi;
13823 /* Put device to a known state before enabling interrupt */
13824 lpfc_stop_port(phba);
13825 /* Configure and enable interrupt */
13826 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
13827 if (intr_mode == LPFC_INTR_ERROR) {
13828 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13829 "0431 Failed to enable interrupt.\n");
13831 goto out_free_sysfs_attr;
13833 /* SLI-3 HBA setup */
13834 if (lpfc_sli_hba_setup(phba)) {
13835 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13836 "1477 Failed to set up hba\n");
13838 goto out_remove_device;
13841 /* Wait 50ms for the interrupts of previous mailbox commands */
13843 /* Check active interrupts on message signaled interrupts */
13844 if (intr_mode == 0 ||
13845 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
13846 /* Log the current active interrupt mode */
13847 phba->intr_mode = intr_mode;
13848 lpfc_log_intr_mode(phba, intr_mode);
13851 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13852 "0447 Configure interrupt mode (%d) "
13853 "failed active interrupt test.\n",
13855 /* Disable the current interrupt mode */
13856 lpfc_sli_disable_intr(phba);
13857 /* Try next level of interrupt mode */
13858 cfg_mode = --intr_mode;
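		/* Note: the intr_mode value encodes the mode just tried
		 * (2 = MSI-X, 1 = MSI, 0 = INTx, matching the cfg_mode
		 * checks in lpfc_sli_enable_intr()), so decrementing it
		 * retries the next rung of the same fallback ladder.
		 */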
13862 /* Perform post initialization setup */
13863 lpfc_post_init_setup(phba);
13865 /* Check if there are static vports to be created. */
13866 lpfc_create_static_vport(phba);
13871 lpfc_unset_hba(phba);
13872 out_free_sysfs_attr:
13873 lpfc_free_sysfs_attr(vport);
13875 lpfc_destroy_shost(phba);
13876 out_unset_driver_resource:
13877 lpfc_unset_driver_resource_phase2(phba);
13878 out_free_iocb_list:
13879 lpfc_free_iocb_list(phba);
13880 out_unset_driver_resource_s3:
13881 lpfc_sli_driver_resource_unset(phba);
13882 out_unset_pci_mem_s3:
13883 lpfc_sli_pci_mem_unset(phba);
13884 out_disable_pci_dev:
13885 lpfc_disable_pci_dev(phba);
13887 scsi_host_put(shost);
13889 lpfc_hba_free(phba);
13894 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
13895 * @pdev: pointer to PCI device
13897 * This routine is to be called to detach a device with SLI-3 interface
13898 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
13899 * removed from PCI bus, it performs all the necessary cleanup for the HBA
13900 * device to be removed from the PCI subsystem properly.
13903 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
13905 struct Scsi_Host *shost = pci_get_drvdata(pdev);
13906 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
13907 struct lpfc_vport **vports;
13908 struct lpfc_hba *phba = vport->phba;
13911 spin_lock_irq(&phba->hbalock);
13912 vport->load_flag |= FC_UNLOADING;
13913 spin_unlock_irq(&phba->hbalock);
13915 lpfc_free_sysfs_attr(vport);
13917 /* Release all the vports against this physical port */
13918 vports = lpfc_create_vport_work_array(phba);
13919 if (vports != NULL)
13920 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
13921 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
13923 fc_vport_terminate(vports[i]->fc_vport);
13925 lpfc_destroy_vport_work_array(phba, vports);
13927 /* Remove FC host with the physical port */
13928 fc_remove_host(shost);
13929 scsi_remove_host(shost);
13931 /* Clean up all nodes, mailboxes and IOs. */
13932 lpfc_cleanup(vport);
13935 * Bring down the SLI Layer. This step disables all interrupts,
13936 * clears the rings, discards all mailbox commands, and resets
13937 * the HBA.
13940 /* HBA interrupt will be disabled after this call */
13941 lpfc_sli_hba_down(phba);
13942 /* The kthread_stop signal shall trigger work_done one more time */
13943 kthread_stop(phba->worker_thread);
13944 /* Final cleanup of txcmplq and reset the HBA */
13945 lpfc_sli_brdrestart(phba);
13947 kfree(phba->vpi_bmask);
13948 kfree(phba->vpi_ids);
13950 lpfc_stop_hba_timers(phba);
13951 spin_lock_irq(&phba->port_list_lock);
13952 list_del_init(&vport->listentry);
13953 spin_unlock_irq(&phba->port_list_lock);
13955 lpfc_debugfs_terminate(vport);
13957 /* Disable SR-IOV if enabled */
13958 if (phba->cfg_sriov_nr_virtfn)
13959 pci_disable_sriov(pdev);
13961 /* Disable interrupt */
13962 lpfc_sli_disable_intr(phba);
13964 scsi_host_put(shost);
13967 * Call scsi_free before mem_free since scsi bufs are released to their
13968 * corresponding pools here.
13970 lpfc_scsi_free(phba);
13971 lpfc_free_iocb_list(phba);
13973 lpfc_mem_free_all(phba);
13975 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
13976 phba->hbqslimp.virt, phba->hbqslimp.phys);
13978 /* Free resources associated with SLI2 interface */
13979 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
13980 phba->slim2p.virt, phba->slim2p.phys);
13982 /* unmap adapter SLIM and Control Registers */
13983 iounmap(phba->ctrl_regs_memmap_p);
13984 iounmap(phba->slim_memmap_p);
13986 lpfc_hba_free(phba);
13988 pci_release_mem_regions(pdev);
13989 pci_disable_device(pdev);
13993 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
13994 * @dev_d: pointer to device
13996 * This routine is to be called from the kernel's PCI subsystem to support
13997 * system Power Management (PM) to device with SLI-3 interface spec. When
13998 * PM invokes this method, it quiesces the device by stopping the driver's
13999 * worker thread for the device, turning off the device's interrupt and DMA,
14000 * and bringing the device offline. Note that because the driver implements
14001 * only the minimum PM requirements for a power-aware driver's suspend/resume
14002 * support, all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
14003 * suspend() method call will be treated as SUSPEND, and the driver will
14004 * fully reinitialize its device during the resume() method call; the driver
14005 * will set the device to PCI_D3hot state in PCI config space instead of
14006 * setting it according to the @msg provided by the PM.
14009 * 0 - driver suspended the device
14012 static int __maybe_unused
14013 lpfc_pci_suspend_one_s3(struct device *dev_d)
14015 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14016 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14018 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14019 "0473 PCI device Power Management suspend.\n");
14021 /* Bring down the device */
14022 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14023 lpfc_offline(phba);
14024 kthread_stop(phba->worker_thread);
14026 /* Disable interrupt from device */
14027 lpfc_sli_disable_intr(phba);
14033 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
14034 * @dev_d: pointer to device
14036 * This routine is to be called from the kernel's PCI subsystem to support
14037 * system Power Management (PM) to device with SLI-3 interface spec. When PM
14038 * invokes this method, it restores the device's PCI config space state and
14039 * fully reinitializes the device and brings it online. Note that because
14040 * the driver implements only the minimum PM requirements for a power-aware
14041 * driver's suspend/resume support, all possible PM messages (SUSPEND,
14042 * HIBERNATE, FREEZE) to the suspend() method call will be treated as
14043 * SUSPEND and the driver will fully reinitialize its device during the
14044 * resume() method call; the device will be set to PCI_D0 directly in PCI
14045 * config space before restoring the state.
14048 * 0 - driver resumed the device
14051 static int __maybe_unused
14052 lpfc_pci_resume_one_s3(struct device *dev_d)
14054 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14055 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14056 uint32_t intr_mode;
14059 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14060 "0452 PCI device Power Management resume.\n");
14062 /* Startup the kernel thread for this host adapter. */
14063 phba->worker_thread = kthread_run(lpfc_do_work, phba,
14064 "lpfc_worker_%d", phba->brd_no);
14065 if (IS_ERR(phba->worker_thread)) {
14066 error = PTR_ERR(phba->worker_thread);
14067 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14068 "0434 PM resume failed to start worker "
14069 "thread: error=x%x.\n", error);
14073 /* Configure and enable interrupt */
14074 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14075 if (intr_mode == LPFC_INTR_ERROR) {
14076 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14077 "0430 PM resume Failed to enable interrupt\n");
14080 phba->intr_mode = intr_mode;
14082 /* Restart HBA and bring it online */
14083 lpfc_sli_brdrestart(phba);
14084 lpfc_online(phba);
14086 /* Log the current active interrupt mode */
14087 lpfc_log_intr_mode(phba, phba->intr_mode);
14089 return 0;
14090 }
14092 /**
14093 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
14094 * @phba: pointer to lpfc hba data structure.
14096 * This routine is called to prepare the SLI3 device for PCI slot recover. It
14097 * aborts all the outstanding SCSI I/Os to the pci device.
14098 **/
14099 static void
14100 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
14101 {
14102 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14103 "2723 PCI channel I/O abort preparing for recovery\n");
14105 /*
14106 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
14107 * and let the SCSI mid-layer retry them to recover.
14108 */
14109 lpfc_sli_abort_fcp_rings(phba);
14110 }
14112 /**
14113 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
14114 * @phba: pointer to lpfc hba data structure.
14116 * This routine is called to prepare the SLI3 device for PCI slot reset. It
14117 * disables the device interrupt and pci device, and aborts the internal FCP
14118 * pending I/Os.
14119 **/
14120 static void
14121 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
14122 {
14123 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14124 "2710 PCI channel disable preparing for reset\n");
14126 /* Block any management I/Os to the device */
14127 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
14129 /* Block all SCSI devices' I/Os on the host */
14130 lpfc_scsi_dev_block(phba);
14132 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
14133 lpfc_sli_flush_io_rings(phba);
14135 /* stop all timers */
14136 lpfc_stop_hba_timers(phba);
14138 /* Disable interrupt and pci device */
14139 lpfc_sli_disable_intr(phba);
14140 pci_disable_device(phba->pcidev);
14141 }
14143 /**
14144 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
14145 * @phba: pointer to lpfc hba data structure.
14147 * This routine is called to prepare the SLI3 device for PCI slot permanently
14148 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
14149 * pending I/Os.
14150 **/
14151 static void
14152 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
14153 {
14154 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14155 "2711 PCI channel permanent disable for failure\n");
14156 /* Block all SCSI devices' I/Os on the host */
14157 lpfc_scsi_dev_block(phba);
14159 /* stop all timers */
14160 lpfc_stop_hba_timers(phba);
14162 /* Clean up all driver's outstanding SCSI I/Os */
14163 lpfc_sli_flush_io_rings(phba);
14164 }
14166 /**
14167 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
14168 * @pdev: pointer to PCI device.
14169 * @state: the current PCI connection state.
14171 * This routine is called from the PCI subsystem for I/O error handling to
14172 * device with SLI-3 interface spec. This function is called by the PCI
14173 * subsystem after a PCI bus error affecting this device has been detected.
14174 * When this function is invoked, it will need to stop all the I/Os and
14175 * interrupt(s) to the device. Once that is done, it will return
14176 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
14177 * as desired.
14179 * Return codes
14180 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
14181 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
14182 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14183 **/
14184 static pci_ers_result_t
14185 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
14186 {
14187 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14188 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14190 switch (state) {
14191 case pci_channel_io_normal:
14192 /* Non-fatal error, prepare for recovery */
14193 lpfc_sli_prep_dev_for_recover(phba);
14194 return PCI_ERS_RESULT_CAN_RECOVER;
14195 case pci_channel_io_frozen:
14196 /* Fatal error, prepare for slot reset */
14197 lpfc_sli_prep_dev_for_reset(phba);
14198 return PCI_ERS_RESULT_NEED_RESET;
14199 case pci_channel_io_perm_failure:
14200 /* Permanent failure, prepare for device down */
14201 lpfc_sli_prep_dev_for_perm_failure(phba);
14202 return PCI_ERS_RESULT_DISCONNECT;
14203 default:
14204 /* Unknown state, prepare and request slot reset */
14205 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14206 "0472 Unknown PCI error state: x%x\n", state);
14207 lpfc_sli_prep_dev_for_reset(phba);
14208 return PCI_ERS_RESULT_NEED_RESET;
14209 }
14210 }
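
/*
 * Editor's illustrative sketch (not in the original driver): the PCI core
 * drives the three handlers in a fixed order during error recovery. The
 * LPFC_DOC_EXAMPLES guard is hypothetical and never defined, so this walk
 * is documentation only; it calls only functions defined in this file.
 */
#ifdef LPFC_DOC_EXAMPLES
static pci_ers_result_t
lpfc_example_eeh_walk_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	/* Step 1: quiesce I/O according to the reported channel state */
	pci_ers_result_t rc = lpfc_io_error_detected_s3(pdev, state);

	/* Step 2: after a bus reset, reinitialize the HBA but stay offline */
	if (rc == PCI_ERS_RESULT_NEED_RESET)
		rc = lpfc_io_slot_reset_s3(pdev);

	/* Step 3: only a recovered device is allowed to resume traffic */
	if (rc == PCI_ERS_RESULT_RECOVERED)
		lpfc_io_resume_s3(pdev);
	return rc;
}
#endif

14212 /**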
14213 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
14214 * @pdev: pointer to PCI device.
14216 * This routine is called from the PCI subsystem for error handling to
14217 * device with SLI-3 interface spec. This is called after PCI bus has been
14218 * reset to restart the PCI card from scratch, as if from a cold-boot.
14219 * During the PCI subsystem error recovery, after driver returns
14220 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
14221 * recovery and then call this routine before calling the .resume method
14222 * to recover the device. This function will initialize the HBA device,
14223 * enable the interrupt, but it will just put the HBA to offline state
14224 * without passing any I/O traffic.
14227 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
14228 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14229 **/
14230 static pci_ers_result_t
14231 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
14232 {
14233 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14234 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14235 struct lpfc_sli *psli = &phba->sli;
14236 uint32_t intr_mode;
14238 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
14239 if (pci_enable_device_mem(pdev)) {
14240 printk(KERN_ERR "lpfc: Cannot re-enable "
14241 "PCI device after reset.\n");
14242 return PCI_ERS_RESULT_DISCONNECT;
14243 }
14245 pci_restore_state(pdev);
14247 /*
14248 * As the new kernel behavior of pci_restore_state() API call clears
14249 * device saved_state flag, need to save the restored state again.
14250 */
14251 pci_save_state(pdev);
14253 if (pdev->is_busmaster)
14254 pci_set_master(pdev);
14256 spin_lock_irq(&phba->hbalock);
14257 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
14258 spin_unlock_irq(&phba->hbalock);
14260 /* Configure and enable interrupt */
14261 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14262 if (intr_mode == LPFC_INTR_ERROR) {
14263 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14264 "0427 Cannot re-enable interrupt after "
14265 "slot reset.\n");
14266 return PCI_ERS_RESULT_DISCONNECT;
14267 } else
14268 phba->intr_mode = intr_mode;
14270 /* Take device offline, it will perform cleanup */
14271 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14272 lpfc_offline(phba);
14273 lpfc_sli_brdrestart(phba);
14275 /* Log the current active interrupt mode */
14276 lpfc_log_intr_mode(phba, phba->intr_mode);
14278 return PCI_ERS_RESULT_RECOVERED;
14279 }
14281 /**
14282 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
14283 * @pdev: pointer to PCI device
14285 * This routine is called from the PCI subsystem for error handling to device
14286 * with SLI-3 interface spec. It is called when kernel error recovery tells
14287 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
14288 * error recovery. After this call, traffic can start to flow from this device
14289 * again.
14290 **/
14291 static void
14292 lpfc_io_resume_s3(struct pci_dev *pdev)
14293 {
14294 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14295 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14297 /* Bring device online, it will be no-op for non-fatal error resume */
14298 lpfc_online(phba);
14299 }
14301 /**
14302 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
14303 * @phba: pointer to lpfc hba data structure.
14305 * returns the number of ELS/CT IOCBs to reserve
14306 **/
14307 int
14308 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
14309 {
14310 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
14312 if (phba->sli_rev == LPFC_SLI_REV4) {
14313 if (max_xri <= 100)
14314 return 10;
14315 else if (max_xri <= 256)
14316 return 25;
14317 else if (max_xri <= 512)
14318 return 50;
14319 else if (max_xri <= 1024)
14320 return 100;
14321 else if (max_xri <= 1536)
14322 return 150;
14323 else if (max_xri <= 2048)
14324 return 200;
14325 else
14326 return 250;
14327 } else
14328 return 0;
14329 }
14331 /**
14332 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
14333 * @phba: pointer to lpfc hba data structure.
14335 * returns the number of ELS/CT + NVMET IOCBs to reserve
14336 **/
14337 int
14338 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
14339 {
14340 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
14342 if (phba->nvmet_support)
14343 max_xri += LPFC_NVMET_BUF_POST;
14345 return max_xri;
14346 }
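
/*
 * Editor's illustrative sketch (not in the original driver): the if/else
 * ladder in lpfc_sli4_get_els_iocb_cnt() restated as a table, assuming the
 * bucket values reconstructed above. The array name and the guard are
 * hypothetical; the guard is never defined in real builds.
 */
#ifdef LPFC_DOC_EXAMPLES
static const struct { int max_xri; int els_iocb_cnt; } lpfc_example_els_buckets[] = {
	{  100,  10 }, {  256,  25 }, {  512,  50 },
	{ 1024, 100 }, { 1536, 150 }, { 2048, 200 },
	/* anything larger reserves 250; non-SLI4 ports reserve 0 */
};
#endif

14348 static int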
14349 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
14350 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
14351 const struct firmware *fw)
14352 {
14353 int rc;
14354 u8 sli_family;
14356 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
14357 /* Three cases: (1) FW was not supported on the detected adapter.
14358 * (2) FW update has been locked out administratively.
14359 * (3) Some other error during FW update.
14360 * In each case, an unmaskable message is written to the console
14361 * for admin diagnosis.
14362 */
14363 if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
14364 (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
14365 magic_number != MAGIC_NUMBER_G6) ||
14366 (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
14367 magic_number != MAGIC_NUMBER_G7) ||
14368 (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
14369 magic_number != MAGIC_NUMBER_G7P)) {
14370 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14371 "3030 This firmware version is not supported on"
14372 " this HBA model. Device:%x Magic:%x Type:%x "
14373 "ID:%x Size %d %zd\n",
14374 phba->pcidev->device, magic_number, ftype, fid,
14375 fsize, fw->size);
14376 rc = -EINVAL;
14377 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
14378 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14379 "3021 Firmware downloads have been prohibited "
14380 "by a system configuration setting on "
14381 "Device:%x Magic:%x Type:%x ID:%x Size %d "
14383 phba->pcidev->device, magic_number, ftype, fid,
14384 fsize, fw->size);
14385 rc = -EACCES;
14386 } else {
14387 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14388 "3022 FW Download failed. Add Status x%x "
14389 "Device:%x Magic:%x Type:%x ID:%x Size %d "
14391 offset, phba->pcidev->device, magic_number,
14392 ftype, fid, fsize, fw->size);
14393 rc = -EIO;
14394 }
14395 return rc;
14396 }
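
/*
 * Editor's illustrative sketch (not in the original driver): the
 * family-to-magic pairing checked above, restated as a predicate. The
 * helper name and guard are hypothetical; the constants are the ones
 * already used by lpfc_log_write_firmware_error().
 */
#ifdef LPFC_DOC_EXAMPLES
static bool lpfc_example_magic_matches(u8 sli_family, u32 magic_number)
{
	switch (sli_family) {
	case LPFC_SLI_INTF_FAMILY_G6:
		return magic_number == MAGIC_NUMBER_G6;
	case LPFC_SLI_INTF_FAMILY_G7:
		return magic_number == MAGIC_NUMBER_G7;
	case LPFC_SLI_INTF_FAMILY_G7P:
		return magic_number == MAGIC_NUMBER_G7P;
	default:
		return true; /* no per-family magic gate */
	}
}
#endif

14398 /**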
14399 * lpfc_write_firmware - attempt to write a firmware image to the port
14400 * @fw: pointer to firmware image returned from request_firmware.
14401 * @context: pointer to the lpfc hba data structure (opaque callback context).
14403 **/
14404 static void
14405 lpfc_write_firmware(const struct firmware *fw, void *context)
14406 {
14407 struct lpfc_hba *phba = (struct lpfc_hba *)context;
14408 char fwrev[FW_REV_STR_SIZE];
14409 struct lpfc_grp_hdr *image;
14410 struct list_head dma_buffer_list;
14411 int i, rc = 0;
14412 struct lpfc_dmabuf *dmabuf, *next;
14413 uint32_t offset = 0, temp_offset = 0;
14414 uint32_t magic_number, ftype, fid, fsize;
14416 /* It can be null in no-wait mode, sanity check */
14417 if (!fw) {
14418 rc = -ENXIO;
14419 goto out;
14420 }
14421 image = (struct lpfc_grp_hdr *)fw->data;
14423 magic_number = be32_to_cpu(image->magic_number);
14424 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
14425 fid = bf_get_be32(lpfc_grp_hdr_id, image);
14426 fsize = be32_to_cpu(image->size);
14428 INIT_LIST_HEAD(&dma_buffer_list);
14429 lpfc_decode_firmware_rev(phba, fwrev, 1);
14430 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
14431 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14432 "3023 Updating Firmware, Current Version:%s "
14433 "New Version:%s\n",
14434 fwrev, image->revision);
14435 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
14436 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
14437 GFP_KERNEL);
14438 if (!dmabuf) {
14439 rc = -ENOMEM;
14440 goto release_out;
14441 }
14442 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14443 SLI4_PAGE_SIZE,
14444 &dmabuf->phys,
14445 GFP_KERNEL);
14446 if (!dmabuf->virt) {
14447 kfree(dmabuf);
14448 rc = -ENOMEM;
14449 goto release_out;
14450 }
14451 list_add_tail(&dmabuf->list, &dma_buffer_list);
14452 }
14453 while (offset < fw->size) {
14454 temp_offset = offset;
14455 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
14456 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
14457 memcpy(dmabuf->virt,
14458 fw->data + temp_offset,
14459 fw->size - temp_offset);
14460 temp_offset = fw->size;
14461 break;
14462 }
14463 memcpy(dmabuf->virt, fw->data + temp_offset,
14464 SLI4_PAGE_SIZE);
14465 temp_offset += SLI4_PAGE_SIZE;
14466 }
14467 rc = lpfc_wr_object(phba, &dma_buffer_list,
14468 (fw->size - offset), &offset);
14469 if (rc) {
14470 rc = lpfc_log_write_firmware_error(phba, offset,
14471 magic_number, ftype, fid, fsize, fw);
14476 goto release_out;
14477 }
14478 }
14479 rc = offset;
14480 } else {
14481 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14482 "3029 Skipped Firmware update, Current "
14483 "Version:%s New Version:%s\n",
14484 fwrev, image->revision);
14485 }
14486 release_out:
14487 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
14488 list_del(&dmabuf->list);
14489 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
14490 dmabuf->virt, dmabuf->phys);
14491 kfree(dmabuf);
14492 }
14493 release_firmware(fw);
14494 out:
14495 if (rc)
14496 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14497 "3062 Firmware update error, status %d.\n", rc);
14498 else
14499 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14500 "3024 Firmware update success: size %d.\n", rc);
14501 }
14504 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
14505 * @phba: pointer to lpfc hba data structure.
14506 * @fw_upgrade: which firmware to update.
14508 * This routine is called to perform a Linux generic firmware upgrade on a
14509 * device that supports such a feature.
14510 **/
14511 int
14512 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
14513 {
14514 uint8_t file_name[ELX_MODEL_NAME_SIZE];
14515 int ret;
14516 const struct firmware *fw;
14518 /* Only supported on SLI4 interface type 2 for now */
14519 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
14520 LPFC_SLI_INTF_IF_TYPE_2)
14521 return -EPERM;
14523 snprintf(file_name, ELX_MODEL_NAME_SIZE, "/*(DEBLOBBED)*/", phba->ModelName);
14525 if (fw_upgrade == INT_FW_UPGRADE) {
14526 ret = reject_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
14527 file_name, &phba->pcidev->dev,
14528 GFP_KERNEL, (void *)phba,
14529 lpfc_write_firmware);
14530 } else if (fw_upgrade == RUN_FW_UPGRADE) {
14531 ret = reject_firmware(&fw, file_name, &phba->pcidev->dev);
14532 if (!ret)
14533 lpfc_write_firmware(fw, (void *)phba);
14534 } else {
14535 ret = -EINVAL;
14536 }
14538 return ret;
14539 }
14541 /**
14542 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
14543 * @pdev: pointer to PCI device
14544 * @pid: pointer to PCI device identifier
14546 * This routine is called from the kernel's PCI subsystem to support devices
14547 * with the SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec
14548 * is presented on PCI bus, the kernel PCI subsystem looks at PCI
14549 * device-specific information of the device and driver to see whether the
14550 * driver can support this kind of device. If the match is successful, the
14551 * driver core invokes this routine. If this routine determines it can claim
14552 * the HBA, it does all the initialization that it needs to do to handle the
14553 * HBA device properly.
14555 * Return code
14556 * 0 - driver can claim the device
14557 * negative value - driver cannot claim the device
14558 **/
14559 static int
14560 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
14561 {
14562 struct lpfc_hba *phba;
14563 struct lpfc_vport *vport = NULL;
14564 struct Scsi_Host *shost = NULL;
14565 int error;
14566 uint32_t cfg_mode, intr_mode;
14568 /* Allocate memory for HBA structure */
14569 phba = lpfc_hba_alloc(pdev);
14570 if (!phba)
14571 return -ENOMEM;
14573 INIT_LIST_HEAD(&phba->poll_list);
14575 /* Perform generic PCI device enabling operation */
14576 error = lpfc_enable_pci_dev(phba);
14577 if (error)
14578 goto out_free_phba;
14580 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
14581 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
14582 if (error)
14583 goto out_disable_pci_dev;
14585 /* Set up SLI-4 specific device PCI memory space */
14586 error = lpfc_sli4_pci_mem_setup(phba);
14587 if (error) {
14588 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14589 "1410 Failed to set up pci memory space.\n");
14590 goto out_disable_pci_dev;
14591 }
14593 /* Set up SLI-4 Specific device driver resources */
14594 error = lpfc_sli4_driver_resource_setup(phba);
14595 if (error) {
14596 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14597 "1412 Failed to set up driver resource.\n");
14598 goto out_unset_pci_mem_s4;
14599 }
14601 INIT_LIST_HEAD(&phba->active_rrq_list);
14602 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
14604 /* Set up common device driver resources */
14605 error = lpfc_setup_driver_resource_phase2(phba);
14606 if (error) {
14607 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14608 "1414 Failed to set up driver resource.\n");
14609 goto out_unset_driver_resource_s4;
14610 }
14612 /* Get the default values for Model Name and Description */
14613 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14615 /* Now, trying to enable interrupt and bring up the device */
14616 cfg_mode = phba->cfg_use_msi;
14618 /* Put device to a known state before enabling interrupt */
14619 phba->pport = NULL;
14620 lpfc_stop_port(phba);
14622 /* Init cpu_map array */
14623 lpfc_cpu_map_array_init(phba);
14625 /* Init hba_eq_hdl array */
14626 lpfc_hba_eq_hdl_array_init(phba);
14628 /* Configure and enable interrupt */
14629 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
14630 if (intr_mode == LPFC_INTR_ERROR) {
14631 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14632 "0426 Failed to enable interrupt.\n");
14633 error = -ENODEV;
14634 goto out_unset_driver_resource;
14635 }
14636 /* Default to single EQ for non-MSI-X */
14637 if (phba->intr_type != MSIX) {
14638 phba->cfg_irq_chann = 1;
14639 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14640 if (phba->nvmet_support)
14641 phba->cfg_nvmet_mrq = 1;
14642 }
14643 }
14644 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
14646 /* Create SCSI host to the physical port */
14647 error = lpfc_create_shost(phba);
14648 if (error) {
14649 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14650 "1415 Failed to create scsi host.\n");
14651 goto out_disable_intr;
14652 }
14653 vport = phba->pport;
14654 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
14656 /* Configure sysfs attributes */
14657 error = lpfc_alloc_sysfs_attr(vport);
14658 if (error) {
14659 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14660 "1416 Failed to allocate sysfs attr\n");
14661 goto out_destroy_shost;
14662 }
14664 /* Set up SLI-4 HBA */
14665 if (lpfc_sli4_hba_setup(phba)) {
14666 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14667 "1421 Failed to set up hba\n");
14668 error = -ENODEV;
14669 goto out_free_sysfs_attr;
14670 }
14672 /* Log the current active interrupt mode */
14673 phba->intr_mode = intr_mode;
14674 lpfc_log_intr_mode(phba, intr_mode);
14676 /* Perform post initialization setup */
14677 lpfc_post_init_setup(phba);
14679 /* NVME support in FW earlier in the driver load corrects the
14680 * FC4 type making a check for nvme_support unnecessary.
14681 */
14682 if (phba->nvmet_support == 0) {
14683 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14684 /* Create NVME binding with nvme_fc_transport. This
14685 * ensures the vport is initialized. If the localport
14686 * create fails, it should not unload the driver to
14687 * support field issues.
14688 */
14689 error = lpfc_nvme_create_localport(vport);
14690 if (error) {
14691 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14692 "6004 NVME registration "
14693 "failed, error x%x\n",
14694 error);
14695 }
14696 }
14697 }
14699 /* check for firmware upgrade or downgrade */
14700 if (phba->cfg_request_firmware_upgrade)
14701 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
14703 /* Check if there are static vports to be created. */
14704 lpfc_create_static_vport(phba);
14706 /* Enable RAS FW log support */
14707 lpfc_sli4_ras_setup(phba);
14709 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
14710 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
14712 return 0;
14714 out_free_sysfs_attr:
14715 lpfc_free_sysfs_attr(vport);
14716 out_destroy_shost:
14717 lpfc_destroy_shost(phba);
14718 out_disable_intr:
14719 lpfc_sli4_disable_intr(phba);
14720 out_unset_driver_resource:
14721 lpfc_unset_driver_resource_phase2(phba);
14722 out_unset_driver_resource_s4:
14723 lpfc_sli4_driver_resource_unset(phba);
14724 out_unset_pci_mem_s4:
14725 lpfc_sli4_pci_mem_unset(phba);
14726 out_disable_pci_dev:
14727 lpfc_disable_pci_dev(phba);
14728 if (shost)
14729 scsi_host_put(shost);
14730 out_free_phba:
14731 lpfc_hba_free(phba);
14732 return error;
14733 }
14735 /**
14736 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
14737 * @pdev: pointer to PCI device
14739 * This routine is called from the kernel's PCI subsystem to device with
14740 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14741 * removed from PCI bus, it performs all the necessary cleanup for the HBA
14742 * device to be removed from the PCI subsystem properly.
14743 **/
14744 static void
14745 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
14746 {
14747 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14748 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14749 struct lpfc_vport **vports;
14750 struct lpfc_hba *phba = vport->phba;
14751 int i;
14753 /* Mark the device unloading flag */
14754 spin_lock_irq(&phba->hbalock);
14755 vport->load_flag |= FC_UNLOADING;
14756 spin_unlock_irq(&phba->hbalock);
14758 lpfc_unreg_congestion_buf(phba);
14760 lpfc_free_sysfs_attr(vport);
14762 /* Release all the vports against this physical port */
14763 vports = lpfc_create_vport_work_array(phba);
14764 if (vports != NULL)
14765 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14766 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14767 continue;
14768 fc_vport_terminate(vports[i]->fc_vport);
14769 }
14770 lpfc_destroy_vport_work_array(phba, vports);
14772 /* Remove FC host with the physical port */
14773 fc_remove_host(shost);
14774 scsi_remove_host(shost);
14776 /* Perform ndlp cleanup on the physical port. The nvme and nvmet
14777 * localports are destroyed afterwards to clean up all transport memory.
14778 */
14779 lpfc_cleanup(vport);
14780 lpfc_nvmet_destroy_targetport(phba);
14781 lpfc_nvme_destroy_localport(vport);
14783 /* De-allocate multi-XRI pools */
14784 if (phba->cfg_xri_rebalancing)
14785 lpfc_destroy_multixri_pools(phba);
14787 /*
14788 * Bring down the SLI Layer. This step disables all interrupts,
14789 * clears the rings, discards all mailbox commands, and resets
14790 * the HBA FCoE function.
14791 */
14792 lpfc_debugfs_terminate(vport);
14794 lpfc_stop_hba_timers(phba);
14795 spin_lock_irq(&phba->port_list_lock);
14796 list_del_init(&vport->listentry);
14797 spin_unlock_irq(&phba->port_list_lock);
14799 /* Perform scsi free before driver resource_unset since scsi
14800 * buffers are released to their corresponding pools here.
14801 */
14802 lpfc_io_free(phba);
14803 lpfc_free_iocb_list(phba);
14804 lpfc_sli4_hba_unset(phba);
14806 lpfc_unset_driver_resource_phase2(phba);
14807 lpfc_sli4_driver_resource_unset(phba);
14809 /* Unmap adapter Control and Doorbell registers */
14810 lpfc_sli4_pci_mem_unset(phba);
14812 /* Release PCI resources and disable device's PCI function */
14813 scsi_host_put(shost);
14814 lpfc_disable_pci_dev(phba);
14816 /* Finally, free the driver's device data structure */
14817 lpfc_hba_free(phba);
14818 }
14822 /**
14823 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
14824 * @dev_d: pointer to device
14826 * This routine is called from the kernel's PCI subsystem to support system
14827 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
14828 * this method, it quiesces the device by stopping the driver's worker
14829 * thread for the device, turning off the device's interrupt and DMA, and
14830 * bringing the device offline. Note that the driver implements only the
14831 * minimum PM requirements for a power-aware driver: all possible PM
14832 * messages (SUSPEND, HIBERNATE, FREEZE) delivered to the suspend() method
14833 * are treated as SUSPEND, and the driver fully reinitializes its device
14834 * during the resume() method call. Accordingly, the driver sets the device
14835 * to the PCI_D3hot state in PCI config space instead of setting it
14836 * according to the @msg provided by the PM.
14838 * Return code
14839 * 0 - driver suspended the device
14840 * Error otherwise
14841 **/
14842 static int __maybe_unused
14843 lpfc_pci_suspend_one_s4(struct device *dev_d)
14844 {
14845 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14846 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14848 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14849 "2843 PCI device Power Management suspend.\n");
14851 /* Bring down the device */
14852 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14853 lpfc_offline(phba);
14854 kthread_stop(phba->worker_thread);
14856 /* Disable interrupt from device */
14857 lpfc_sli4_disable_intr(phba);
14858 lpfc_sli4_queue_destroy(phba);
14860 return 0;
14861 }
14863 /**
14864 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
14865 * @dev_d: pointer to device
14867 * This routine is called from the kernel's PCI subsystem to support system
14868 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
14869 * this method, it restores the device's PCI config space state and fully
14870 * reinitializes the device and brings it online. Note that the driver
14871 * implements only the minimum PM requirements for a power-aware driver: all
14872 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) delivered to the
14873 * suspend() method are treated as SUSPEND, and the driver fully
14874 * reinitializes its device during the resume() method call. Accordingly,
14875 * the device is set to PCI_D0 directly in PCI config space before its
14876 * state is restored.
14878 * Return code
14879 * 0 - driver resumed the device
14880 * Error otherwise
14881 **/
14882 static int __maybe_unused
14883 lpfc_pci_resume_one_s4(struct device *dev_d)
14884 {
14885 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14886 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14887 uint32_t intr_mode;
14888 int error;
14890 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14891 "0292 PCI device Power Management resume.\n");
14893 /* Startup the kernel thread for this host adapter. */
14894 phba->worker_thread = kthread_run(lpfc_do_work, phba,
14895 "lpfc_worker_%d", phba->brd_no);
14896 if (IS_ERR(phba->worker_thread)) {
14897 error = PTR_ERR(phba->worker_thread);
14898 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14899 "0293 PM resume failed to start worker "
14900 "thread: error=x%x.\n", error);
14904 /* Configure and enable interrupt */
14905 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
14906 if (intr_mode == LPFC_INTR_ERROR) {
14907 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14908 "0294 PM resume Failed to enable interrupt\n");
14911 phba->intr_mode = intr_mode;
14913 /* Restart HBA and bring it online */
14914 lpfc_sli_brdrestart(phba);
14915 lpfc_online(phba);
14917 /* Log the current active interrupt mode */
14918 lpfc_log_intr_mode(phba, phba->intr_mode);
14920 return 0;
14921 }
14923 /**
14924 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
14925 * @phba: pointer to lpfc hba data structure.
14927 * This routine is called to prepare the SLI4 device for PCI slot recover. It
14928 * aborts all the outstanding SCSI I/Os to the pci device.
14929 **/
14930 static void
14931 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
14932 {
14933 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14934 "2828 PCI channel I/O abort preparing for recovery\n");
14935 /*
14936 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
14937 * and let the SCSI mid-layer retry them to recover.
14938 */
14939 lpfc_sli_abort_fcp_rings(phba);
14940 }
14942 /**
14943 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
14944 * @phba: pointer to lpfc hba data structure.
14946 * This routine is called to prepare the SLI4 device for PCI slot reset. It
14947 * disables the device interrupt and pci device, and aborts the internal FCP
14948 * pending I/Os.
14949 **/
14950 static void
14951 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
14952 {
14953 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14954 "2826 PCI channel disable preparing for reset\n");
14956 /* Block any management I/Os to the device */
14957 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
14959 /* Block all SCSI devices' I/Os on the host */
14960 lpfc_scsi_dev_block(phba);
14962 /* Flush all driver's outstanding I/Os as we are to reset */
14963 lpfc_sli_flush_io_rings(phba);
14965 /* stop all timers */
14966 lpfc_stop_hba_timers(phba);
14968 /* Disable interrupt and pci device */
14969 lpfc_sli4_disable_intr(phba);
14970 lpfc_sli4_queue_destroy(phba);
14971 pci_disable_device(phba->pcidev);
14972 }
14974 /**
14975 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
14976 * @phba: pointer to lpfc hba data structure.
14978 * This routine is called to prepare the SLI4 device for PCI slot permanently
14979 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
14980 * pending I/Os.
14981 **/
14982 static void
14983 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
14984 {
14985 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14986 "2827 PCI channel permanent disable for failure\n");
14988 /* Block all SCSI devices' I/Os on the host */
14989 lpfc_scsi_dev_block(phba);
14991 /* stop all timers */
14992 lpfc_stop_hba_timers(phba);
14994 /* Clean up all driver's outstanding I/Os */
14995 lpfc_sli_flush_io_rings(phba);
14996 }
14998 /**
14999 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
15000 * @pdev: pointer to PCI device.
15001 * @state: the current PCI connection state.
15003 * This routine is called from the PCI subsystem for error handling to device
15004 * with SLI-4 interface spec. This function is called by the PCI subsystem
15005 * after a PCI bus error affecting this device has been detected. When this
15006 * function is invoked, it will need to stop all the I/Os and interrupt(s)
15007 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
15008 * for the PCI subsystem to perform proper recovery as desired.
15010 * Return codes
15011 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15012 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15013 **/
15014 static pci_ers_result_t
15015 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
15016 {
15017 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15018 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15020 switch (state) {
15021 case pci_channel_io_normal:
15022 /* Non-fatal error, prepare for recovery */
15023 lpfc_sli4_prep_dev_for_recover(phba);
15024 return PCI_ERS_RESULT_CAN_RECOVER;
15025 case pci_channel_io_frozen:
15026 /* Fatal error, prepare for slot reset */
15027 lpfc_sli4_prep_dev_for_reset(phba);
15028 return PCI_ERS_RESULT_NEED_RESET;
15029 case pci_channel_io_perm_failure:
15030 /* Permanent failure, prepare for device down */
15031 lpfc_sli4_prep_dev_for_perm_failure(phba);
15032 return PCI_ERS_RESULT_DISCONNECT;
15033 default:
15034 /* Unknown state, prepare and request slot reset */
15035 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15036 "2825 Unknown PCI error state: x%x\n", state);
15037 lpfc_sli4_prep_dev_for_reset(phba);
15038 return PCI_ERS_RESULT_NEED_RESET;
15039 }
15040 }
15042 /**
15043 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
15044 * @pdev: pointer to PCI device.
15046 * This routine is called from the PCI subsystem for error handling to device
15047 * with SLI-4 interface spec. It is called after PCI bus has been reset to
15048 * restart the PCI card from scratch, as if from a cold-boot. During the
15049 * PCI subsystem error recovery, after the driver returns
15050 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
15051 * recovery and then call this routine before calling the .resume method to
15052 * recover the device. This function will initialize the HBA device, enable
15053 * the interrupt, but it will just put the HBA to offline state without
15054 * passing any I/O traffic.
15057 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
15058 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15059 **/
15060 static pci_ers_result_t
15061 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
15062 {
15063 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15064 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15065 struct lpfc_sli *psli = &phba->sli;
15066 uint32_t intr_mode;
15068 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
15069 if (pci_enable_device_mem(pdev)) {
15070 printk(KERN_ERR "lpfc: Cannot re-enable "
15071 "PCI device after reset.\n");
15072 return PCI_ERS_RESULT_DISCONNECT;
15073 }
15075 pci_restore_state(pdev);
15077 /*
15078 * As the new kernel behavior of pci_restore_state() API call clears
15079 * device saved_state flag, need to save the restored state again.
15080 */
15081 pci_save_state(pdev);
15083 if (pdev->is_busmaster)
15084 pci_set_master(pdev);
15086 spin_lock_irq(&phba->hbalock);
15087 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
15088 spin_unlock_irq(&phba->hbalock);
15090 /* Init cpu_map array */
15091 lpfc_cpu_map_array_init(phba);
15092 /* Configure and enable interrupt */
15093 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15094 if (intr_mode == LPFC_INTR_ERROR) {
15095 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15096 "2824 Cannot re-enable interrupt after "
15097 "slot reset.\n");
15098 return PCI_ERS_RESULT_DISCONNECT;
15099 } else
15100 phba->intr_mode = intr_mode;
15102 /* Log the current active interrupt mode */
15103 lpfc_log_intr_mode(phba, phba->intr_mode);
15105 return PCI_ERS_RESULT_RECOVERED;
15106 }
15108 /**
15109 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
15110 * @pdev: pointer to PCI device
15112 * This routine is called from the PCI subsystem for error handling to device
15113 * with SLI-4 interface spec. It is called when kernel error recovery tells
15114 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
15115 * error recovery. After this call, traffic can start to flow from this device
15116 * again.
15117 **/
15118 static void
15119 lpfc_io_resume_s4(struct pci_dev *pdev)
15120 {
15121 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15122 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15124 /*
15125 * In case of slot reset, as function reset is performed through
15126 * mailbox command which needs DMA to be enabled, this operation
15127 * has to be moved to the io resume phase. Taking device offline
15128 * will perform the necessary cleanup.
15129 */
15130 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
15131 /* Perform device reset */
15132 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
15133 lpfc_offline(phba);
15134 lpfc_sli_brdrestart(phba);
15135 /* Bring the device back online */
15136 lpfc_online(phba);
15137 }
15138 }
15140 /**
15141 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
15142 * @pdev: pointer to PCI device
15143 * @pid: pointer to PCI device identifier
15145 * This routine is to be registered to the kernel's PCI subsystem. When an
15146 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
15147 * at PCI device-specific information of the device and driver to see whether
15148 * the driver can support this kind of device. If the match is
15149 * successful, the driver core invokes this routine. This routine dispatches
15150 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
15151 * do all the initialization that it needs to do to handle the HBA device
15152 * properly.
15154 * Return code
15155 * 0 - driver can claim the device
15156 * negative value - driver cannot claim the device
15157 **/
15158 static int
15159 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
15160 {
15161 int rc;
15162 struct lpfc_sli_intf intf;
15164 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
15165 return -ENODEV;
15167 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
15168 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
15169 rc = lpfc_pci_probe_one_s4(pdev, pid);
15170 else
15171 rc = lpfc_pci_probe_one_s3(pdev, pid);
15173 return rc;
15174 }
15176 /**
15177 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
15178 * @pdev: pointer to PCI device
15180 * This routine is to be registered to the kernel's PCI subsystem. When an
15181 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
15182 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
15183 * remove routine, which will perform all the necessary cleanup for the
15184 * device to be removed from the PCI subsystem properly.
15185 **/
15186 static void
15187 lpfc_pci_remove_one(struct pci_dev *pdev)
15188 {
15189 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15190 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15192 switch (phba->pci_dev_grp) {
15193 case LPFC_PCI_DEV_LP:
15194 lpfc_pci_remove_one_s3(pdev);
15195 break;
15196 case LPFC_PCI_DEV_OC:
15197 lpfc_pci_remove_one_s4(pdev);
15198 break;
15199 default:
15200 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15201 "1424 Invalid PCI device group: 0x%x\n",
15202 phba->pci_dev_grp);
15203 break;
15204 }
15206 return;
15207 }
15208 /**
15209 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
15210 * @dev: pointer to device
15212 * This routine is to be registered to the kernel's PCI subsystem to support
15213 * system Power Management (PM). When PM invokes this method, it dispatches
15214 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
15215 * suspend the device.
15217 * Return code
15218 * 0 - driver suspended the device
15219 * Error otherwise
15220 **/
15221 static int __maybe_unused
15222 lpfc_pci_suspend_one(struct device *dev)
15223 {
15224 struct Scsi_Host *shost = dev_get_drvdata(dev);
15225 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15226 int rc = -ENODEV;
15228 switch (phba->pci_dev_grp) {
15229 case LPFC_PCI_DEV_LP:
15230 rc = lpfc_pci_suspend_one_s3(dev);
15231 break;
15232 case LPFC_PCI_DEV_OC:
15233 rc = lpfc_pci_suspend_one_s4(dev);
15234 break;
15235 default:
15236 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15237 "1425 Invalid PCI device group: 0x%x\n",
15238 phba->pci_dev_grp);
15239 break;
15240 }
15242 return rc;
15243 }
15244 /**
15245 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
15246 * @dev: pointer to device
15248 * This routine is to be registered to the kernel's PCI subsystem to support
15249 * system Power Management (PM). When PM invokes this method, it dispatches
15250 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
15251 * resume the device.
15253 * Return code
15254 * 0 - driver resumed the device
15255 * Error otherwise
15256 **/
15257 static int __maybe_unused
15258 lpfc_pci_resume_one(struct device *dev)
15259 {
15260 struct Scsi_Host *shost = dev_get_drvdata(dev);
15261 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15262 int rc = -ENODEV;
15264 switch (phba->pci_dev_grp) {
15265 case LPFC_PCI_DEV_LP:
15266 rc = lpfc_pci_resume_one_s3(dev);
15267 break;
15268 case LPFC_PCI_DEV_OC:
15269 rc = lpfc_pci_resume_one_s4(dev);
15270 break;
15271 default:
15272 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15273 "1426 Invalid PCI device group: 0x%x\n",
15274 phba->pci_dev_grp);
15275 break;
15276 }
15278 return rc;
15279 }
15280 /**
15281 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
15282 * @pdev: pointer to PCI device.
15283 * @state: the current PCI connection state.
15285 * This routine is registered to the PCI subsystem for error handling. This
15286 * function is called by the PCI subsystem after a PCI bus error affecting
15287 * this device has been detected. When this routine is invoked, it dispatches
15288 * the action to the proper SLI-3 or SLI-4 device error detected handling
15289 * routine, which will perform the proper error detected operation.
15291 * Return codes
15292 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15293 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15294 **/
15295 static pci_ers_result_t
15296 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
15297 {
15298 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15299 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15300 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15302 switch (phba->pci_dev_grp) {
15303 case LPFC_PCI_DEV_LP:
15304 rc = lpfc_io_error_detected_s3(pdev, state);
15305 break;
15306 case LPFC_PCI_DEV_OC:
15307 rc = lpfc_io_error_detected_s4(pdev, state);
15308 break;
15309 default:
15310 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15311 "1427 Invalid PCI device group: 0x%x\n",
15312 phba->pci_dev_grp);
15313 break;
15314 }
15316 return rc;
15317 }
15318 /**
15319 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
15320 * @pdev: pointer to PCI device.
15322 * This routine is registered to the PCI subsystem for error handling. This
15323 * function is called after PCI bus has been reset to restart the PCI card
15324 * from scratch, as if from a cold-boot. When this routine is invoked, it
15325 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
15326 * routine, which will perform the proper device reset.
15329 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
15330 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15331 **/
15332 static pci_ers_result_t
15333 lpfc_io_slot_reset(struct pci_dev *pdev)
15334 {
15335 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15336 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15337 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15339 switch (phba->pci_dev_grp) {
15340 case LPFC_PCI_DEV_LP:
15341 rc = lpfc_io_slot_reset_s3(pdev);
15342 break;
15343 case LPFC_PCI_DEV_OC:
15344 rc = lpfc_io_slot_reset_s4(pdev);
15345 break;
15346 default:
15347 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15348 "1428 Invalid PCI device group: 0x%x\n",
15349 phba->pci_dev_grp);
15350 break;
15351 }
15353 return rc;
15354 }
15355 /**
15356 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
15357 * @pdev: pointer to PCI device
15359 * This routine is registered to the PCI subsystem for error handling. It
15360 * is called when kernel error recovery tells the lpfc driver that it is
15361 * OK to resume normal PCI operation after PCI bus error recovery. When
15362 * this routine is invoked, it dispatches the action to the proper SLI-3
15363 * or SLI-4 device io_resume routine, which will resume the device operation.
15364 **/
15365 static void
15366 lpfc_io_resume(struct pci_dev *pdev)
15367 {
15368 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15369 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15371 switch (phba->pci_dev_grp) {
15372 case LPFC_PCI_DEV_LP:
15373 lpfc_io_resume_s3(pdev);
15374 break;
15375 case LPFC_PCI_DEV_OC:
15376 lpfc_io_resume_s4(pdev);
15377 break;
15378 default:
15379 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15380 "1429 Invalid PCI device group: 0x%x\n",
15381 phba->pci_dev_grp);
15382 break;
15383 }
15385 return;
15386 }
15387 /**
15388 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
15389 * @phba: pointer to lpfc hba data structure.
15391 * This routine checks to see if OAS is supported for this adapter. If
15392 * supported, the configure Flash Optimized Fabric flag is set. Otherwise,
15393 * the enable oas flag is cleared and the pool created for OAS device data
15394 * is destroyed.
15396 **/
15397 static void
15398 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
15399 {
15401 if (!phba->cfg_EnableXLane)
15402 return;
15404 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
15405 phba->cfg_fof = 1;
15406 } else {
15407 phba->cfg_fof = 0;
15408 mempool_destroy(phba->device_data_mem_pool);
15409 phba->device_data_mem_pool = NULL;
15410 }
15412 return;
15413 }
15415 /**
15416 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
15417 * @phba: pointer to lpfc hba data structure.
15419 * This routine checks to see if RAS is supported by the adapter. Check the
15420 * function through which RAS support enablement is to be done.
15421 **/
15422 static void
15423 lpfc_sli4_ras_init(struct lpfc_hba *phba)
15424 {
15425 /* if ASIC_GEN_NUM >= 0xC */
15426 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
15427 LPFC_SLI_INTF_IF_TYPE_6) ||
15428 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
15429 LPFC_SLI_INTF_FAMILY_G6)) {
15430 phba->ras_fwlog.ras_hwsupport = true;
15431 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
15432 phba->cfg_ras_fwlog_buffsize)
15433 phba->ras_fwlog.ras_enabled = true;
15434 else
15435 phba->ras_fwlog.ras_enabled = false;
15436 } else {
15437 phba->ras_fwlog.ras_hwsupport = false;
15438 }
15439 }
15442 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
15444 static const struct pci_error_handlers lpfc_err_handler = {
15445 .error_detected = lpfc_io_error_detected,
15446 .slot_reset = lpfc_io_slot_reset,
15447 .resume = lpfc_io_resume,
15448 };
15450 static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
15451 lpfc_pci_suspend_one,
15452 lpfc_pci_resume_one);
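
/*
 * Editor's note (not in the original driver): SIMPLE_DEV_PM_OPS() binds
 * both callbacks to every system-sleep transition, which matches the
 * "treat every PM message as SUSPEND" policy documented above. Under
 * that assumption, a hand-written equivalent would look roughly like
 * this (illustrative only; the guard is hypothetical and never defined):
 */
#ifdef LPFC_DOC_EXAMPLES
static const struct dev_pm_ops lpfc_example_pm_ops = {
	.suspend  = lpfc_pci_suspend_one, /* system suspend (S3/s2idle) */
	.resume   = lpfc_pci_resume_one,
	.freeze   = lpfc_pci_suspend_one, /* hibernation image creation */
	.thaw     = lpfc_pci_resume_one,
	.poweroff = lpfc_pci_suspend_one, /* power-down for hibernation */
	.restore  = lpfc_pci_resume_one,  /* resume from hibernation */
};
#endif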
15454 static struct pci_driver lpfc_driver = {
15455 .name = LPFC_DRIVER_NAME,
15456 .id_table = lpfc_id_table,
15457 .probe = lpfc_pci_probe_one,
15458 .remove = lpfc_pci_remove_one,
15459 .shutdown = lpfc_pci_remove_one,
15460 .driver.pm = &lpfc_pci_pm_ops_one,
15461 .err_handler = &lpfc_err_handler,
15462 };
15464 static const struct file_operations lpfc_mgmt_fop = {
15465 .owner = THIS_MODULE,
15466 };
15468 static struct miscdevice lpfc_mgmt_dev = {
15469 .minor = MISC_DYNAMIC_MINOR,
15470 .name = "lpfcmgmt",
15471 .fops = &lpfc_mgmt_fop,
15472 };
15474 /**
15475 * lpfc_init - lpfc module initialization routine
15477 * This routine is to be invoked when the lpfc module is loaded into the
15478 * kernel. The special kernel macro module_init() is used to indicate the
15479 * role of this routine to the kernel as lpfc module entry point.
15481 * Return codes
15482 * 0 - successful
15483 * -ENOMEM - FC attach transport failed
15484 * all others - failed
15485 **/
15486 static int __init
15487 lpfc_init(void)
15488 {
15489 int error = 0;
15491 pr_info(LPFC_MODULE_DESC "\n");
15492 pr_info(LPFC_COPYRIGHT "\n");
15494 error = misc_register(&lpfc_mgmt_dev);
15495 if (error)
15496 printk(KERN_ERR "Could not register lpfcmgmt device, "
15497 "misc_register returned with status %d", error);
15500 lpfc_transport_functions.vport_create = lpfc_vport_create;
15501 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
15502 lpfc_transport_template =
15503 fc_attach_transport(&lpfc_transport_functions);
15504 if (lpfc_transport_template == NULL)
15505 goto unregister;
15506 lpfc_vport_transport_template =
15507 fc_attach_transport(&lpfc_vport_transport_functions);
15508 if (lpfc_vport_transport_template == NULL) {
15509 fc_release_transport(lpfc_transport_template);
15510 goto unregister;
15511 }
15512 lpfc_wqe_cmd_template();
15513 lpfc_nvmet_cmd_template();
15515 /* Initialize in case vector mapping is needed */
15516 lpfc_present_cpu = num_present_cpus();
15518 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
15519 "lpfc/sli4:online",
15520 lpfc_cpu_online, lpfc_cpu_offline);
15521 if (error < 0)
15522 goto cpuhp_failure;
15523 lpfc_cpuhp_state = error;
15525 error = pci_register_driver(&lpfc_driver);
15526 if (error)
15527 goto unwind;
15529 return error;
15531 unwind:
15532 cpuhp_remove_multi_state(lpfc_cpuhp_state);
15533 cpuhp_failure:
15534 fc_release_transport(lpfc_transport_template);
15535 fc_release_transport(lpfc_vport_transport_template);
15536 unregister:
15537 misc_deregister(&lpfc_mgmt_dev);
15539 return error;
15540 }
15542 void lpfc_dmp_dbg(struct lpfc_hba *phba)
15543 {
15544 unsigned int start_idx;
15545 unsigned int dbg_cnt;
15546 unsigned int temp_idx;
15547 int i;
15548 int j = 0;
15549 unsigned long rem_nsec;
15551 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
15552 return;
15554 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
15555 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
15556 if (!dbg_cnt)
15557 goto out;
15558 temp_idx = start_idx;
15559 if (dbg_cnt >= DBG_LOG_SZ) {
15560 dbg_cnt = DBG_LOG_SZ;
15561 temp_idx -= 1;
15562 } else {
15563 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
15564 temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
15565 } else {
15566 if (start_idx < dbg_cnt)
15567 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
15568 else
15569 start_idx -= dbg_cnt;
15570 }
15571 }
15572 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
15573 start_idx, temp_idx, dbg_cnt);
15575 for (i = 0; i < dbg_cnt; i++) {
15576 if ((start_idx + i) < DBG_LOG_SZ)
15577 temp_idx = (start_idx + i) % DBG_LOG_SZ;
15578 else
15579 temp_idx = j++;
15580 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
15581 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
15582 temp_idx,
15583 (unsigned long)phba->dbg_log[temp_idx].t_ns,
15584 rem_nsec / 1000,
15585 phba->dbg_log[temp_idx].log);
15586 }
15587 out:
15588 atomic_set(&phba->dbg_log_cnt, 0);
15589 atomic_set(&phba->dbg_log_dmping, 0);
15590 }
15593 void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
15594 {
15595 unsigned int idx;
15596 va_list args;
15597 int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
15598 struct va_format vaf;
15601 va_start(args, fmt);
15602 if (unlikely(dbg_dmping)) {
15603 vaf.fmt = fmt;
15604 vaf.va = &args;
15605 dev_info(&phba->pcidev->dev, "%pV", &vaf);
15606 va_end(args);
15607 return;
15608 }
15609 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
15610 DBG_LOG_SZ;
15612 atomic_inc(&phba->dbg_log_cnt);
15614 vscnprintf(phba->dbg_log[idx].log,
15615 sizeof(phba->dbg_log[idx].log), fmt, args);
15616 va_end(args);
15618 phba->dbg_log[idx].t_ns = local_clock();
15619 }
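
/*
 * Editor's illustrative sketch (not in the original driver): the dump
 * window arithmetic used by lpfc_dmp_dbg() above. With a ring of
 * DBG_LOG_SZ slots, write index idx and cnt buffered messages, the
 * oldest entry sits at (idx - cnt) modulo DBG_LOG_SZ. Helper name and
 * guard are hypothetical.
 */
#ifdef LPFC_DOC_EXAMPLES
static unsigned int lpfc_example_oldest_slot(unsigned int idx,
					     unsigned int cnt)
{
	idx %= DBG_LOG_SZ;
	if (cnt >= DBG_LOG_SZ) /* ring wrapped: window covers the whole ring */
		return idx;
	return (idx >= cnt) ? idx - cnt : DBG_LOG_SZ - (cnt - idx);
}
#endif

15621 /**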
15622 * lpfc_exit - lpfc module removal routine
15624 * This routine is invoked when the lpfc module is removed from the kernel.
15625 * The special kernel macro module_exit() is used to indicate the role of
15626 * this routine to the kernel as lpfc module exit point.
15627 **/
15628 static void __exit
15629 lpfc_exit(void)
15630 {
15631 misc_deregister(&lpfc_mgmt_dev);
15632 pci_unregister_driver(&lpfc_driver);
15633 cpuhp_remove_multi_state(lpfc_cpuhp_state);
15634 fc_release_transport(lpfc_transport_template);
15635 fc_release_transport(lpfc_vport_transport_template);
15636 idr_destroy(&lpfc_hba_index);
15637 }
15639 module_init(lpfc_init);
15640 module_exit(lpfc_exit);
15641 MODULE_LICENSE("GPL");
15642 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
15643 MODULE_AUTHOR("Broadcom");
15644 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);