/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;
static bool lpfc_pldv_detect;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
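
/* A minimal sketch (disabled, not compiled) of the polled-mailbox pattern
 * that lpfc_config_port_prep() uses repeatedly: allocate an LPFC_MBOXQ_t
 * from the mailbox mempool, build a command, issue it with MBX_POLL, test
 * for MBX_SUCCESS, and return the element to the pool. The function name
 * and the -EIO mapping are illustrative only, not part of the driver API.
 */
#if 0
static int example_polled_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	lpfc_read_rev(phba, pmb);		/* build the mailbox command */
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {		/* MBX_* status, not an errno */
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	/* ... consume the response in pmb->u.mb here ... */
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
#endif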

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's asynchronous event
 * configuration mailbox command to the device. If the mailbox command
 * returns successfully, the internal async event support flag is set to 1;
 * otherwise it is set to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
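
/* Worked example of the decode above, with hypothetical field values:
 * ver = 10, rev = 2, lev = 7, dist = 3 and num = 0 take the first
 * snprintf() and yield "10.27"; the same version with dist = 1 ('a' from
 * dist_char[]) and num = 3 takes the second snprintf() and yields
 * "10.27a3".
 */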

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname and
 *	fc_sparam of the vport.
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
static void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name))) {
		vport->vport_flag |= FAWWPN_PARAM_CHG;

		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    vport->port_type == LPFC_PHYSICAL_PORT &&
		    phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
			lpfc_printf_log(phba, KERN_INFO,
					LOG_SLI | LOG_DISCOVERY | LOG_ELS,
					"2701 FA-PWWN change WWPN from %llx to "
					"%llx: vflag x%x fawwpn_flag x%x\n",
					wwn_to_u64(vport->fc_portname.u.wwn),
					wwn_to_u64
					   (vport->fc_sparam.portName.u.wwn),
					vport->vport_flag,
					phba->sli4_hba.fawwpn_flag);
			memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			       sizeof(struct lpfc_name));
		}
	}

	if (vport->fc_portname.u.wwn[0] == 0)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	/* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
	 * longer needed. Prevent unintended ctx_buf access as the mbox is
	 * reused.
	 */
	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth,
				mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_sli4_refresh_params - update driver copy of params.
 * @phba: Pointer to HBA context object.
 *
 * This is called to refresh the driver copy of dynamic fields from the
 * common_get_sli4_parameters descriptor.
 **/
int
lpfc_sli4_refresh_params(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;
	int length, rc;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	mqe = &mboxq->u.mqe;
	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return rc;
	}
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;

	/* Are we forcing MI off via module parameter? */
	if (phba->cfg_enable_mi)
		phba->sli4_hba.pc_sli4_params.mi_ver =
				bf_get(cfg_mi_ver, mbx_sli4_parameters);
	else
		phba->sli4_hba.pc_sli4_params.mi_ver = 0;

	phba->sli4_hba.pc_sli4_params.cmf =
			bf_get(cfg_cmf, mbx_sli4_parameters);
	phba->sli4_hba.pc_sli4_params.pls =
			bf_get(cfg_pvl, mbx_sli4_parameters);

	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return codes
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return codes
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
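
/* Design note: the validation chain above pairs each user link-speed
 * setting with the LMT_* capability bit the adapter must advertise in
 * phba->lmt. An equivalent table-driven form of the same check is
 * sketched below (disabled; the example_* names are illustrative, not
 * driver API):
 */
#if 0
static const struct {
	uint32_t speed;		/* LPFC_USER_LINK_SPEED_* */
	uint32_t lmt_bit;	/* required bit in phba->lmt */
} example_speed_tbl[] = {
	{ LPFC_USER_LINK_SPEED_1G,  LMT_1Gb  },
	{ LPFC_USER_LINK_SPEED_2G,  LMT_2Gb  },
	{ LPFC_USER_LINK_SPEED_4G,  LMT_4Gb  },
	{ LPFC_USER_LINK_SPEED_8G,  LMT_8Gb  },
	{ LPFC_USER_LINK_SPEED_10G, LMT_10Gb },
	{ LPFC_USER_LINK_SPEED_16G, LMT_16Gb },
	{ LPFC_USER_LINK_SPEED_32G, LMT_32Gb },
	{ LPFC_USER_LINK_SPEED_64G, LMT_64Gb },
};

static bool example_speed_supported(struct lpfc_hba *phba, uint32_t speed)
{
	unsigned int i;

	if (speed > LPFC_USER_LINK_SPEED_MAX)
		return false;
	for (i = 0; i < ARRAY_SIZE(example_speed_tbl); i++)
		if (example_speed_tbl[i].speed == speed)
			return phba->lmt & example_speed_tbl[i].lmt_bit;
	return true;	/* e.g. LPFC_USER_LINK_SPEED_AUTO */
}
#endif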

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use in stopping the link.
 *
 * Return codes
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 *	rspiocb which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}
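
/* lpfc_hba_free_post_buf() above uses a common locking pattern: splice the
 * shared postbufq onto a private list head while holding hbalock, then walk
 * and free the entries with the lock dropped, so lpfc_mbuf_free()/kfree()
 * never run under the spinlock. A minimal sketch of the pattern follows
 * (disabled; "shared_list" is a placeholder, not a driver symbol):
 */
#if 0
struct lpfc_dmabuf *mp, *next_mp;
LIST_HEAD(tmp);

spin_lock_irq(&phba->hbalock);
list_splice_init(&shared_list, &tmp);	/* shared_list is now empty */
spin_unlock_irq(&phba->hbalock);

list_for_each_entry_safe(mp, next_mp, &tmp, list) {
	list_del(&mp->list);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);	/* lock not held */
	kfree(mp);
}
#endif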

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA.
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->hbalock);
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
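
/* The handler above illustrates the timer -> worker handoff used by the
 * driver's timers: timer context only sets a bit and wakes the worker,
 * while the heavy lifting runs later in process context. Roughly:
 *
 *	timer fires -> lpfc_hb_timeout() sets WORKER_HB_TMO
 *	            -> lpfc_worker_wake_up(phba)
 *	            -> worker thread calls lpfc_hb_timeout_handler()
 *
 * The tmo_posted test keeps the same event from being queued twice.
 */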

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
	    !(phba->link_state == LPFC_HBA_ERROR) &&
	    !(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

/**
 * lpfc_idle_stat_delay_work - idle_stat tracking
 * @work: pointer to the work structure
 *
 * This routine tracks per-cq idle_stat and determines polling decisions.
 *
 * Return codes:
 *   None
 **/
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba,
					     idle_stat_delay_work);
	struct lpfc_queue *cq;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_idle_stat *idle_stat;
	u32 i, idle_percent;
	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE ||
	    phba->cmf_active_mode != LPFC_CFG_OFF)
		goto requeue;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		cq = hdwq->io_cq;

		/* Skip if we've already handled this cq's primary CPU */
		if (cq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		/* get_cpu_idle_time returns values as running counters. Thus,
		 * to know the amount for this period, the prior counter values
		 * need to be subtracted from the current counter values.
		 * From there, the idle time stat can be calculated as a
		 * percentage of 100 - the sum of the other consumption times.
		 */
		wall_idle = get_cpu_idle_time(i, &wall, 1);
		diff_idle = wall_idle - idle_stat->prev_idle;
		diff_wall = wall - idle_stat->prev_wall;

		if (diff_wall <= diff_idle)
			busy_time = 0;
		else
			busy_time = diff_wall - diff_idle;

		idle_percent = div64_u64(100 * busy_time, diff_wall);
		idle_percent = 100 - idle_percent;

		if (idle_percent < 15)
			cq->poll_mode = LPFC_QUEUE_WORK;
		else
			cq->poll_mode = LPFC_IRQ_POLL;

		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
	}

requeue:
	schedule_delayed_work(&phba->idle_stat_delay_work,
			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}
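
/* Worked example of the idle accounting above, with hypothetical counter
 * deltas: diff_wall = 1000 and diff_idle = 900 give busy_time = 100; the
 * first idle_percent assignment computes the busy share, 100 * 100 / 1000
 * = 10, and 100 - 10 = 90 percent idle. Since 90 is not below 15, the cq
 * keeps LPFC_IRQ_POLL; only a CPU busier than 85 percent of the window
 * falls back to LPFC_QUEUE_WORK.
 */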

static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
		if (ena_delay[i]) {
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}
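
/* Worked example of the EQ delay step above, with a hypothetical count:
 * an eqi->icnt of 20480 interrupts in the sampling window yields
 * usdelay = (20480 >> 10) * LPFC_EQ_DELAY_STEP = 20 * LPFC_EQ_DELAY_STEP,
 * clamped to LPFC_MAX_AUTO_EQ_DELAY. EQs whose last CPU was not flagged
 * in ena_delay[] get their coalescing delay reset to 0.
 */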

/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine does some heuristic methods to adjust
 * XRI distribution. The goal is to fully utilize free XRIs.
 **/
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;

	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		/* Adjust XRIs in private pool */
		lpfc_adjust_pvt_pool_count(phba, i);

		/* Adjust high watermark */
		lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
		/* Snapshot pbl, pvt and busy count */
		lpfc_snapshot_mxp(phba, i);
#endif
	}
}

/**
 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * If a HB mbox is not already in progress, this routine will allocate
 * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
 * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
 **/
static int
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int retval;

	/* Is a Heartbeat mbox already in progress */
	if (phba->hba_flag & HBA_HBEAT_INP)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
	pmboxq->vport = phba->pport;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENXIO;
	}
	phba->hba_flag |= HBA_HBEAT_INP;

	return 0;
}

/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
 * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
 * of the value of lpfc_enable_hba_heartbeat.
 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
 * try to issue a MBX_HEARTBEAT mbox command.
 **/
void
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
	if (phba->cfg_enable_hba_heartbeat)
		return;
	phba->hba_flag |= HBA_HBEAT_TMO;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer has fired and an HBA-timeout event has been
 * posted. This handler performs any periodic operations needed for the
 * device. If such a periodic event has already been attended to either in
 * the interrupt handler or by processing slow-ring or fast-ring events
 * within the HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler
 * simply resets the timer for the next timeout period. If the lpfc
 * heart-beat mailbox command is configured and there is no heart-beat
 * mailbox command outstanding, a heart-beat mailbox is issued and the timer
 * is set properly. Otherwise, if there has been a heart-beat mailbox command
 * outstanding, the HBA shall be put to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_dmabuf *buf_ptr;
	int retval = 0;
	int i, tmo;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	if (phba->elsbuf_cnt &&
	    (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
					 struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
		spin_lock_irq(&phba->pport->work_port_lock);
		if (time_after(phba->last_completion_time +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
				jiffies)) {
			spin_unlock_irq(&phba->pport->work_port_lock);
			if (phba->hba_flag & HBA_HBEAT_INP)
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			else
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			goto out;
		}
		spin_unlock_irq(&phba->pport->work_port_lock);

		/* Check if a MBX_HEARTBEAT is already in progress */
		if (phba->hba_flag & HBA_HBEAT_INP) {
			/*
			 * If heart beat timeout called with HBA_HBEAT_INP set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0459 Adapter heartbeat still outstanding: "
				"last compl time was %d ms.\n",
				jiffies_to_msecs(jiffies
					 - phba->last_completion_time));
			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
		} else {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
			    (list_empty(&psli->mboxq))) {

				retval = lpfc_issue_hb_mbox(phba);
				if (retval) {
					tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
					goto out;
				}
				phba->skipped_hb = 0;
			} else if (time_before_eq(phba->last_completion_time,
						  phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					" updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
	} else {
		/* Check to see if we want to force a MBX_HEARTBEAT */
		if (phba->hba_flag & HBA_HBEAT_TMO) {
			retval = lpfc_issue_hb_mbox(phba);
			if (retval)
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			else
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
		tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
	}
out:
	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (phba->link_state == LPFC_HBA_ERROR &&
	    test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it has triggered erratt. That could cause
	 * I/Os to be dropped by the firmware. Error out the iocbs (I/Os)
	 * on the txcmplq and let the SCSI layer retry them after
	 * re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it has triggered erratt with HS_FFER6.
		 * That could cause I/Os to be dropped by the firmware.
		 * Error out the iocbs (I/Os) on the txcmplq and let the
		 * SCSI layer retry them after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(event_data),
					  (char *) &event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: send reset/port recovery message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;
	LPFC_MBOXQ_t *mboxq;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, the driver needs to wait for
		 * port ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");

	/* If we are in no-wait mode, the HBA has been reset and is not
	 * functional, thus we should clear the
	 * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
	 */
	if (mbx_action == LPFC_MBX_NO_WAIT) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
		if (phba->sli.mbox_active) {
			mboxq = phba->sli.mbox_active;
			mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
			__lpfc_mbox_cmpl_put(phba, mboxq);
			phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			phba->sli.mbox_active = NULL;
		}
		spin_unlock_irq(&phba->hbalock);
	}

	lpfc_offline_prep(phba, mbx_action);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}
1978 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1979 * @phba: pointer to lpfc hba data structure.
1981 * This routine is invoked to handle the SLI4 HBA hardware error attention
1985 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1987 struct lpfc_vport *vport = phba->pport;
1988 uint32_t event_data;
1989 struct Scsi_Host *shost;
1991 struct lpfc_register portstat_reg = {0};
1992 uint32_t reg_err1, reg_err2;
1993 uint32_t uerrlo_reg, uemasklo_reg;
1994 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
1995 bool en_rn_msg = true;
1996 struct temp_event temp_event_data;
1997 struct lpfc_register portsmphr_reg;
2000 /* If the pci channel is offline, ignore possible errors, since
2001 * we cannot communicate with the pci card anyway.
2003 if (pci_channel_offline(phba->pcidev)) {
2004 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2005 "3166 pci channel is offline\n");
2006 lpfc_sli_flush_io_rings(phba);
2010 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
2011 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
2013 case LPFC_SLI_INTF_IF_TYPE_0:
2014 pci_rd_rc1 = lpfc_readl(
2015 phba->sli4_hba.u.if_type0.UERRLOregaddr,
2017 pci_rd_rc2 = lpfc_readl(
2018 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
2020 /* consider PCI bus read error as pci_channel_offline */
2021 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
2023 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
2024 lpfc_sli4_offline_eratt(phba);
2027 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2028 "7623 Checking UE recoverable");
2030 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
2031 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2032 &portsmphr_reg.word0))
2035 smphr_port_status = bf_get(lpfc_port_smphr_port_status,
2037 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
2038 LPFC_PORT_SEM_UE_RECOVERABLE)
2040 /* Sleep for 1 sec before checking the SEMAPHORE */
2044 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2045 "4827 smphr_port_status x%x : Waited %dSec",
2046 smphr_port_status, i);
2048 /* Recoverable UE, reset the HBA device */
2049 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
2050 LPFC_PORT_SEM_UE_RECOVERABLE) {
2051 for (i = 0; i < 20; i++) {
2053 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2054 &portsmphr_reg.word0) &&
2055 (LPFC_POST_STAGE_PORT_READY ==
2056 bf_get(lpfc_port_smphr_port_status,
2058 rc = lpfc_sli4_port_sta_fn_reset(phba,
2059 LPFC_MBX_NO_WAIT, en_rn_msg);
2062 lpfc_printf_log(phba, KERN_ERR,
2064 "4215 Failed to recover UE");
2069 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2070 "7624 Firmware not ready: Failing UE recovery,"
2071 " waited %dSec", i);
2072 phba->link_state = LPFC_HBA_ERROR;
2075 case LPFC_SLI_INTF_IF_TYPE_2:
2076 case LPFC_SLI_INTF_IF_TYPE_6:
2077 pci_rd_rc1 = lpfc_readl(
2078 phba->sli4_hba.u.if_type2.STATUSregaddr,
2079 &portstat_reg.word0);
2080 /* consider PCI bus read error as pci_channel_offline */
2081 if (pci_rd_rc1 == -EIO) {
2082 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2083 "3151 PCI bus read access failure: x%x\n",
2084 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2085 lpfc_sli4_offline_eratt(phba);
2088 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
2089 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2090 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
2091 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2092 "2889 Port Overtemperature event, "
2093 "taking port offline Data: x%x x%x\n",
2094 reg_err1, reg_err2);
2096 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
2097 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
2098 temp_event_data.event_code = LPFC_CRIT_TEMP;
2099 temp_event_data.data = 0xFFFFFFFF;
2101 shost = lpfc_shost_from_vport(phba->pport);
2102 fc_host_post_vendor_event(shost, fc_get_event_number(),
2103 sizeof(temp_event_data),
2104 (char *)&temp_event_data,
2105 SCSI_NL_VID_TYPE_PCI
2106 | PCI_VENDOR_ID_EMULEX);
2108 spin_lock_irq(&phba->hbalock);
2109 phba->over_temp_state = HBA_OVER_TEMP;
2110 spin_unlock_irq(&phba->hbalock);
2111 lpfc_sli4_offline_eratt(phba);
2114 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2115 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
2116 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2117 "3143 Port Down: Firmware Update "
2120 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2121 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2122 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2123 "3144 Port Down: Debug Dump\n");
2124 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2125 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
2126 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2127 "3145 Port Down: Provisioning\n");
2129 /* If resets are disabled then leave the HBA alone and return */
2130 if (!phba->cfg_enable_hba_reset)
2133 /* Check port status register for function reset */
2134 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
2137 /* don't report event on forced debug dump */
2138 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2139 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2144 /* fall through when unable to recover */
2145 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2146 "3152 Unrecoverable error\n");
2147 phba->link_state = LPFC_HBA_ERROR;
2149 case LPFC_SLI_INTF_IF_TYPE_1:
2153 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2154 "3123 Report dump event to upper layer\n");
2155 /* Send an internal error event to mgmt application */
2156 lpfc_board_errevt_to_mgmt(phba);
2158 event_data = FC_REG_DUMP_EVENT;
2159 shost = lpfc_shost_from_vport(vport);
2160 fc_host_post_vendor_event(shost, fc_get_event_number(),
2161 sizeof(event_data), (char *) &event_data,
2162 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2166 * lpfc_handle_eratt - Wrapper func for handling hba error attention
2167 * @phba: pointer to lpfc HBA data structure.
2169 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
2170 * routine, invoked via the API jump table function pointer in the lpfc_hba struct.
2174 * Any other value - error.
2177 lpfc_handle_eratt(struct lpfc_hba *phba)
2179 (*phba->lpfc_handle_eratt)(phba);
2183 * lpfc_handle_latt - The HBA link event handler
2184 * @phba: pointer to lpfc hba data structure.
2186 * This routine is invoked from the worker thread to handle a HBA host
2187 * attention link event. SLI3 only.
2190 lpfc_handle_latt(struct lpfc_hba *phba)
2192 struct lpfc_vport *vport = phba->pport;
2193 struct lpfc_sli *psli = &phba->sli;
2195 volatile uint32_t control;
2198 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2201 goto lpfc_handle_latt_err_exit;
2204 rc = lpfc_mbox_rsrc_prep(phba, pmb);
2207 mempool_free(pmb, phba->mbox_mem_pool);
2208 goto lpfc_handle_latt_err_exit;
2211 /* Cleanup any outstanding ELS commands */
2212 lpfc_els_flush_all_cmd(phba);
2213 psli->slistat.link_event++;
2214 lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
2215 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2217 /* Block ELS IOCBs until we have processed this mbox command */
2218 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2219 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2220 if (rc == MBX_NOT_FINISHED) {
2222 goto lpfc_handle_latt_free_mbuf;
2225 /* Clear Link Attention in HA REG */
2226 spin_lock_irq(&phba->hbalock);
2227 writel(HA_LATT, phba->HAregaddr);
2228 readl(phba->HAregaddr); /* flush */
2229 spin_unlock_irq(&phba->hbalock);
2233 lpfc_handle_latt_free_mbuf:
2234 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2235 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
2236 lpfc_handle_latt_err_exit:
2237 /* Enable Link attention interrupts */
2238 spin_lock_irq(&phba->hbalock);
2239 psli->sli_flag |= LPFC_PROCESS_LA;
2240 control = readl(phba->HCregaddr);
2241 control |= HC_LAINT_ENA;
2242 writel(control, phba->HCregaddr);
2243 readl(phba->HCregaddr); /* flush */
2245 /* Clear Link Attention in HA REG */
2246 writel(HA_LATT, phba->HAregaddr);
2247 readl(phba->HAregaddr); /* flush */
2248 spin_unlock_irq(&phba->hbalock);
2249 lpfc_linkdown(phba);
2250 phba->link_state = LPFC_HBA_ERROR;
2252 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2253 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2259 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2260 * @phba: pointer to lpfc hba data structure.
2261 * @vpd: pointer to the vital product data.
2262 * @len: length of the vital product data in bytes.
2264 * This routine parses the Vital Product Data (VPD). The VPD is treated as
2265 * an array of characters. In this routine, the ModelName, ProgramType, and
2266 * ModelDesc, etc. fields of the phba data structure will be populated.
2269 * 0 - pointer to the VPD passed in is NULL
2273 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2275 uint8_t lenlo, lenhi;
2285 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2286 "0455 Vital Product Data: x%x x%x x%x x%x\n",
2287 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2289 while (!finished && (index < (len - 4))) {
2290 switch (vpd[index]) {
2298 i = ((((unsigned short)lenhi) << 8) + lenlo);
2307 Length = ((((unsigned short)lenhi) << 8) + lenlo);
2308 if (Length > len - index)
2309 Length = len - index;
2310 while (Length > 0) {
2311 /* Look for Serial Number */
2312 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2319 phba->SerialNumber[j++] = vpd[index++];
2323 phba->SerialNumber[j] = 0;
2326 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2327 phba->vpd_flag |= VPD_MODEL_DESC;
2334 phba->ModelDesc[j++] = vpd[index++];
2338 phba->ModelDesc[j] = 0;
2341 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2342 phba->vpd_flag |= VPD_MODEL_NAME;
2349 phba->ModelName[j++] = vpd[index++];
2353 phba->ModelName[j] = 0;
2356 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2357 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2364 phba->ProgramType[j++] = vpd[index++];
2368 phba->ProgramType[j] = 0;
2371 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2372 phba->vpd_flag |= VPD_PORT;
2379 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2380 (phba->sli4_hba.pport_name_sta ==
2381 LPFC_SLI4_PPNAME_GET)) {
2385 phba->Port[j++] = vpd[index++];
2389 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2390 (phba->sli4_hba.pport_name_sta ==
2391 LPFC_SLI4_PPNAME_NON))
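/*
 * For reference, a sketch of the layout consumed above (PCI VPD-R large
 * resource: a tag byte, a two-byte length read low byte first, then
 * keyword/length/data triplets). The keywords map to phba fields:
 *
 *   "SN" -> phba->SerialNumber
 *   "V1" -> phba->ModelDesc
 *   "V2" -> phba->ModelName
 *   "V3" -> phba->ProgramType
 *   "V4" -> phba->Port (skipped on SLI4 when the port name is to be
 *           retrieved from the firmware instead)
 */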
2418 * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
2419 * @phba: pointer to lpfc hba data structure.
2420 * @mdp: pointer to the data structure to hold the derived model name.
2421 * @descp: pointer to the data structure to hold the derived description.
2423 * This routine retrieves HBA's description based on its registered PCI device
2424 * ID. The @descp passed into this function points to an array of 256 chars. It
2425 * shall be returned with the model name, maximum speed, and the host bus type.
2426 * The @mdp passed into this function points to an array of 80 chars. When the
2427 * function returns, the @mdp will be filled with the model name.
2430 lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2432 uint16_t sub_dev_id = phba->pcidev->subsystem_device;
2433 char *model = "<Unknown>";
2436 switch (sub_dev_id) {
2437 case PCI_DEVICE_ID_CLRY_161E:
2440 case PCI_DEVICE_ID_CLRY_162E:
2443 case PCI_DEVICE_ID_CLRY_164E:
2446 case PCI_DEVICE_ID_CLRY_161P:
2449 case PCI_DEVICE_ID_CLRY_162P:
2452 case PCI_DEVICE_ID_CLRY_164P:
2455 case PCI_DEVICE_ID_CLRY_321E:
2458 case PCI_DEVICE_ID_CLRY_322E:
2461 case PCI_DEVICE_ID_CLRY_324E:
2464 case PCI_DEVICE_ID_CLRY_321P:
2467 case PCI_DEVICE_ID_CLRY_322P:
2470 case PCI_DEVICE_ID_CLRY_324P:
2473 case PCI_DEVICE_ID_TLFC_2XX2:
2477 case PCI_DEVICE_ID_TLFC_3162:
2481 case PCI_DEVICE_ID_TLFC_3322:
2490 if (mdp && mdp[0] == '\0')
2491 snprintf(mdp, 79, "%s", model);
2493 if (descp && descp[0] == '\0')
2494 snprintf(descp, 255,
2495 "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
2496 (tbolt) ? "ThunderLink FC " : "Celerity FC-",
2502 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2503 * @phba: pointer to lpfc hba data structure.
2504 * @mdp: pointer to the data structure to hold the derived model name.
2505 * @descp: pointer to the data structure to hold the derived description.
2507 * This routine retrieves HBA's description based on its registered PCI device
2508 * ID. The @descp passed into this function points to an array of 256 chars. It
2509 * shall be returned with the model name, maximum speed, and the host bus type.
2510 * The @mdp passed into this function points to an array of 80 chars. When the
2511 * function returns, the @mdp will be filled with the model name.
2514 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2517 uint16_t dev_id = phba->pcidev->device;
2520 int oneConnect = 0; /* default is not a oneConnect */
2525 } m = {"<Unknown>", "", ""};
2527 if (mdp && mdp[0] != '\0'
2528 && descp && descp[0] != '\0')
2531 if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
2532 lpfc_get_atto_model_desc(phba, mdp, descp);
2536 if (phba->lmt & LMT_64Gb)
2538 else if (phba->lmt & LMT_32Gb)
2540 else if (phba->lmt & LMT_16Gb)
2542 else if (phba->lmt & LMT_10Gb)
2544 else if (phba->lmt & LMT_8Gb)
2546 else if (phba->lmt & LMT_4Gb)
2548 else if (phba->lmt & LMT_2Gb)
2550 else if (phba->lmt & LMT_1Gb)
2558 case PCI_DEVICE_ID_FIREFLY:
2559 m = (typeof(m)){"LP6000", "PCI",
2560 "Obsolete, Unsupported Fibre Channel Adapter"};
2562 case PCI_DEVICE_ID_SUPERFLY:
2563 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2564 m = (typeof(m)){"LP7000", "PCI", ""};
2566 m = (typeof(m)){"LP7000E", "PCI", ""};
2567 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2569 case PCI_DEVICE_ID_DRAGONFLY:
2570 m = (typeof(m)){"LP8000", "PCI",
2571 "Obsolete, Unsupported Fibre Channel Adapter"};
2573 case PCI_DEVICE_ID_CENTAUR:
2574 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2575 m = (typeof(m)){"LP9002", "PCI", ""};
2577 m = (typeof(m)){"LP9000", "PCI", ""};
2578 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2580 case PCI_DEVICE_ID_RFLY:
2581 m = (typeof(m)){"LP952", "PCI",
2582 "Obsolete, Unsupported Fibre Channel Adapter"};
2584 case PCI_DEVICE_ID_PEGASUS:
2585 m = (typeof(m)){"LP9802", "PCI-X",
2586 "Obsolete, Unsupported Fibre Channel Adapter"};
2588 case PCI_DEVICE_ID_THOR:
2589 m = (typeof(m)){"LP10000", "PCI-X",
2590 "Obsolete, Unsupported Fibre Channel Adapter"};
2592 case PCI_DEVICE_ID_VIPER:
2593 m = (typeof(m)){"LPX1000", "PCI-X",
2594 "Obsolete, Unsupported Fibre Channel Adapter"};
2596 case PCI_DEVICE_ID_PFLY:
2597 m = (typeof(m)){"LP982", "PCI-X",
2598 "Obsolete, Unsupported Fibre Channel Adapter"};
2600 case PCI_DEVICE_ID_TFLY:
2601 m = (typeof(m)){"LP1050", "PCI-X",
2602 "Obsolete, Unsupported Fibre Channel Adapter"};
2604 case PCI_DEVICE_ID_HELIOS:
2605 m = (typeof(m)){"LP11000", "PCI-X2",
2606 "Obsolete, Unsupported Fibre Channel Adapter"};
2608 case PCI_DEVICE_ID_HELIOS_SCSP:
2609 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2610 "Obsolete, Unsupported Fibre Channel Adapter"};
2612 case PCI_DEVICE_ID_HELIOS_DCSP:
2613 m = (typeof(m)){"LP11002-SP", "PCI-X2",
2614 "Obsolete, Unsupported Fibre Channel Adapter"};
2616 case PCI_DEVICE_ID_NEPTUNE:
2617 m = (typeof(m)){"LPe1000", "PCIe",
2618 "Obsolete, Unsupported Fibre Channel Adapter"};
2620 case PCI_DEVICE_ID_NEPTUNE_SCSP:
2621 m = (typeof(m)){"LPe1000-SP", "PCIe",
2622 "Obsolete, Unsupported Fibre Channel Adapter"};
2624 case PCI_DEVICE_ID_NEPTUNE_DCSP:
2625 m = (typeof(m)){"LPe1002-SP", "PCIe",
2626 "Obsolete, Unsupported Fibre Channel Adapter"};
2628 case PCI_DEVICE_ID_BMID:
2629 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2631 case PCI_DEVICE_ID_BSMB:
2632 m = (typeof(m)){"LP111", "PCI-X2",
2633 "Obsolete, Unsupported Fibre Channel Adapter"};
2635 case PCI_DEVICE_ID_ZEPHYR:
2636 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2638 case PCI_DEVICE_ID_ZEPHYR_SCSP:
2639 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2641 case PCI_DEVICE_ID_ZEPHYR_DCSP:
2642 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2645 case PCI_DEVICE_ID_ZMID:
2646 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2648 case PCI_DEVICE_ID_ZSMB:
2649 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2651 case PCI_DEVICE_ID_LP101:
2652 m = (typeof(m)){"LP101", "PCI-X",
2653 "Obsolete, Unsupported Fibre Channel Adapter"};
2655 case PCI_DEVICE_ID_LP10000S:
2656 m = (typeof(m)){"LP10000-S", "PCI",
2657 "Obsolete, Unsupported Fibre Channel Adapter"};
2659 case PCI_DEVICE_ID_LP11000S:
2660 m = (typeof(m)){"LP11000-S", "PCI-X2",
2661 "Obsolete, Unsupported Fibre Channel Adapter"};
2663 case PCI_DEVICE_ID_LPE11000S:
2664 m = (typeof(m)){"LPe11000-S", "PCIe",
2665 "Obsolete, Unsupported Fibre Channel Adapter"};
2667 case PCI_DEVICE_ID_SAT:
2668 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2670 case PCI_DEVICE_ID_SAT_MID:
2671 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2673 case PCI_DEVICE_ID_SAT_SMB:
2674 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2676 case PCI_DEVICE_ID_SAT_DCSP:
2677 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2679 case PCI_DEVICE_ID_SAT_SCSP:
2680 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2682 case PCI_DEVICE_ID_SAT_S:
2683 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2685 case PCI_DEVICE_ID_HORNET:
2686 m = (typeof(m)){"LP21000", "PCIe",
2687 "Obsolete, Unsupported FCoE Adapter"};
2690 case PCI_DEVICE_ID_PROTEUS_VF:
2691 m = (typeof(m)){"LPev12000", "PCIe IOV",
2692 "Obsolete, Unsupported Fibre Channel Adapter"};
2694 case PCI_DEVICE_ID_PROTEUS_PF:
2695 m = (typeof(m)){"LPev12000", "PCIe IOV",
2696 "Obsolete, Unsupported Fibre Channel Adapter"};
2698 case PCI_DEVICE_ID_PROTEUS_S:
2699 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2700 "Obsolete, Unsupported Fibre Channel Adapter"};
2702 case PCI_DEVICE_ID_TIGERSHARK:
2704 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2706 case PCI_DEVICE_ID_TOMCAT:
2708 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2710 case PCI_DEVICE_ID_FALCON:
2711 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2712 "EmulexSecure Fibre"};
2714 case PCI_DEVICE_ID_BALIUS:
2715 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2716 "Obsolete, Unsupported Fibre Channel Adapter"};
2718 case PCI_DEVICE_ID_LANCER_FC:
2719 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2721 case PCI_DEVICE_ID_LANCER_FC_VF:
2722 m = (typeof(m)){"LPe16000", "PCIe",
2723 "Obsolete, Unsupported Fibre Channel Adapter"};
2725 case PCI_DEVICE_ID_LANCER_FCOE:
2727 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2729 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2731 m = (typeof(m)){"OCe15100", "PCIe",
2732 "Obsolete, Unsupported FCoE"};
2734 case PCI_DEVICE_ID_LANCER_G6_FC:
2735 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2737 case PCI_DEVICE_ID_LANCER_G7_FC:
2738 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2740 case PCI_DEVICE_ID_LANCER_G7P_FC:
2741 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
2743 case PCI_DEVICE_ID_SKYHAWK:
2744 case PCI_DEVICE_ID_SKYHAWK_VF:
2746 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2749 m = (typeof(m)){"Unknown", "", ""};
2753 if (mdp && mdp[0] == '\0')
2754 snprintf(mdp, 79, "%s", m.name);
2756 * OneConnect HBAs require special processing; they are all initiators
2757 * and we append the port number to the description.
2759 if (descp && descp[0] == '\0') {
2761 snprintf(descp, 255,
2762 "Emulex OneConnect %s, %s Initiator %s",
2765 else if (max_speed == 0)
2766 snprintf(descp, 255,
2768 m.name, m.bus, m.function);
2770 snprintf(descp, 255,
2771 "Emulex %s %d%s %s %s",
2772 m.name, max_speed, (GE) ? "GE" : "Gb",
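/*
 * For example, with m = {"LPe32000", "PCIe", "Fibre Channel Adapter"}
 * and max_speed = 32, the format above produces the description
 * "Emulex LPe32000 32Gb PCIe Fibre Channel Adapter".
 */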
2778 * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2779 * @phba: pointer to lpfc hba data structure.
2780 * @pring: pointer to an IOCB ring.
2781 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2783 * This routine posts a given number of IOCBs with the associated DMA buffer
2784 * descriptors specified by the cnt argument to the given IOCB ring.
2787 * The number of IOCBs NOT able to be posted to the IOCB ring.
2790 lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2793 struct lpfc_iocbq *iocb;
2794 struct lpfc_dmabuf *mp1, *mp2;
2796 cnt += pring->missbufcnt;
2798 /* While there are buffers to post */
2800 /* Allocate buffer for command iocb */
2801 iocb = lpfc_sli_get_iocbq(phba);
2803 pring->missbufcnt = cnt;
2808 /* 2 buffers can be posted per command */
2809 /* Allocate buffer to post */
2810 mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2812 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2813 if (!mp1 || !mp1->virt) {
2815 lpfc_sli_release_iocbq(phba, iocb);
2816 pring->missbufcnt = cnt;
2820 INIT_LIST_HEAD(&mp1->list);
2821 /* Allocate buffer to post */
2823 mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2825 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2827 if (!mp2 || !mp2->virt) {
2829 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2831 lpfc_sli_release_iocbq(phba, iocb);
2832 pring->missbufcnt = cnt;
2836 INIT_LIST_HEAD(&mp2->list);
2841 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2842 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2843 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2844 icmd->ulpBdeCount = 1;
2847 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2848 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2849 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2851 icmd->ulpBdeCount = 2;
2854 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2857 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2859 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2863 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2867 lpfc_sli_release_iocbq(phba, iocb);
2868 pring->missbufcnt = cnt;
2871 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2873 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2875 pring->missbufcnt = 0;
2880 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2881 * @phba: pointer to lpfc hba data structure.
2883 * This routine posts initial receive IOCB buffers to the ELS ring. The
2884 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2885 * set to 64 IOCBs. SLI3 only.
2888 * 0 - success (currently always success)
2891 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2893 struct lpfc_sli *psli = &phba->sli;
2895 /* Ring 0, ELS / CT buffers */
2896 lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2897 /* Ring 2 - FCP no buffers needed */
2902 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
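/*
 * S(N, V) rotates the 32-bit value V left by N bits, e.g.
 * S(5, 0x80000001) == 0x00000030; it implements the rotations used by
 * the SHA-1 style hashing below.
 */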
2905 * lpfc_sha_init - Set up initial array of hash table entries
2906 * @HashResultPointer: pointer to an array as hash table.
2908 * This routine sets up the initial values in the array of hash table entries
2912 lpfc_sha_init(uint32_t *HashResultPointer)
2914 HashResultPointer[0] = 0x67452301;
2915 HashResultPointer[1] = 0xEFCDAB89;
2916 HashResultPointer[2] = 0x98BADCFE;
2917 HashResultPointer[3] = 0x10325476;
2918 HashResultPointer[4] = 0xC3D2E1F0;
2922 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2923 * @HashResultPointer: pointer to an initial/result hash table.
2924 * @HashWorkingPointer: pointer to a working hash table.
2926 * This routine iterates an initial hash table pointed to by @HashResultPointer
2927 * with the values from the working hash table pointed to by @HashWorkingPointer.
2928 * The results are put back into the initial hash table, returned through
2929 * @HashResultPointer as the result hash table.
2932 lpfc_sha_iterate(uint32_t *HashResultPointer, uint32_t *HashWorkingPointer)
2936 uint32_t A, B, C, D, E;
2939 HashWorkingPointer[t] =
2941 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2943 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2944 } while (++t <= 79);
2946 A = HashResultPointer[0];
2947 B = HashResultPointer[1];
2948 C = HashResultPointer[2];
2949 D = HashResultPointer[3];
2950 E = HashResultPointer[4];
2954 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2955 } else if (t < 40) {
2956 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2957 } else if (t < 60) {
2958 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2960 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2962 TEMP += S(5, A) + E + HashWorkingPointer[t];
2968 } while (++t <= 79);
2970 HashResultPointer[0] += A;
2971 HashResultPointer[1] += B;
2972 HashResultPointer[2] += C;
2973 HashResultPointer[3] += D;
2974 HashResultPointer[4] += E;
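/*
 * Note: the round constants above (0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC,
 * 0xCA62C1D6), the choose/parity/majority round functions, and the
 * H0-H4 seeds in lpfc_sha_init() are the standard SHA-1 values
 * (FIPS 180-1), so each call performs one SHA-1 compression over an
 * 80-word expanded message block.
 */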
2979 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2980 * @RandomChallenge: pointer to the entry of host challenge random number array.
2981 * @HashWorking: pointer to the entry of the working hash array.
2983 * This routine calculates the working hash array referred by @HashWorking
2984 * from the challenge random numbers associated with the host, referred by
2985 * @RandomChallenge. The result is put into the entry of the working hash
2986 * array and returned by reference through @HashWorking.
2989 lpfc_challenge_key(uint32_t *RandomChallenge, uint32_t *HashWorking)
2991 *HashWorking = (*RandomChallenge ^ *HashWorking);
2995 * lpfc_hba_init - Perform special handling for LC HBA initialization
2996 * @phba: pointer to lpfc hba data structure.
2997 * @hbainit: pointer to an array of unsigned 32-bit integers.
2999 * This routine performs the special handling for LC HBA initialization.
3002 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
3005 uint32_t *HashWorking;
3006 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
3008 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
3012 HashWorking[0] = HashWorking[78] = *pwwnn++;
3013 HashWorking[1] = HashWorking[79] = *pwwnn;
3015 for (t = 0; t < 7; t++)
3016 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
3018 lpfc_sha_init(hbainit);
3019 lpfc_sha_iterate(hbainit, HashWorking);
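/*
 * In outline, the challenge/response computed above: the 80-word working
 * array is seeded from the adapter WWNN (words 0/1, mirrored at 78/79),
 * its first seven words are XORed with the RandomData challenge via
 * lpfc_challenge_key(), and the SHA-1 iteration then leaves the 5-word
 * response in @hbainit.
 */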
3024 * lpfc_cleanup - Performs vport cleanups before deleting a vport
3025 * @vport: pointer to a virtual N_Port data structure.
3027 * This routine performs the necessary cleanups before deleting the @vport.
3028 * It invokes the discovery state machine to perform necessary state
3029 * transitions and to release the ndlps associated with the @vport. Note,
3030 * the physical port is treated as @vport 0.
3033 lpfc_cleanup(struct lpfc_vport *vport)
3035 struct lpfc_hba *phba = vport->phba;
3036 struct lpfc_nodelist *ndlp, *next_ndlp;
3039 if (phba->link_state > LPFC_LINK_DOWN)
3040 lpfc_port_link_failure(vport);
3042 /* Clean up VMID resources */
3043 if (lpfc_is_vmid_enabled(phba))
3044 lpfc_vmid_vport_cleanup(vport);
3046 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
3047 if (vport->port_type != LPFC_PHYSICAL_PORT &&
3048 ndlp->nlp_DID == Fabric_DID) {
3049 /* Just free up ndlp with Fabric_DID for vports */
3054 if (ndlp->nlp_DID == Fabric_Cntl_DID &&
3055 ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
3060 /* Fabric Ports not in UNMAPPED state are cleaned up in the
3063 if (ndlp->nlp_type & NLP_FABRIC &&
3064 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
3065 lpfc_disc_state_machine(vport, ndlp, NULL,
3066 NLP_EVT_DEVICE_RECOVERY);
3068 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
3069 lpfc_disc_state_machine(vport, ndlp, NULL,
3073 /* This is a special case flush to return all
3074 * IOs before entering this loop. There are
3075 * two points in the code where a flush is
3076 * avoided if the FC_UNLOADING flag is set.
3077 * One is in the multipool destroy
3078 * (this prevents a crash) and the other is
3079 * in the nvme abort handler (which also
3080 * prevents a crash). Both of these exceptions
3081 * are cases where the slot is still accessible.
3082 * The flush here happens only when the pci slot is offline.
3085 if (vport->load_flag & FC_UNLOADING &&
3086 pci_channel_offline(phba->pcidev))
3087 lpfc_sli_flush_io_rings(vport->phba);
3089 /* At this point, ALL ndlp's should be gone
3090 * because of the previous NLP_EVT_DEVICE_RM.
3091 * Let's wait for this to happen, if needed.
3093 while (!list_empty(&vport->fc_nodes)) {
3095 lpfc_printf_vlog(vport, KERN_ERR,
3097 "0233 Nodelist not empty\n");
3098 list_for_each_entry_safe(ndlp, next_ndlp,
3099 &vport->fc_nodes, nlp_listp) {
3100 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
3102 "0282 did:x%x ndlp:x%px "
3103 "refcnt:%d xflags x%x nflag x%x\n",
3104 ndlp->nlp_DID, (void *)ndlp,
3105 kref_read(&ndlp->kref),
3106 ndlp->fc4_xpt_flags,
3112 /* Wait for any activity on ndlps to settle */
3115 lpfc_cleanup_vports_rrqs(vport, NULL);
3119 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
3120 * @vport: pointer to a virtual N_Port data structure.
3122 * This routine stops all the timers associated with a @vport. This function
3123 * is invoked before disabling or deleting a @vport. Note that the physical
3124 * port is treated as @vport 0.
3127 lpfc_stop_vport_timers(struct lpfc_vport *vport)
3129 del_timer_sync(&vport->els_tmofunc);
3130 del_timer_sync(&vport->delayed_disc_tmo);
3131 lpfc_can_disctmo(vport);
3136 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3137 * @phba: pointer to lpfc hba data structure.
3139 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
3140 * caller of this routine should already hold the host lock.
3143 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3145 /* Clear pending FCF rediscovery wait flag */
3146 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3148 /* Now, try to stop the timer */
3149 del_timer(&phba->fcf.redisc_wait);
3153 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3154 * @phba: pointer to lpfc hba data structure.
3156 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
3157 * checks whether the FCF rediscovery wait timer is pending with the host
3158 * lock held before proceeding with disabling the timer and clearing the
3159 * wait timer pending flag.
3162 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3164 spin_lock_irq(&phba->hbalock);
3165 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3166 /* FCF rediscovery timer already fired or stopped */
3167 spin_unlock_irq(&phba->hbalock);
3170 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3171 /* Clear failover in progress flags */
3172 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3173 spin_unlock_irq(&phba->hbalock);
3177 * lpfc_cmf_stop - Stop CMF processing
3178 * @phba: pointer to lpfc hba data structure.
3180 * This is called when the link goes down or if CMF mode is turned OFF.
3181 * It is also called when going offline or unloading, just before the
3182 * congestion info buffer is unregistered.
3185 lpfc_cmf_stop(struct lpfc_hba *phba)
3188 struct lpfc_cgn_stat *cgs;
3190 /* We only do something if CMF is enabled */
3191 if (!phba->sli4_hba.pc_sli4_params.cmf)
3194 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3195 "6221 Stop CMF / Cancel Timer\n");
3197 /* Cancel the CMF timer */
3198 hrtimer_cancel(&phba->cmf_timer);
3200 /* Zero CMF counters */
3201 atomic_set(&phba->cmf_busy, 0);
3202 for_each_present_cpu(cpu) {
3203 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3204 atomic64_set(&cgs->total_bytes, 0);
3205 atomic64_set(&cgs->rcv_bytes, 0);
3206 atomic_set(&cgs->rx_io_cnt, 0);
3207 atomic64_set(&cgs->rx_latency, 0);
3209 atomic_set(&phba->cmf_bw_wait, 0);
3211 /* Resume any blocked IO - Queue unblock on workqueue */
3212 queue_work(phba->wq, &phba->unblock_request_work);
3215 static inline uint64_t
3216 lpfc_get_max_line_rate(struct lpfc_hba *phba)
3218 uint64_t rate = lpfc_sli_port_speed_get(phba);
3220 return ((((unsigned long)rate) * 1024 * 1024) / 10);
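/*
 * lpfc_sli_port_speed_get() reports the link speed in megabits per
 * second; multiplying by 1024 * 1024 converts to bits per second and
 * the divide by 10 approximates the ten encoded bits carried per data
 * byte on the wire. A 32G link (32000 Mb/s), for example, works out to
 * roughly 3.36e9 bytes per second.
 */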
3224 lpfc_cmf_signal_init(struct lpfc_hba *phba)
3226 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3227 "6223 Signal CMF init\n");
3229 /* Use the new fc_linkspeed to recalculate */
3230 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
3231 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
3232 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
3233 phba->cmf_interval_rate, 1000);
3234 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
3236 /* This is a signal to firmware to sync up CMF BW with link speed */
3237 lpfc_issue_cmf_sync_wqe(phba, 0, 0);
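/*
 * Since the CMF interval is expressed in milliseconds (the hrtimer in
 * lpfc_cmf_start() arms with LPFC_CMF_INTERVAL * 1000000 ns), the
 * division by 1000 above makes cmf_link_byte_count the byte capacity of
 * the link over one interval: bytes/sec * interval_ms / 1000. That
 * capacity also becomes the initial cap on bytes per interval.
 */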
3241 * lpfc_cmf_start - Start CMF processing
3242 * @phba: pointer to lpfc hba data structure.
3244 * This is called when the link comes up or when CMF mode changes from
3245 * OFF to Monitor or Managed.
3248 lpfc_cmf_start(struct lpfc_hba *phba)
3250 struct lpfc_cgn_stat *cgs;
3253 /* We only do something if CMF is enabled */
3254 if (!phba->sli4_hba.pc_sli4_params.cmf ||
3255 phba->cmf_active_mode == LPFC_CFG_OFF)
3258 /* Reinitialize congestion buffer info */
3259 lpfc_init_congestion_buf(phba);
3261 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
3262 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
3263 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
3264 atomic_set(&phba->cgn_sync_warn_cnt, 0);
3266 atomic_set(&phba->cmf_busy, 0);
3267 for_each_present_cpu(cpu) {
3268 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3269 atomic64_set(&cgs->total_bytes, 0);
3270 atomic64_set(&cgs->rcv_bytes, 0);
3271 atomic_set(&cgs->rx_io_cnt, 0);
3272 atomic64_set(&cgs->rx_latency, 0);
3274 phba->cmf_latency.tv_sec = 0;
3275 phba->cmf_latency.tv_nsec = 0;
3277 lpfc_cmf_signal_init(phba);
3279 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3280 "6222 Start CMF / Timer\n");
3282 phba->cmf_timer_cnt = 0;
3283 hrtimer_start(&phba->cmf_timer,
3284 ktime_set(0, LPFC_CMF_INTERVAL * 1000000),
3286 /* Setup for latency check in IO cmpl routines */
3287 ktime_get_real_ts64(&phba->cmf_latency);
3289 atomic_set(&phba->cmf_bw_wait, 0);
3290 atomic_set(&phba->cmf_stop_io, 0);
3294 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
3295 * @phba: pointer to lpfc hba data structure.
3297 * This routine stops all the timers associated with a HBA. This function is
3298 * invoked before either putting a HBA offline or unloading the driver.
3301 lpfc_stop_hba_timers(struct lpfc_hba *phba)
3304 lpfc_stop_vport_timers(phba->pport);
3305 cancel_delayed_work_sync(&phba->eq_delay_work);
3306 cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3307 del_timer_sync(&phba->sli.mbox_tmo);
3308 del_timer_sync(&phba->fabric_block_timer);
3309 del_timer_sync(&phba->eratt_poll);
3310 del_timer_sync(&phba->hb_tmofunc);
3311 if (phba->sli_rev == LPFC_SLI_REV4) {
3312 del_timer_sync(&phba->rrq_tmr);
3313 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3315 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3317 switch (phba->pci_dev_grp) {
3318 case LPFC_PCI_DEV_LP:
3319 /* Stop any LightPulse device specific driver timers */
3320 del_timer_sync(&phba->fcp_poll_timer);
3322 case LPFC_PCI_DEV_OC:
3323 /* Stop any OneConnect device specific driver timers */
3324 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3327 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3328 "0297 Invalid device group (x%x)\n",
3336 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
3337 * @phba: pointer to lpfc hba data structure.
3338 * @mbx_action: flag for mailbox no wait action.
3340 * This routine marks a HBA's management interface as blocked. Once the HBA's
3341 * management interface is marked as blocked, all the user space access to
3342 * the HBA, whether from the sysfs interface or the libdfc interface, will
3343 * be blocked. The HBA is set to block the management interface when the
3344 * driver prepares the HBA interface for online or offline.
3347 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3349 unsigned long iflag;
3350 uint8_t actcmd = MBX_HEARTBEAT;
3351 unsigned long timeout;
3353 spin_lock_irqsave(&phba->hbalock, iflag);
3354 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3355 spin_unlock_irqrestore(&phba->hbalock, iflag);
3356 if (mbx_action == LPFC_MBX_NO_WAIT)
3358 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3359 spin_lock_irqsave(&phba->hbalock, iflag);
3360 if (phba->sli.mbox_active) {
3361 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3362 /* Determine how long we might wait for the active mailbox
3363 * command to be gracefully completed by firmware.
3365 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3366 phba->sli.mbox_active) * 1000) + jiffies;
3368 spin_unlock_irqrestore(&phba->hbalock, iflag);
3370 /* Wait for the outstanding mailbox command to complete */
3371 while (phba->sli.mbox_active) {
3372 /* Check active mailbox complete status every 2ms */
3374 if (time_after(jiffies, timeout)) {
3375 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3376 "2813 Mgmt IO is Blocked %x "
3377 "- mbox cmd %x still active\n",
3378 phba->sli.sli_flag, actcmd);
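/*
 * Typical pairing, as used by lpfc_online() and lpfc_offline_prep()
 * below (a sketch, not additional driver logic):
 *
 *	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
 *	... bring the port online or prepare it for offline ...
 *	lpfc_unblock_mgmt_io(phba);
 */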
3385 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3386 * @phba: pointer to lpfc hba data structure.
3388 * Allocate RPIs for all active remote nodes. This is needed whenever
3389 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3390 * is to fix up the temporary rpi assignments.
3393 lpfc_sli4_node_prep(struct lpfc_hba *phba)
3395 struct lpfc_nodelist *ndlp, *next_ndlp;
3396 struct lpfc_vport **vports;
3399 if (phba->sli_rev != LPFC_SLI_REV4)
3402 vports = lpfc_create_vport_work_array(phba);
3406 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3407 if (vports[i]->load_flag & FC_UNLOADING)
3410 list_for_each_entry_safe(ndlp, next_ndlp,
3411 &vports[i]->fc_nodes,
3413 rpi = lpfc_sli4_alloc_rpi(phba);
3414 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3415 /* TODO print log? */
3418 ndlp->nlp_rpi = rpi;
3419 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3420 LOG_NODE | LOG_DISCOVERY,
3421 "0009 Assign RPI x%x to ndlp x%px "
3422 "DID:x%06x flg:x%x\n",
3423 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3427 lpfc_destroy_vport_work_array(phba, vports);
3431 * lpfc_create_expedite_pool - create expedite pool
3432 * @phba: pointer to lpfc hba data structure.
3434 * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
3435 * to the expedite pool and marks them as expedite.
3437 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3439 struct lpfc_sli4_hdw_queue *qp;
3440 struct lpfc_io_buf *lpfc_ncmd;
3441 struct lpfc_io_buf *lpfc_ncmd_next;
3442 struct lpfc_epd_pool *epd_pool;
3443 unsigned long iflag;
3445 epd_pool = &phba->epd_pool;
3446 qp = &phba->sli4_hba.hdwq[0];
3448 spin_lock_init(&epd_pool->lock);
3449 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3450 spin_lock(&epd_pool->lock);
3451 INIT_LIST_HEAD(&epd_pool->list);
3452 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3453 &qp->lpfc_io_buf_list_put, list) {
3454 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3455 lpfc_ncmd->expedite = true;
3458 if (epd_pool->count >= XRI_BATCH)
3461 spin_unlock(&epd_pool->lock);
3462 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3466 * lpfc_destroy_expedite_pool - destroy expedite pool
3467 * @phba: pointer to lpfc hba data structure.
3469 * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
3470 * of HWQ 0 and clears the expedite mark.
3472 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3474 struct lpfc_sli4_hdw_queue *qp;
3475 struct lpfc_io_buf *lpfc_ncmd;
3476 struct lpfc_io_buf *lpfc_ncmd_next;
3477 struct lpfc_epd_pool *epd_pool;
3478 unsigned long iflag;
3480 epd_pool = &phba->epd_pool;
3481 qp = &phba->sli4_hba.hdwq[0];
3483 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3484 spin_lock(&epd_pool->lock);
3485 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3486 &epd_pool->list, list) {
3487 list_move_tail(&lpfc_ncmd->list,
3488 &qp->lpfc_io_buf_list_put);
3489 lpfc_ncmd->expedite = false;
3493 spin_unlock(&epd_pool->lock);
3494 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3498 * lpfc_create_multixri_pools - create multi-XRI pools
3499 * @phba: pointer to lpfc hba data structure.
3501 * This routine initializes the public and private pools per HWQ, then moves
3502 * XRIs from lpfc_io_buf_list_put to the public pool. High and low watermarks are also
3505 void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3510 struct lpfc_io_buf *lpfc_ncmd;
3511 struct lpfc_io_buf *lpfc_ncmd_next;
3512 unsigned long iflag;
3513 struct lpfc_sli4_hdw_queue *qp;
3514 struct lpfc_multixri_pool *multixri_pool;
3515 struct lpfc_pbl_pool *pbl_pool;
3516 struct lpfc_pvt_pool *pvt_pool;
3518 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3519 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3520 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3521 phba->sli4_hba.io_xri_cnt);
3523 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3524 lpfc_create_expedite_pool(phba);
3526 hwq_count = phba->cfg_hdw_queue;
3527 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3529 for (i = 0; i < hwq_count; i++) {
3530 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3532 if (!multixri_pool) {
3533 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3534 "1238 Failed to allocate memory for "
3537 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3538 lpfc_destroy_expedite_pool(phba);
3542 qp = &phba->sli4_hba.hdwq[j];
3543 kfree(qp->p_multixri_pool);
3546 phba->cfg_xri_rebalancing = 0;
3550 qp = &phba->sli4_hba.hdwq[i];
3551 qp->p_multixri_pool = multixri_pool;
3553 multixri_pool->xri_limit = count_per_hwq;
3554 multixri_pool->rrb_next_hwqid = i;
3556 /* Deal with public free xri pool */
3557 pbl_pool = &multixri_pool->pbl_pool;
3558 spin_lock_init(&pbl_pool->lock);
3559 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3560 spin_lock(&pbl_pool->lock);
3561 INIT_LIST_HEAD(&pbl_pool->list);
3562 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3563 &qp->lpfc_io_buf_list_put, list) {
3564 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3568 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3569 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3570 pbl_pool->count, i);
3571 spin_unlock(&pbl_pool->lock);
3572 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3574 /* Deal with private free xri pool */
3575 pvt_pool = &multixri_pool->pvt_pool;
3576 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3577 pvt_pool->low_watermark = XRI_BATCH;
3578 spin_lock_init(&pvt_pool->lock);
3579 spin_lock_irqsave(&pvt_pool->lock, iflag);
3580 INIT_LIST_HEAD(&pvt_pool->list);
3581 pvt_pool->count = 0;
3582 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
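/*
 * Resulting layout per hardware queue (a summary of the loop above):
 * each HWQ gets a public pbl_pool primed with its share of the XRIs and
 * an empty private pvt_pool bounded by low_watermark = XRI_BATCH and
 * high_watermark = xri_limit / 2; XRIs later migrate between the pools,
 * and between HWQs starting at rrb_next_hwqid, as rebalancing runs.
 */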
3587 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3588 * @phba: pointer to lpfc hba data structure.
3590 * This routine returns XRIs from the public and private pools to lpfc_io_buf_list_put.
3592 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3596 struct lpfc_io_buf *lpfc_ncmd;
3597 struct lpfc_io_buf *lpfc_ncmd_next;
3598 unsigned long iflag;
3599 struct lpfc_sli4_hdw_queue *qp;
3600 struct lpfc_multixri_pool *multixri_pool;
3601 struct lpfc_pbl_pool *pbl_pool;
3602 struct lpfc_pvt_pool *pvt_pool;
3604 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3605 lpfc_destroy_expedite_pool(phba);
3607 if (!(phba->pport->load_flag & FC_UNLOADING))
3608 lpfc_sli_flush_io_rings(phba);
3610 hwq_count = phba->cfg_hdw_queue;
3612 for (i = 0; i < hwq_count; i++) {
3613 qp = &phba->sli4_hba.hdwq[i];
3614 multixri_pool = qp->p_multixri_pool;
3618 qp->p_multixri_pool = NULL;
3620 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3622 /* Deal with public free xri pool */
3623 pbl_pool = &multixri_pool->pbl_pool;
3624 spin_lock(&pbl_pool->lock);
3626 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3627 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3628 pbl_pool->count, i);
3630 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3631 &pbl_pool->list, list) {
3632 list_move_tail(&lpfc_ncmd->list,
3633 &qp->lpfc_io_buf_list_put);
3638 INIT_LIST_HEAD(&pbl_pool->list);
3639 pbl_pool->count = 0;
3641 spin_unlock(&pbl_pool->lock);
3643 /* Deal with private free xri pool */
3644 pvt_pool = &multixri_pool->pvt_pool;
3645 spin_lock(&pvt_pool->lock);
3647 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3648 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3649 pvt_pool->count, i);
3651 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3652 &pvt_pool->list, list) {
3653 list_move_tail(&lpfc_ncmd->list,
3654 &qp->lpfc_io_buf_list_put);
3659 INIT_LIST_HEAD(&pvt_pool->list);
3660 pvt_pool->count = 0;
3662 spin_unlock(&pvt_pool->lock);
3663 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3665 kfree(multixri_pool);
3670 * lpfc_online - Initialize and bring a HBA online
3671 * @phba: pointer to lpfc hba data structure.
3673 * This routine initializes the HBA and brings a HBA online. During this
3674 * process, the management interface is blocked to prevent user space access
3675 * to the HBA from interfering with the driver initialization.
3682 lpfc_online(struct lpfc_hba *phba)
3684 struct lpfc_vport *vport;
3685 struct lpfc_vport **vports;
3687 bool vpis_cleared = false;
3691 vport = phba->pport;
3693 if (!(vport->fc_flag & FC_OFFLINE_MODE))
3696 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3697 "0458 Bring Adapter online\n");
3699 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3701 if (phba->sli_rev == LPFC_SLI_REV4) {
3702 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3703 lpfc_unblock_mgmt_io(phba);
3706 spin_lock_irq(&phba->hbalock);
3707 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3708 vpis_cleared = true;
3709 spin_unlock_irq(&phba->hbalock);
3711 /* Reestablish the local initiator port.
3712 * The offline process destroyed the previous lport.
3714 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3715 !phba->nvmet_support) {
3716 error = lpfc_nvme_create_localport(phba->pport);
3718 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3719 "6132 NVME restore reg failed "
3720 "on nvmei error x%x\n", error);
3723 lpfc_sli_queue_init(phba);
3724 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3725 lpfc_unblock_mgmt_io(phba);
3730 vports = lpfc_create_vport_work_array(phba);
3731 if (vports != NULL) {
3732 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3733 struct Scsi_Host *shost;
3734 shost = lpfc_shost_from_vport(vports[i]);
3735 spin_lock_irq(shost->host_lock);
3736 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3737 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3738 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3739 if (phba->sli_rev == LPFC_SLI_REV4) {
3740 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3741 if ((vpis_cleared) &&
3742 (vports[i]->port_type !=
3743 LPFC_PHYSICAL_PORT))
3746 spin_unlock_irq(shost->host_lock);
3749 lpfc_destroy_vport_work_array(phba, vports);
3751 if (phba->cfg_xri_rebalancing)
3752 lpfc_create_multixri_pools(phba);
3754 lpfc_cpuhp_add(phba);
3756 lpfc_unblock_mgmt_io(phba);
3761 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
3762 * @phba: pointer to lpfc hba data structure.
3764 * This routine marks a HBA's management interface as not blocked. Once the
3765 * HBA's management interface is marked as not blocked, all the user space
3766 * access to the HBA, whether from the sysfs interface or the libdfc
3767 * interface, will be allowed. The HBA is set to block the management interface
3768 * when the driver prepares the HBA interface for online or offline and then
3769 * set to unblock the management interface afterwards.
3772 lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
3774 unsigned long iflag;
3776 spin_lock_irqsave(&phba->hbalock, iflag);
3777 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3778 spin_unlock_irqrestore(&phba->hbalock, iflag);
3782 * lpfc_offline_prep - Prepare a HBA to be brought offline
3783 * @phba: pointer to lpfc hba data structure.
3784 * @mbx_action: flag for mailbox shutdown action.
3786 * This routine is invoked to prepare a HBA to be brought offline. It performs
3787 * an unreg_login to all the nodes on all vports and flushes the mailbox
3788 * queue to make it ready to be brought offline.
3791 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3793 struct lpfc_vport *vport = phba->pport;
3794 struct lpfc_nodelist *ndlp, *next_ndlp;
3795 struct lpfc_vport **vports;
3796 struct Scsi_Host *shost;
3801 if (vport->fc_flag & FC_OFFLINE_MODE)
3804 lpfc_block_mgmt_io(phba, mbx_action);
3806 lpfc_linkdown(phba);
3808 offline = pci_channel_offline(phba->pcidev);
3809 hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
3811 /* Issue an unreg_login to all nodes on all vports */
3812 vports = lpfc_create_vport_work_array(phba);
3813 if (vports != NULL) {
3814 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3815 if (vports[i]->load_flag & FC_UNLOADING)
3817 shost = lpfc_shost_from_vport(vports[i]);
3818 spin_lock_irq(shost->host_lock);
3819 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3820 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3821 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3822 spin_unlock_irq(shost->host_lock);
3824 shost = lpfc_shost_from_vport(vports[i]);
3825 list_for_each_entry_safe(ndlp, next_ndlp,
3826 &vports[i]->fc_nodes,
3829 spin_lock_irq(&ndlp->lock);
3830 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3831 spin_unlock_irq(&ndlp->lock);
3833 if (offline || hba_pci_err) {
3834 spin_lock_irq(&ndlp->lock);
3835 ndlp->nlp_flag &= ~(NLP_UNREG_INP |
3836 NLP_RPI_REGISTERED);
3837 spin_unlock_irq(&ndlp->lock);
3838 if (phba->sli_rev == LPFC_SLI_REV4)
3839 lpfc_sli_rpi_release(vports[i],
3842 lpfc_unreg_rpi(vports[i], ndlp);
3845 * Whenever an SLI4 port goes offline, free the
3846 * RPI. Get a new RPI when the adapter port
3847 * comes back online.
3849 if (phba->sli_rev == LPFC_SLI_REV4) {
3850 lpfc_printf_vlog(vports[i], KERN_INFO,
3851 LOG_NODE | LOG_DISCOVERY,
3852 "0011 Free RPI x%x on "
3853 "ndlp: x%px did x%x\n",
3854 ndlp->nlp_rpi, ndlp,
3856 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3857 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3860 if (ndlp->nlp_type & NLP_FABRIC) {
3861 lpfc_disc_state_machine(vports[i], ndlp,
3862 NULL, NLP_EVT_DEVICE_RECOVERY);
3864 /* Don't remove the node unless the node
3865 * has been unregistered with the
3866 * transport, and we're not in recovery
3867 * before dev_loss_tmo triggered.
3868 * Otherwise, let dev_loss take care of
3871 if (!(ndlp->save_flags &
3872 NLP_IN_RECOV_POST_DEV_LOSS) &&
3873 !(ndlp->fc4_xpt_flags &
3874 (NVME_XPT_REGD | SCSI_XPT_REGD)))
3875 lpfc_disc_state_machine
3883 lpfc_destroy_vport_work_array(phba, vports);
3885 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3888 flush_workqueue(phba->wq);
3892 * lpfc_offline - Bring a HBA offline
3893 * @phba: pointer to lpfc hba data structure.
3895 * This routine actually brings a HBA offline. It stops all the timers
3896 * associated with the HBA, brings down the SLI layer, and eventually
3897 * marks the HBA as in offline state for the upper layer protocol.
3900 lpfc_offline(struct lpfc_hba *phba)
3902 struct Scsi_Host *shost;
3903 struct lpfc_vport **vports;
3906 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3909 /* stop port and all timers associated with this hba */
3910 lpfc_stop_port(phba);
3912 /* Tear down the local and target port registrations. The
3913 * nvme transports need to clean up.
3915 lpfc_nvmet_destroy_targetport(phba);
3916 lpfc_nvme_destroy_localport(phba->pport);
3918 vports = lpfc_create_vport_work_array(phba);
3920 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3921 lpfc_stop_vport_timers(vports[i]);
3922 lpfc_destroy_vport_work_array(phba, vports);
3923 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3924 "0460 Bring Adapter offline\n");
3925 /* Bring down the SLI Layer and cleanup. The HBA is offline
3927 lpfc_sli_hba_down(phba);
3928 spin_lock_irq(&phba->hbalock);
3930 spin_unlock_irq(&phba->hbalock);
3931 vports = lpfc_create_vport_work_array(phba);
3933 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3934 shost = lpfc_shost_from_vport(vports[i]);
3935 spin_lock_irq(shost->host_lock);
3936 vports[i]->work_port_events = 0;
3937 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3938 spin_unlock_irq(shost->host_lock);
3940 lpfc_destroy_vport_work_array(phba, vports);
3941 /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
3944 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3945 __lpfc_cpuhp_remove(phba);
3947 if (phba->cfg_xri_rebalancing)
3948 lpfc_destroy_multixri_pools(phba);
3952 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3953 * @phba: pointer to lpfc hba data structure.
3955 * This routine is to free all the SCSI buffers and IOCBs from the driver
3956 * list back to the kernel. It is called from lpfc_pci_remove_one to free
3957 * the internal resources before the device is removed from the system.
3960 lpfc_scsi_free(struct lpfc_hba *phba)
3962 struct lpfc_io_buf *sb, *sb_next;
3964 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3967 spin_lock_irq(&phba->hbalock);
3969 /* Release all the lpfc_scsi_bufs maintained by this host. */
3971 spin_lock(&phba->scsi_buf_list_put_lock);
3972 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3974 list_del(&sb->list);
3975 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3978 phba->total_scsi_bufs--;
3980 spin_unlock(&phba->scsi_buf_list_put_lock);
3982 spin_lock(&phba->scsi_buf_list_get_lock);
3983 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3985 list_del(&sb->list);
3986 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3989 phba->total_scsi_bufs--;
3991 spin_unlock(&phba->scsi_buf_list_get_lock);
3992 spin_unlock_irq(&phba->hbalock);
3996 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
3997 * @phba: pointer to lpfc hba data structure.
3999 * This routine is to free all the IO buffers and IOCBs from the driver
4000 * list back to the kernel. It is called from lpfc_pci_remove_one to free
4001 * the internal resources before the device is removed from the system.
4004 lpfc_io_free(struct lpfc_hba *phba)
4006 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
4007 struct lpfc_sli4_hdw_queue *qp;
4010 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4011 qp = &phba->sli4_hba.hdwq[idx];
4012 /* Release all the lpfc_nvme_bufs maintained by this host. */
4013 spin_lock(&qp->io_buf_list_put_lock);
4014 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4015 &qp->lpfc_io_buf_list_put,
4017 list_del(&lpfc_ncmd->list);
4019 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4020 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4021 if (phba->cfg_xpsgl && !phba->nvmet_support)
4022 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4023 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4025 qp->total_io_bufs--;
4027 spin_unlock(&qp->io_buf_list_put_lock);
4029 spin_lock(&qp->io_buf_list_get_lock);
4030 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4031 &qp->lpfc_io_buf_list_get,
4033 list_del(&lpfc_ncmd->list);
4035 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4036 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4037 if (phba->cfg_xpsgl && !phba->nvmet_support)
4038 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4039 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4041 qp->total_io_bufs--;
4043 spin_unlock(&qp->io_buf_list_get_lock);
4048 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
4049 * @phba: pointer to lpfc hba data structure.
4051 * This routine first calculates the sizes of the current els and allocated
4052 * scsi sgl lists, and then goes through all sgls to update the physical
4053 * XRIs assigned due to port function reset. During port initialization, the
4054 * current els and allocated scsi sgl lists are 0s.
4057 * 0 - successful (for now, it always returns 0)
4060 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
4062 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4063 uint16_t i, lxri, xri_cnt, els_xri_cnt;
4064 LIST_HEAD(els_sgl_list);
4068 * update on pci function's els xri-sgl list
4070 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4072 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
4073 /* els xri-sgl expanded */
4074 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
4075 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4076 "3157 ELS xri-sgl count increased from "
4077 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4079 /* allocate the additional els sgls */
4080 for (i = 0; i < xri_cnt; i++) {
4081 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4083 if (sglq_entry == NULL) {
4084 lpfc_printf_log(phba, KERN_ERR,
4086 "2562 Failure to allocate an "
4087 "ELS sgl entry:%d\n", i);
4091 sglq_entry->buff_type = GEN_BUFF_TYPE;
4092 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
4094 if (sglq_entry->virt == NULL) {
4096 lpfc_printf_log(phba, KERN_ERR,
4098 "2563 Failure to allocate an "
4099 "ELS mbuf:%d\n", i);
4103 sglq_entry->sgl = sglq_entry->virt;
4104 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4105 sglq_entry->state = SGL_FREED;
4106 list_add_tail(&sglq_entry->list, &els_sgl_list);
4108 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4109 list_splice_init(&els_sgl_list,
4110 &phba->sli4_hba.lpfc_els_sgl_list);
4111 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4112 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
4113 /* els xri-sgl shrunk */
4114 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
4115 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4116 "3158 ELS xri-sgl count decreased from "
4117 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4119 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4120 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
4122 /* release extra els sgls from list */
4123 for (i = 0; i < xri_cnt; i++) {
4124 list_remove_head(&els_sgl_list,
4125 sglq_entry, struct lpfc_sglq, list);
4127 __lpfc_mbuf_free(phba, sglq_entry->virt,
4132 list_splice_init(&els_sgl_list,
4133 &phba->sli4_hba.lpfc_els_sgl_list);
4134 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4136 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4137 "3163 ELS xri-sgl count unchanged: %d\n",
4139 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
4141 /* update xris to els sgls on the list */
4143 sglq_entry_next = NULL;
4144 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4145 &phba->sli4_hba.lpfc_els_sgl_list, list) {
4146 lxri = lpfc_sli4_next_xritag(phba);
4147 if (lxri == NO_XRI) {
4148 lpfc_printf_log(phba, KERN_ERR,
4150 "2400 Failed to allocate xri for "
4155 sglq_entry->sli4_lxritag = lxri;
4156 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4161 lpfc_free_els_sgl_list(phba);
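/*
 * Illustrative walk-through of the update above, with hypothetical
 * counts: if a port function reset raises els_xri_cnt from 64 to 128,
 * xri_cnt = 128 - 64 = 64 new sglq entries are allocated and spliced
 * onto lpfc_els_sgl_list; had the count dropped to 32 instead,
 * 64 - 32 = 32 entries would be released from the list head. Either
 * way, every surviving sglq is then re-walked and handed a fresh
 * physical XRI from lpfc_sli4_next_xritag(), since the old XRI
 * assignments do not survive the reset.
 */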
4166 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
4167 * @phba: pointer to lpfc hba data structure.
4169 * This routine first calculates the sizes of the current els and allocated
4170 * nvmet sgl lists, and then goes through all sgls to update the physical
4171 * XRIs assigned due to port function reset. During port initialization, the
4172 * current els and allocated nvmet sgl lists are both 0.
4175 * 0 - successful (for now, it always returns 0)
4178 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
4180 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4181 uint16_t i, lxri, xri_cnt, els_xri_cnt;
4182 uint16_t nvmet_xri_cnt;
4183 LIST_HEAD(nvmet_sgl_list);
4187 * update on pci function's nvmet xri-sgl list
4189 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4191 /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
4192 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4193 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
4194 /* nvmet xri-sgl expanded */
4195 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
4196 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4197 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
4198 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
4199 /* allocate the additional nvmet sgls */
4200 for (i = 0; i < xri_cnt; i++) {
4201 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4203 if (sglq_entry == NULL) {
4204 lpfc_printf_log(phba, KERN_ERR,
4206 "6303 Failure to allocate an "
4207 "NVMET sgl entry:%d\n", i);
4211 sglq_entry->buff_type = NVMET_BUFF_TYPE;
4212 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
4214 if (sglq_entry->virt == NULL) {
4216 lpfc_printf_log(phba, KERN_ERR,
4218 "6304 Failure to allocate an "
4219 "NVMET buf:%d\n", i);
4223 sglq_entry->sgl = sglq_entry->virt;
4224 memset(sglq_entry->sgl, 0,
4225 phba->cfg_sg_dma_buf_size);
4226 sglq_entry->state = SGL_FREED;
4227 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
4229 spin_lock_irq(&phba->hbalock);
4230 spin_lock(&phba->sli4_hba.sgl_list_lock);
4231 list_splice_init(&nvmet_sgl_list,
4232 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4233 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4234 spin_unlock_irq(&phba->hbalock);
4235 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
4236 /* nvmet xri-sgl shrunk */
4237 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
4238 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4239 "6305 NVMET xri-sgl count decreased from "
4240 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
4242 spin_lock_irq(&phba->hbalock);
4243 spin_lock(&phba->sli4_hba.sgl_list_lock);
4244 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
4246 /* release extra nvmet sgls from list */
4247 for (i = 0; i < xri_cnt; i++) {
4248 list_remove_head(&nvmet_sgl_list,
4249 sglq_entry, struct lpfc_sglq, list);
4251 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
4256 list_splice_init(&nvmet_sgl_list,
4257 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4258 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4259 spin_unlock_irq(&phba->hbalock);
4261 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4262 "6306 NVMET xri-sgl count unchanged: %d\n",
4264 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
4266 /* update xris to nvmet sgls on the list */
4268 sglq_entry_next = NULL;
4269 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4270 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
4271 lxri = lpfc_sli4_next_xritag(phba);
4272 if (lxri == NO_XRI) {
4273 lpfc_printf_log(phba, KERN_ERR,
4275 "6307 Failed to allocate xri for "
4280 sglq_entry->sli4_lxritag = lxri;
4281 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4286 lpfc_free_nvmet_sgl_list(phba);
4291 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
4294 struct lpfc_sli4_hdw_queue *qp;
4295 struct lpfc_io_buf *lpfc_cmd;
4296 struct lpfc_io_buf *iobufp, *prev_iobufp;
4297 int idx, cnt, xri, inserted;
4300 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4301 qp = &phba->sli4_hba.hdwq[idx];
4302 spin_lock_irq(&qp->io_buf_list_get_lock);
4303 spin_lock(&qp->io_buf_list_put_lock);
4305 /* Take everything off the get and put lists */
4306 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4307 list_splice(&qp->lpfc_io_buf_list_put, &blist);
4308 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4309 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4310 cnt += qp->get_io_bufs + qp->put_io_bufs;
4311 qp->get_io_bufs = 0;
4312 qp->put_io_bufs = 0;
4313 qp->total_io_bufs = 0;
4314 spin_unlock(&qp->io_buf_list_put_lock);
4315 spin_unlock_irq(&qp->io_buf_list_get_lock);
4319 * Take IO buffers off blist and put on cbuf sorted by XRI.
4320 * This is because POST_SGL takes a sequential range of XRIs
4321 * to post to the firmware.
4323 for (idx = 0; idx < cnt; idx++) {
4324 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
4328 list_add_tail(&lpfc_cmd->list, cbuf);
4331 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4334 list_for_each_entry(iobufp, cbuf, list) {
4335 if (xri < iobufp->cur_iocbq.sli4_xritag) {
4337 list_add(&lpfc_cmd->list,
4338 &prev_iobufp->list);
4340 list_add(&lpfc_cmd->list, cbuf);
4344 prev_iobufp = iobufp;
4347 list_add_tail(&lpfc_cmd->list, cbuf);
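/*
 * Sketch of the ordered insert above, using made-up XRI values: with
 * cbuf already holding buffers for XRIs {10, 20, 30}, an incoming
 * buffer with XRI 25 stops the list_for_each_entry() walk at 30 (the
 * first entry with a larger XRI) and is linked after the previous
 * entry, giving {10, 20, 25, 30}. Keeping cbuf sorted this way is
 * what lets the later SGL block post hand the firmware a sequential
 * XRI range.
 */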
4353 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4355 struct lpfc_sli4_hdw_queue *qp;
4356 struct lpfc_io_buf *lpfc_cmd;
4359 qp = phba->sli4_hba.hdwq;
4361 while (!list_empty(cbuf)) {
4362 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4363 list_remove_head(cbuf, lpfc_cmd,
4364 struct lpfc_io_buf, list);
4368 qp = &phba->sli4_hba.hdwq[idx];
4369 lpfc_cmd->hdwq_no = idx;
4370 lpfc_cmd->hdwq = qp;
4371 lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
4372 spin_lock(&qp->io_buf_list_put_lock);
4373 list_add_tail(&lpfc_cmd->list,
4374 &qp->lpfc_io_buf_list_put);
4376 qp->total_io_bufs++;
4377 spin_unlock(&qp->io_buf_list_put_lock);
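/*
 * Distribution sketch with hypothetical counts: for cfg_hdw_queue = 4
 * and 10 buffers on cbuf, the nested loops above deal the buffers out
 * round-robin, so hdwq 0 and 1 finish with 3 buffers each and hdwq 2
 * and 3 with 2 each. Each buffer's hdwq_no/hdwq fields are updated to
 * name the queue that now owns it before it is parked on that queue's
 * put list.
 */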
4384 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
4385 * @phba: pointer to lpfc hba data structure.
4387 * This routine first calculates the sizes of the current els and allocated
4388 * io sgl lists, and then goes through all sgls to update the physical
4389 * XRIs assigned due to port function reset. During port initialization, the
4390 * current els and allocated io sgl lists are both 0.
4393 * 0 - successful (for now, it always returns 0)
4396 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4398 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4399 uint16_t i, lxri, els_xri_cnt;
4400 uint16_t io_xri_cnt, io_xri_max;
4401 LIST_HEAD(io_sgl_list);
4405 * update on pci function's allocated nvme xri-sgl list
4408 /* maximum number of xris available for nvme buffers */
4409 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4410 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4411 phba->sli4_hba.io_xri_max = io_xri_max;
4413 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4414 "6074 Current allocated XRI sgl count:%d, "
4415 "maximum XRI count:%d els_xri_cnt:%d\n",
4416 phba->sli4_hba.io_xri_cnt,
4417 phba->sli4_hba.io_xri_max,
4420 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4422 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4423 /* max nvme xri shrunk below the allocated nvme buffers */
4424 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4425 phba->sli4_hba.io_xri_max;
4426 /* release the extra allocated nvme buffers */
4427 for (i = 0; i < io_xri_cnt; i++) {
4428 list_remove_head(&io_sgl_list, lpfc_ncmd,
4429 struct lpfc_io_buf, list);
4431 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4433 lpfc_ncmd->dma_handle);
4437 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4440 /* update xris associated to remaining allocated nvme buffers */
4442 lpfc_ncmd_next = NULL;
4443 phba->sli4_hba.io_xri_cnt = cnt;
4444 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4445 &io_sgl_list, list) {
4446 lxri = lpfc_sli4_next_xritag(phba);
4447 if (lxri == NO_XRI) {
4448 lpfc_printf_log(phba, KERN_ERR,
4450 "6075 Failed to allocate xri for "
4455 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4456 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4458 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4467 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4468 * @phba: Pointer to lpfc hba data structure.
4469 * @num_to_alloc: The requested number of buffers to allocate.
4471 * This routine allocates nvme buffers for a device with the SLI-4 interface
4472 * spec. The nvme buffer contains all the necessary information needed to
4473 * initiate an I/O. After allocating up to @num_to_alloc IO buffers and putting
4474 * them on a list, it posts them to the port by using SGL block post.
4477 * int - number of IO buffers that were allocated and posted.
4478 * 0 = failure, less than num_to_alloc is a partial failure.
4481 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4483 struct lpfc_io_buf *lpfc_ncmd;
4484 struct lpfc_iocbq *pwqeq;
4485 uint16_t iotag, lxri = 0;
4486 int bcnt, num_posted;
4487 LIST_HEAD(prep_nblist);
4488 LIST_HEAD(post_nblist);
4489 LIST_HEAD(nvme_nblist);
4491 phba->sli4_hba.io_xri_cnt = 0;
4492 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4493 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4497 * Get memory from the pci pool to map the virt space to
4498 * pci bus space for an I/O. The DMA buffer includes the
4499 * number of SGEs necessary to support the sg_tablesize.
4501 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4503 &lpfc_ncmd->dma_handle);
4504 if (!lpfc_ncmd->data) {
4509 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4510 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4513 * 4K Page alignment is CRITICAL to BlockGuard, double
4516 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4517 (((unsigned long)(lpfc_ncmd->data) &
4518 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4519 lpfc_printf_log(phba, KERN_ERR,
4521 "3369 Memory alignment err: "
4523 (unsigned long)lpfc_ncmd->data);
4524 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4526 lpfc_ncmd->dma_handle);
4532 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4534 lxri = lpfc_sli4_next_xritag(phba);
4535 if (lxri == NO_XRI) {
4536 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4537 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4541 pwqeq = &lpfc_ncmd->cur_iocbq;
4543 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4544 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4546 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4547 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4549 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4550 "6121 Failed to allocate IOTAG for"
4551 " XRI:0x%x\n", lxri);
4552 lpfc_sli4_free_xri(phba, lxri);
4555 pwqeq->sli4_lxritag = lxri;
4556 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4558 /* Initialize local short-hand pointers. */
4559 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4560 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4561 lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd;
4562 spin_lock_init(&lpfc_ncmd->buf_lock);
4564 /* add the nvme buffer to a post list */
4565 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4566 phba->sli4_hba.io_xri_cnt++;
4568 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4569 "6114 Allocate %d out of %d requested new NVME "
4570 "buffers of size x%zu bytes\n", bcnt, num_to_alloc,
4571 sizeof(*lpfc_ncmd));
4574 /* post the list of nvme buffer sgls to port if available */
4575 if (!list_empty(&post_nblist))
4576 num_posted = lpfc_sli4_post_io_sgl_list(
4577 phba, &post_nblist, bcnt);
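/*
 * Alignment example for the BlockGuard check above (hypothetical
 * address): with SLI4_PAGE_SIZE = 4096 the test masks the low 12 bits,
 * so a DMA buffer at 0x7f1235200 yields 0x7f1235200 & 0xfff = 0x200.
 * That is nonzero, meaning the buffer is not 4K aligned, so it is
 * freed back to the pool with the "3369" message instead of being
 * posted.
 */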
4585 lpfc_get_wwpn(struct lpfc_hba *phba)
4589 LPFC_MBOXQ_t *mboxq;
4592 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4595 return (uint64_t)-1;
4597 /* First get WWN of HBA instance */
4598 lpfc_read_nv(phba, mboxq);
4599 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4600 if (rc != MBX_SUCCESS) {
4601 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4602 "6019 Mailbox failed, mbxCmd x%x "
4603 "READ_NV, mbxStatus x%x\n",
4604 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4605 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4606 mempool_free(mboxq, phba->mbox_mem_pool);
4607 return (uint64_t) -1;
4610 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4611 /* wwn is WWPN of HBA instance */
4612 mempool_free(mboxq, phba->mbox_mem_pool);
4613 if (phba->sli_rev == LPFC_SLI_REV4)
4614 return be64_to_cpu(wwn);
4616 return rol64(wwn, 32);
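/*
 * Byte-order sketch with a hypothetical WWPN: READ_NV returns the
 * port name big-endian in varRDnvp.portname, so a raw memcpy of the
 * bytes 10 00 00 90 fa c7 e1 5b followed by be64_to_cpu() yields
 * 0x10000090fac7e15b on a little-endian SLI4 host. The SLI3 path is
 * assumed here to deliver the two 32-bit words pre-swapped, which is
 * why rol64(wwn, 32) exchanges the halves rather than reversing all
 * eight bytes.
 */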
4620 * lpfc_vmid_res_alloc - Allocates resources for VMID
4621 * @phba: pointer to lpfc hba data structure.
4622 * @vport: pointer to vport data structure
4624 * This routine allocates the resources needed for the VMID.
4631 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4633 /* VMID feature is supported only on SLI4 */
4634 if (phba->sli_rev == LPFC_SLI_REV3) {
4635 phba->cfg_vmid_app_header = 0;
4636 phba->cfg_vmid_priority_tagging = 0;
4639 if (lpfc_is_vmid_enabled(phba)) {
4641 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4646 rwlock_init(&vport->vmid_lock);
4648 /* Set the VMID parameters for the vport */
4649 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4650 vport->vmid_inactivity_timeout =
4651 phba->cfg_vmid_inactivity_timeout;
4652 vport->max_vmid = phba->cfg_max_vmid;
4653 vport->cur_vmid_cnt = 0;
4655 vport->vmid_priority_range = bitmap_zalloc
4656 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
4658 if (!vport->vmid_priority_range) {
4663 hash_init(vport->hash_table);
4669 * lpfc_create_port - Create an FC port
4670 * @phba: pointer to lpfc hba data structure.
4671 * @instance: a unique integer ID to this FC port.
4672 * @dev: pointer to the device data structure.
4674 * This routine creates an FC port for the upper layer protocol. The FC port
4675 * can be created on top of either a physical port or a virtual port provided
4676 * by the HBA. This routine also allocates a SCSI host data structure (shost)
4677 * and associates the FC port created before adding the shost into the SCSI
4681 * @vport - pointer to the virtual N_Port data structure.
4682 * NULL - port create failed.
4685 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4687 struct lpfc_vport *vport;
4688 struct Scsi_Host *shost = NULL;
4689 struct scsi_host_template *template;
4693 bool use_no_reset_hba = false;
4696 if (lpfc_no_hba_reset_cnt) {
4697 if (phba->sli_rev < LPFC_SLI_REV4 &&
4698 dev == &phba->pcidev->dev) {
4699 /* Reset the port first */
4700 lpfc_sli_brdrestart(phba);
4701 rc = lpfc_sli_chipset_init(phba);
4705 wwn = lpfc_get_wwpn(phba);
4708 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4709 if (wwn == lpfc_no_hba_reset[i]) {
4710 lpfc_printf_log(phba, KERN_ERR,
4712 "6020 Setting use_no_reset port=%llx\n",
4714 use_no_reset_hba = true;
4719 /* Seed template for SCSI host registration */
4720 if (dev == &phba->pcidev->dev) {
4721 template = &phba->port_template;
4723 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4724 /* Seed physical port template */
4725 memcpy(template, &lpfc_template, sizeof(*template));
4727 if (use_no_reset_hba)
4728 /* template is for a no reset SCSI Host */
4729 template->eh_host_reset_handler = NULL;
4731 /* Template for all vports this physical port creates */
4732 memcpy(&phba->vport_template, &lpfc_template,
4734 phba->vport_template.shost_groups = lpfc_vport_groups;
4735 phba->vport_template.eh_bus_reset_handler = NULL;
4736 phba->vport_template.eh_host_reset_handler = NULL;
4737 phba->vport_template.vendor_id = 0;
4739 /* Initialize the host templates with updated value */
4740 if (phba->sli_rev == LPFC_SLI_REV4) {
4741 template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4742 phba->vport_template.sg_tablesize =
4743 phba->cfg_scsi_seg_cnt;
4745 template->sg_tablesize = phba->cfg_sg_seg_cnt;
4746 phba->vport_template.sg_tablesize =
4747 phba->cfg_sg_seg_cnt;
4751 /* NVMET is for physical port only */
4752 memcpy(template, &lpfc_template_nvme,
4756 template = &phba->vport_template;
4759 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4763 vport = (struct lpfc_vport *) shost->hostdata;
4765 vport->load_flag |= FC_LOADING;
4766 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4767 vport->fc_rscn_flush = 0;
4768 lpfc_get_vport_cfgparam(vport);
4770 /* Adjust value in vport */
4771 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4773 shost->unique_id = instance;
4774 shost->max_id = LPFC_MAX_TARGET;
4775 shost->max_lun = vport->cfg_max_luns;
4776 shost->this_id = -1;
4777 shost->max_cmd_len = 16;
4779 if (phba->sli_rev == LPFC_SLI_REV4) {
4780 if (!phba->cfg_fcp_mq_threshold ||
4781 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4782 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4784 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4785 phba->cfg_fcp_mq_threshold);
4787 shost->dma_boundary =
4788 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4790 if (phba->cfg_xpsgl && !phba->nvmet_support)
4791 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4793 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4795 /* SLI-3 has a limited number of hardware queues (3),
4796 * thus there is only one for FCP processing.
4798 shost->nr_hw_queues = 1;
4801 * Set initial can_queue value since 0 is no longer supported and
4802 * scsi_add_host will fail. This will be adjusted later based on the
4803 * max xri value determined in hba setup.
4805 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4806 if (dev != &phba->pcidev->dev) {
4807 shost->transportt = lpfc_vport_transport_template;
4808 vport->port_type = LPFC_NPIV_PORT;
4810 shost->transportt = lpfc_transport_template;
4811 vport->port_type = LPFC_PHYSICAL_PORT;
4814 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4815 "9081 CreatePort TMPLATE type %x TBLsize %d "
4817 vport->port_type, shost->sg_tablesize,
4818 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4820 /* Allocate the resources for VMID */
4821 rc = lpfc_vmid_res_alloc(phba, vport);
4826 /* Initialize all internally managed lists. */
4827 INIT_LIST_HEAD(&vport->fc_nodes);
4828 INIT_LIST_HEAD(&vport->rcv_buffer_list);
4829 spin_lock_init(&vport->work_port_lock);
4831 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4833 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4835 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4837 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4838 lpfc_setup_bg(phba, shost);
4840 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4844 spin_lock_irq(&phba->port_list_lock);
4845 list_add_tail(&vport->listentry, &phba->port_list);
4846 spin_unlock_irq(&phba->port_list_lock);
4851 bitmap_free(vport->vmid_priority_range);
4852 scsi_host_put(shost);
4858 * destroy_port - destroy an FC port
4859 * @vport: pointer to an lpfc virtual N_Port data structure.
4861 * This routine destroys an FC port from the upper layer protocol. All the
4862 * resources associated with the port are released.
4865 destroy_port(struct lpfc_vport *vport)
4867 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4868 struct lpfc_hba *phba = vport->phba;
4870 lpfc_debugfs_terminate(vport);
4871 fc_remove_host(shost);
4872 scsi_remove_host(shost);
4874 spin_lock_irq(&phba->port_list_lock);
4875 list_del_init(&vport->listentry);
4876 spin_unlock_irq(&phba->port_list_lock);
4878 lpfc_cleanup(vport);
4883 * lpfc_get_instance - Get a unique integer ID
4885 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
4886 * uses the kernel idr facility to perform the task.
4889 * instance - a unique integer ID allocated as the new instance.
4890 * -1 - lpfc get instance failed.
4893 lpfc_get_instance(void)
4897 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4898 return ret < 0 ? -1 : ret;
4902 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4903 * @shost: pointer to SCSI host data structure.
4904 * @time: elapsed time of the scan in jiffies.
4906 * This routine is called by the SCSI layer with a SCSI host to determine
4907 * whether the host scan is finished.
4909 * Note: there is no scan_start function as adapter initialization will have
4910 * asynchronously kicked off the link initialization.
4913 * 0 - SCSI host scan is not over yet.
4914 * 1 - SCSI host scan is over.
4916 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4918 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4919 struct lpfc_hba *phba = vport->phba;
4922 spin_lock_irq(shost->host_lock);
4924 if (vport->load_flag & FC_UNLOADING) {
4928 if (time >= msecs_to_jiffies(30 * 1000)) {
4929 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4930 "0461 Scanning longer than 30 "
4931 "seconds. Continuing initialization\n");
4935 if (time >= msecs_to_jiffies(15 * 1000) &&
4936 phba->link_state <= LPFC_LINK_DOWN) {
4937 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4938 "0465 Link down longer than 15 "
4939 "seconds. Continuing initialization\n");
4944 if (vport->port_state != LPFC_VPORT_READY)
4946 if (vport->num_disc_nodes || vport->fc_prli_sent)
4948 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4950 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4956 spin_unlock_irq(shost->host_lock);
4960 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4962 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4963 struct lpfc_hba *phba = vport->phba;
4965 fc_host_supported_speeds(shost) = 0;
4967 * Avoid reporting supported link speed for FCoE as it can't be
4968 * controlled via FCoE.
4970 if (phba->hba_flag & HBA_FCOE_MODE)
4973 if (phba->lmt & LMT_256Gb)
4974 fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
4975 if (phba->lmt & LMT_128Gb)
4976 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4977 if (phba->lmt & LMT_64Gb)
4978 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4979 if (phba->lmt & LMT_32Gb)
4980 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4981 if (phba->lmt & LMT_16Gb)
4982 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4983 if (phba->lmt & LMT_10Gb)
4984 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4985 if (phba->lmt & LMT_8Gb)
4986 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4987 if (phba->lmt & LMT_4Gb)
4988 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4989 if (phba->lmt & LMT_2Gb)
4990 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4991 if (phba->lmt & LMT_1Gb)
4992 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4996 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
4997 * @shost: pointer to SCSI host data structure.
4999 * This routine initializes a given SCSI host attributes on a FC port. The
5000 * SCSI host can be either on top of a physical port or a virtual port.
5002 void lpfc_host_attrib_init(struct Scsi_Host *shost)
5004 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5005 struct lpfc_hba *phba = vport->phba;
5007 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
5010 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
5011 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
5012 fc_host_supported_classes(shost) = FC_COS_CLASS3;
5014 memset(fc_host_supported_fc4s(shost), 0,
5015 sizeof(fc_host_supported_fc4s(shost)));
5016 fc_host_supported_fc4s(shost)[2] = 1;
5017 fc_host_supported_fc4s(shost)[7] = 1;
5019 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
5020 sizeof fc_host_symbolic_name(shost));
5022 lpfc_host_supported_speeds_set(shost);
5024 fc_host_maxframe_size(shost) =
5025 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
5026 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
5028 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
5030 /* This value is also unchanging */
5031 memset(fc_host_active_fc4s(shost), 0,
5032 sizeof(fc_host_active_fc4s(shost)));
5033 fc_host_active_fc4s(shost)[2] = 1;
5034 fc_host_active_fc4s(shost)[7] = 1;
5036 fc_host_max_npiv_vports(shost) = phba->max_vpi;
5037 spin_lock_irq(shost->host_lock);
5038 vport->load_flag &= ~FC_LOADING;
5039 spin_unlock_irq(shost->host_lock);
5043 * lpfc_stop_port_s3 - Stop SLI3 device port
5044 * @phba: pointer to lpfc hba data structure.
5046 * This routine is invoked to stop an SLI3 device port; it stops the device
5047 * from generating interrupts and stops the device driver's timers for the
5051 lpfc_stop_port_s3(struct lpfc_hba *phba)
5053 /* Clear all interrupt enable conditions */
5054 writel(0, phba->HCregaddr);
5055 readl(phba->HCregaddr); /* flush */
5056 /* Clear all pending interrupts */
5057 writel(0xffffffff, phba->HAregaddr);
5058 readl(phba->HAregaddr); /* flush */
5060 /* Reset some HBA SLI setup states */
5061 lpfc_stop_hba_timers(phba);
5062 phba->pport->work_port_events = 0;
5066 * lpfc_stop_port_s4 - Stop SLI4 device port
5067 * @phba: pointer to lpfc hba data structure.
5069 * This routine is invoked to stop an SLI4 device port; it stops the device
5070 * from generating interrupts and stops the device driver's timers for the
5074 lpfc_stop_port_s4(struct lpfc_hba *phba)
5076 /* Reset some HBA SLI4 setup states */
5077 lpfc_stop_hba_timers(phba);
5079 phba->pport->work_port_events = 0;
5080 phba->sli4_hba.intr_enable = 0;
5084 * lpfc_stop_port - Wrapper function for stopping hba port
5085 * @phba: Pointer to HBA context object.
5087 * This routine wraps the actual SLI3 or SLI4 hba stop port routine via
5088 * the API jump table function pointer in the lpfc_hba struct.
5091 lpfc_stop_port(struct lpfc_hba *phba)
5093 phba->lpfc_stop_port(phba);
5096 flush_workqueue(phba->wq);
5100 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
5101 * @phba: Pointer to hba for which this call is being executed.
5103 * This routine starts the timer waiting for the FCF rediscovery to complete.
5106 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
5108 unsigned long fcf_redisc_wait_tmo =
5109 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
5110 /* Start fcf rediscovery wait period timer */
5111 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
5112 spin_lock_irq(&phba->hbalock);
5113 /* Allow action to new fcf asynchronous event */
5114 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
5115 /* Mark the FCF rediscovery pending state */
5116 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
5117 spin_unlock_irq(&phba->hbalock);
5121 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
5122 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5124 * This routine is invoked when the wait for FCF table rediscovery has
5125 * timed out. If new FCF record(s) have been discovered during the
5126 * wait period, a new FCF event shall be added to the FCoE async event
5127 * list, and the worker thread shall be woken up for processing from the
5128 * worker thread context.
5131 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
5133 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
5135 /* Don't send FCF rediscovery event if timer cancelled */
5136 spin_lock_irq(&phba->hbalock);
5137 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
5138 spin_unlock_irq(&phba->hbalock);
5141 /* Clear FCF rediscovery timer pending flag */
5142 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
5143 /* FCF rediscovery event to worker thread */
5144 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
5145 spin_unlock_irq(&phba->hbalock);
5146 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
5147 "2776 FCF rediscover quiescent timer expired\n");
5148 /* wake up worker thread */
5149 lpfc_worker_wake_up(phba);
5153 * lpfc_vmid_poll - VMID timeout detection
5154 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5156 * This routine is invoked when there is no I/O by a VM for the specified
5157 * amount of time. When this situation is detected, the VMID has to be
5158 * deregistered from the switch and all the local resources freed. The VMID
5159 * will be reassigned to the VM once the I/O begins.
5162 lpfc_vmid_poll(struct timer_list *t)
5164 struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
5167 /* check if there is a need to issue QFPA */
5168 if (phba->pport->vmid_priority_tagging) {
5170 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5173 /* Is the vmid inactivity timer enabled */
5174 if (phba->pport->vmid_inactivity_timeout ||
5175 phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
5177 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
5181 lpfc_worker_wake_up(phba);
5183 /* restart the timer for the next iteration */
5184 mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
5189 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
5190 * @phba: pointer to lpfc hba data structure.
5191 * @acqe_link: pointer to the async link completion queue entry.
5193 * This routine is to parse the SLI4 link-attention link fault code.
5196 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
5197 struct lpfc_acqe_link *acqe_link)
5199 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
5200 case LPFC_ASYNC_LINK_FAULT_NONE:
5201 case LPFC_ASYNC_LINK_FAULT_LOCAL:
5202 case LPFC_ASYNC_LINK_FAULT_REMOTE:
5203 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
5206 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5207 "0398 Unknown link fault code: x%x\n",
5208 bf_get(lpfc_acqe_link_fault, acqe_link));
5214 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5215 * @phba: pointer to lpfc hba data structure.
5216 * @acqe_link: pointer to the async link completion queue entry.
5218 * This routine is to parse the SLI4 link attention type and translate it
5219 * into the base driver's link attention type coding.
5221 * Return: Link attention type in terms of base driver's coding.
5224 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
5225 struct lpfc_acqe_link *acqe_link)
5229 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
5230 case LPFC_ASYNC_LINK_STATUS_DOWN:
5231 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
5232 att_type = LPFC_ATT_LINK_DOWN;
5234 case LPFC_ASYNC_LINK_STATUS_UP:
5235 /* Ignore physical link up events - wait for logical link up */
5236 att_type = LPFC_ATT_RESERVED;
5238 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
5239 att_type = LPFC_ATT_LINK_UP;
5242 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5243 "0399 Invalid link attention type: x%x\n",
5244 bf_get(lpfc_acqe_link_status, acqe_link));
5245 att_type = LPFC_ATT_RESERVED;
5252 * lpfc_sli_port_speed_get - Get the FC port link speed in Mbps
5253 * @phba: pointer to lpfc hba data structure.
5255 * This routine is to get an FC port's current link speed in Mbps.
5257 * Return: link speed in terms of Mbps.
5260 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
5262 uint32_t link_speed;
5264 if (!lpfc_is_link_up(phba))
5267 if (phba->sli_rev <= LPFC_SLI_REV3) {
5268 switch (phba->fc_linkspeed) {
5269 case LPFC_LINK_SPEED_1GHZ:
5272 case LPFC_LINK_SPEED_2GHZ:
5275 case LPFC_LINK_SPEED_4GHZ:
5278 case LPFC_LINK_SPEED_8GHZ:
5281 case LPFC_LINK_SPEED_10GHZ:
5284 case LPFC_LINK_SPEED_16GHZ:
5291 if (phba->sli4_hba.link_state.logical_speed)
5293 phba->sli4_hba.link_state.logical_speed;
5295 link_speed = phba->sli4_hba.link_state.speed;
5301 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
5302 * @phba: pointer to lpfc hba data structure.
5303 * @evt_code: asynchronous event code.
5304 * @speed_code: asynchronous event link speed code.
5306 * This routine is to parse the given SLI4 async event link speed code into
5307 * a link speed value in Mbps.
5309 * Return: link speed in terms of Mbps.
5312 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
5315 uint32_t port_speed;
5318 case LPFC_TRAILER_CODE_LINK:
5319 switch (speed_code) {
5320 case LPFC_ASYNC_LINK_SPEED_ZERO:
5323 case LPFC_ASYNC_LINK_SPEED_10MBPS:
5326 case LPFC_ASYNC_LINK_SPEED_100MBPS:
5329 case LPFC_ASYNC_LINK_SPEED_1GBPS:
5332 case LPFC_ASYNC_LINK_SPEED_10GBPS:
5335 case LPFC_ASYNC_LINK_SPEED_20GBPS:
5338 case LPFC_ASYNC_LINK_SPEED_25GBPS:
5341 case LPFC_ASYNC_LINK_SPEED_40GBPS:
5344 case LPFC_ASYNC_LINK_SPEED_100GBPS:
5345 port_speed = 100000;
5351 case LPFC_TRAILER_CODE_FC:
5352 switch (speed_code) {
5353 case LPFC_FC_LA_SPEED_UNKNOWN:
5356 case LPFC_FC_LA_SPEED_1G:
5359 case LPFC_FC_LA_SPEED_2G:
5362 case LPFC_FC_LA_SPEED_4G:
5365 case LPFC_FC_LA_SPEED_8G:
5368 case LPFC_FC_LA_SPEED_10G:
5371 case LPFC_FC_LA_SPEED_16G:
5374 case LPFC_FC_LA_SPEED_32G:
5377 case LPFC_FC_LA_SPEED_64G:
5380 case LPFC_FC_LA_SPEED_128G:
5381 port_speed = 128000;
5383 case LPFC_FC_LA_SPEED_256G:
5384 port_speed = 256000;
5397 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
5398 * @phba: pointer to lpfc hba data structure.
5399 * @acqe_link: pointer to the async link completion queue entry.
5401 * This routine is to handle the SLI4 asynchronous FCoE link event.
5404 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
5405 struct lpfc_acqe_link *acqe_link)
5409 struct lpfc_mbx_read_top *la;
5413 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
5414 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
5416 phba->fcoe_eventtag = acqe_link->event_tag;
5417 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5419 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5420 "0395 The mboxq allocation failed\n");
5424 rc = lpfc_mbox_rsrc_prep(phba, pmb);
5426 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5427 "0396 mailbox allocation failed\n");
5431 /* Cleanup any outstanding ELS commands */
5432 lpfc_els_flush_all_cmd(phba);
5434 /* Block ELS IOCBs until we have done process link event */
5435 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5437 /* Update link event statistics */
5438 phba->sli.slistat.link_event++;
5440 /* Create lpfc_handle_latt mailbox command from link ACQE */
5441 lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
5442 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5443 pmb->vport = phba->pport;
5445 /* Keep the link status for extra SLI4 state machine reference */
5446 phba->sli4_hba.link_state.speed =
5447 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5448 bf_get(lpfc_acqe_link_speed, acqe_link));
5449 phba->sli4_hba.link_state.duplex =
5450 bf_get(lpfc_acqe_link_duplex, acqe_link);
5451 phba->sli4_hba.link_state.status =
5452 bf_get(lpfc_acqe_link_status, acqe_link);
5453 phba->sli4_hba.link_state.type =
5454 bf_get(lpfc_acqe_link_type, acqe_link);
5455 phba->sli4_hba.link_state.number =
5456 bf_get(lpfc_acqe_link_number, acqe_link);
5457 phba->sli4_hba.link_state.fault =
5458 bf_get(lpfc_acqe_link_fault, acqe_link);
5459 phba->sli4_hba.link_state.logical_speed =
5460 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5462 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5463 "2900 Async FC/FCoE Link event - Speed:%dGBit "
5464 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5465 "Logical speed:%dMbps Fault:%d\n",
5466 phba->sli4_hba.link_state.speed,
5467 phba->sli4_hba.link_state.topology,
5468 phba->sli4_hba.link_state.status,
5469 phba->sli4_hba.link_state.type,
5470 phba->sli4_hba.link_state.number,
5471 phba->sli4_hba.link_state.logical_speed,
5472 phba->sli4_hba.link_state.fault);
5474 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
5475 * topology info. Note: Optional for non FC-AL ports.
5477 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5478 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5479 if (rc == MBX_NOT_FINISHED)
5484 * For FCoE Mode: fill in all the topology information we need and call
5485 * the READ_TOPOLOGY completion routine to continue without actually
5486 * sending the READ_TOPOLOGY mailbox command to the port.
5488 /* Initialize completion status */
5490 mb->mbxStatus = MBX_SUCCESS;
5492 /* Parse port fault information field */
5493 lpfc_sli4_parse_latt_fault(phba, acqe_link);
5495 /* Parse and translate link attention fields */
5496 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5497 la->eventTag = acqe_link->event_tag;
5498 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5499 bf_set(lpfc_mbx_read_top_link_spd, la,
5500 (bf_get(lpfc_acqe_link_speed, acqe_link)));
5502 /* Fake the following irrelevant fields */
5503 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5504 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5505 bf_set(lpfc_mbx_read_top_il, la, 0);
5506 bf_set(lpfc_mbx_read_top_pb, la, 0);
5507 bf_set(lpfc_mbx_read_top_fa, la, 0);
5508 bf_set(lpfc_mbx_read_top_mm, la, 0);
5510 /* Invoke the lpfc_handle_latt mailbox command callback function */
5511 lpfc_mbx_cmpl_read_topology(phba, pmb);
5516 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
5520 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read topology
5522 * @phba: pointer to lpfc hba data structure.
5523 * @speed_code: asynchronous event link speed code.
5525 * This routine is to parse the given SLI4 async event link speed code into
5526 * a Read topology link speed value.
5528 * Return: link speed in terms of Read topology.
5531 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5535 switch (speed_code) {
5536 case LPFC_FC_LA_SPEED_1G:
5537 port_speed = LPFC_LINK_SPEED_1GHZ;
5539 case LPFC_FC_LA_SPEED_2G:
5540 port_speed = LPFC_LINK_SPEED_2GHZ;
5542 case LPFC_FC_LA_SPEED_4G:
5543 port_speed = LPFC_LINK_SPEED_4GHZ;
5545 case LPFC_FC_LA_SPEED_8G:
5546 port_speed = LPFC_LINK_SPEED_8GHZ;
5548 case LPFC_FC_LA_SPEED_16G:
5549 port_speed = LPFC_LINK_SPEED_16GHZ;
5551 case LPFC_FC_LA_SPEED_32G:
5552 port_speed = LPFC_LINK_SPEED_32GHZ;
5554 case LPFC_FC_LA_SPEED_64G:
5555 port_speed = LPFC_LINK_SPEED_64GHZ;
5557 case LPFC_FC_LA_SPEED_128G:
5558 port_speed = LPFC_LINK_SPEED_128GHZ;
5560 case LPFC_FC_LA_SPEED_256G:
5561 port_speed = LPFC_LINK_SPEED_256GHZ;
5572 lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
5574 struct rxtable_entry *entry;
5575 int cnt = 0, head, tail, last, start;
5577 head = atomic_read(&phba->rxtable_idx_head);
5578 tail = atomic_read(&phba->rxtable_idx_tail);
5579 if (!phba->rxtable || head == tail) {
5580 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
5581 "4411 Rxtable is empty\n");
5587 /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */
5588 while (start != last) {
5592 start = LPFC_MAX_RXMONITOR_ENTRY - 1;
5593 entry = &phba->rxtable[start];
5594 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5595 "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
5596 "Lat %lld ASz %lld Info %02d BWUtil %d "
5598 cnt, entry->max_bytes_per_interval,
5599 entry->total_bytes, entry->rcv_bytes,
5600 entry->avg_io_latency, entry->avg_io_size,
5601 entry->cmf_info, entry->timer_utilization,
5602 entry->timer_interval, start);
5604 if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
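/*
 * Ring-wrap sketch with toy sizes (not the driver's real constants):
 * suppose the table held 8 entries and LPFC_MAX_RXMONITOR_DUMP were 4,
 * with head = 2. The loop above starts just before head and walks
 * backwards 1, 0, 7, 6, resuming at the top of the table when start
 * underflows, so the newest samples print first no matter where the
 * producer has wrapped.
 */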
5610 * lpfc_cgn_update_stat - Save data into congestion stats buffer
5611 * @phba: pointer to lpfc hba data structure.
5612 * @dtag: FPIN descriptor received
5614 * Increment the FPIN received counter/time when it happens.
5617 lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
5619 struct lpfc_cgn_info *cp;
5621 struct timespec64 cur_time;
5625 /* Make sure we have a congestion info buffer */
5628 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5629 ktime_get_real_ts64(&cur_time);
5630 time64_to_tm(cur_time.tv_sec, 0, &broken);
5632 /* Update congestion statistics */
5634 case ELS_DTAG_LNK_INTEGRITY:
5635 cnt = le32_to_cpu(cp->link_integ_notification);
5637 cp->link_integ_notification = cpu_to_le32(cnt);
5639 cp->cgn_stat_lnk_month = broken.tm_mon + 1;
5640 cp->cgn_stat_lnk_day = broken.tm_mday;
5641 cp->cgn_stat_lnk_year = broken.tm_year - 100;
5642 cp->cgn_stat_lnk_hour = broken.tm_hour;
5643 cp->cgn_stat_lnk_min = broken.tm_min;
5644 cp->cgn_stat_lnk_sec = broken.tm_sec;
5646 case ELS_DTAG_DELIVERY:
5647 cnt = le32_to_cpu(cp->delivery_notification);
5649 cp->delivery_notification = cpu_to_le32(cnt);
5651 cp->cgn_stat_del_month = broken.tm_mon + 1;
5652 cp->cgn_stat_del_day = broken.tm_mday;
5653 cp->cgn_stat_del_year = broken.tm_year - 100;
5654 cp->cgn_stat_del_hour = broken.tm_hour;
5655 cp->cgn_stat_del_min = broken.tm_min;
5656 cp->cgn_stat_del_sec = broken.tm_sec;
5658 case ELS_DTAG_PEER_CONGEST:
5659 cnt = le32_to_cpu(cp->cgn_peer_notification);
5661 cp->cgn_peer_notification = cpu_to_le32(cnt);
5663 cp->cgn_stat_peer_month = broken.tm_mon + 1;
5664 cp->cgn_stat_peer_day = broken.tm_mday;
5665 cp->cgn_stat_peer_year = broken.tm_year - 100;
5666 cp->cgn_stat_peer_hour = broken.tm_hour;
5667 cp->cgn_stat_peer_min = broken.tm_min;
5668 cp->cgn_stat_peer_sec = broken.tm_sec;
5670 case ELS_DTAG_CONGESTION:
5671 cnt = le32_to_cpu(cp->cgn_notification);
5673 cp->cgn_notification = cpu_to_le32(cnt);
5675 cp->cgn_stat_cgn_month = broken.tm_mon + 1;
5676 cp->cgn_stat_cgn_day = broken.tm_mday;
5677 cp->cgn_stat_cgn_year = broken.tm_year - 100;
5678 cp->cgn_stat_cgn_hour = broken.tm_hour;
5679 cp->cgn_stat_cgn_min = broken.tm_min;
5680 cp->cgn_stat_cgn_sec = broken.tm_sec;
5682 if (phba->cgn_fpin_frequency &&
5683 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5684 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5685 cp->cgn_stat_npm = value;
5687 value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5688 LPFC_CGN_CRC32_SEED);
5689 cp->cgn_info_crc = cpu_to_le32(value);
5693 * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer
5694 * @phba: pointer to lpfc hba data structure.
5696 * Save the congestion event data every minute.
5697 * On the hour, collapse all the minute data into hour data. Every day,
5698 * collapse all the hour data into daily data. Separate driver
5699 * and fabric congestion event counters are saved out
5700 * to the registered congestion buffer every minute.
5703 lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
5705 struct lpfc_cgn_info *cp;
5707 struct timespec64 cur_time;
5709 uint16_t value, mvalue;
5712 uint32_t dvalue, wvalue, lvalue, avalue;
5718 /* Make sure we have a congestion info buffer */
5721 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5723 if (time_before(jiffies, phba->cgn_evt_timestamp))
5725 phba->cgn_evt_timestamp = jiffies +
5726 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
5727 phba->cgn_evt_minute++;
5729 /* We should get to this point in the routine on 1 minute intervals */
5731 ktime_get_real_ts64(&cur_time);
5732 time64_to_tm(cur_time.tv_sec, 0, &broken);
5734 if (phba->cgn_fpin_frequency &&
5735 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5736 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5737 cp->cgn_stat_npm = value;
5740 /* Read and clear the latency counters for this minute */
5741 lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
5742 latsum = atomic64_read(&phba->cgn_latency_evt);
5743 atomic_set(&phba->cgn_latency_evt_cnt, 0);
5744 atomic64_set(&phba->cgn_latency_evt, 0);
5746 /* We need to store MB/sec bandwidth in the congestion information.
5747 * rx_block_cnt is the count of 512-byte blocks for the entire minute;
5748 * bps holds bytes per sec before finally converting to MB/sec.
5750 bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
5751 phba->rx_block_cnt = 0;
5752 mvalue = bps / (1024 * 1024); /* convert to MB/sec */
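/*
 * Worked conversion with hypothetical traffic: 24,576,000 blocks of
 * 512 bytes over the LPFC_SEC_MIN = 60 s window gives
 * bps = (24576000 / 60) * 512 = 209,715,200 bytes/sec, and
 * mvalue = 209715200 / (1024 * 1024) = 200 MB/sec, the value latched
 * into this minute's bandwidth slot below.
 */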
5755 /* cgn parameters */
5756 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
5757 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
5758 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
5759 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
5761 /* Fill in default LUN qdepth */
5762 value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
5763 cp->cgn_lunq = cpu_to_le16(value);
5765 /* Record congestion buffer info - every minute
5766 * cgn_driver_evt_cnt (Driver events)
5767 * cgn_fabric_warn_cnt (Congestion Warnings)
5768 * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency)
5769 * cgn_fabric_alarm_cnt (Congestion Alarms)
5771 index = ++cp->cgn_index_minute;
5772 if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
5773 cp->cgn_index_minute = 0;
5777 /* Get the number of driver events in this sample and reset counter */
5778 dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
5779 atomic_set(&phba->cgn_driver_evt_cnt, 0);
5781 /* Get the number of warning events - FPIN and Signal for this minute */
5783 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
5784 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
5785 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5786 wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
5787 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
5789 /* Get the number of alarm events - FPIN and Signal for this minute */
5791 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
5792 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5793 avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
5794 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
5796 /* Collect the driver, warning, alarm and latency counts for this
5797 * minute into the driver congestion buffer.
5799 ptr = &cp->cgn_drvr_min[index];
5800 value = (uint16_t)dvalue;
5801 *ptr = cpu_to_le16(value);
5803 ptr = &cp->cgn_warn_min[index];
5804 value = (uint16_t)wvalue;
5805 *ptr = cpu_to_le16(value);
5807 ptr = &cp->cgn_alarm_min[index];
5808 value = (uint16_t)avalue;
5809 *ptr = cpu_to_le16(value);
5811 lptr = &cp->cgn_latency_min[index];
5813 lvalue = (uint32_t)div_u64(latsum, lvalue);
5814 *lptr = cpu_to_le32(lvalue);
5819 /* Collect the bandwidth value into the driver's congestion buffer. */
5820 mptr = &cp->cgn_bw_min[index];
5821 *mptr = cpu_to_le16(mvalue);
5823 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5824 "2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
5825 index, dvalue, wvalue, *lptr, mvalue, avalue);
5828 if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
5829 /* Record congestion buffer info - every hour
5830 * Collapse all minutes into an hour
5832 index = ++cp->cgn_index_hour;
5833 if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
5834 cp->cgn_index_hour = 0;
5844 for (i = 0; i < LPFC_MIN_HOUR; i++) {
5845 dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
5846 wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
5847 lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
5848 mbps += le16_to_cpu(cp->cgn_bw_min[i]);
5849 avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
5851 if (lvalue) /* Avg of latency averages */
5852 lvalue /= LPFC_MIN_HOUR;
5853 if (mbps) /* Avg of Bandwidth averages */
5854 mvalue = mbps / LPFC_MIN_HOUR;
5856 lptr = &cp->cgn_drvr_hr[index];
5857 *lptr = cpu_to_le32(dvalue);
5858 lptr = &cp->cgn_warn_hr[index];
5859 *lptr = cpu_to_le32(wvalue);
5860 lptr = &cp->cgn_latency_hr[index];
5861 *lptr = cpu_to_le32(lvalue);
5862 mptr = &cp->cgn_bw_hr[index];
5863 *mptr = cpu_to_le16(mvalue);
5864 lptr = &cp->cgn_alarm_hr[index];
5865 *lptr = cpu_to_le32(avalue);
5867 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5868 "2419 Congestion Info - hour "
5869 "(%d): %d %d %d %d %d\n",
5870 index, dvalue, wvalue, lvalue, mvalue, avalue);
5874 if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
5875 /* Record congestion buffer info - every day
5876 * Collapse all hours into a day. Rotate days
5877 * after LPFC_MAX_CGN_DAYS.
5879 index = ++cp->cgn_index_day;
5880 if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
5881 cp->cgn_index_day = 0;
5885 /* Anytime we overwrite daily index 0, after we wrap,
5886 * we will be overwriting the oldest day, so we must
5887 * update the congestion data start time for that day.
5888 * That start time should have previously been saved after
5889 * we wrote the last day's worth of data.
5891 if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) {
5892 time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken);
5894 cp->cgn_info_month = broken.tm_mon + 1;
5895 cp->cgn_info_day = broken.tm_mday;
5896 cp->cgn_info_year = broken.tm_year - 100;
5897 cp->cgn_info_hour = broken.tm_hour;
5898 cp->cgn_info_minute = broken.tm_min;
5899 cp->cgn_info_second = broken.tm_sec;
5902 (phba, KERN_INFO, LOG_CGN_MGMT,
5903 "2646 CGNInfo idx0 Start Time: "
5904 "%d/%d/%d %d:%d:%d\n",
5905 cp->cgn_info_day, cp->cgn_info_month,
5906 cp->cgn_info_year, cp->cgn_info_hour,
5907 cp->cgn_info_minute, cp->cgn_info_second);
5916 for (i = 0; i < LPFC_HOUR_DAY; i++) {
5917 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
5918 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
5919 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
5920 mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
5921 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
5923 if (lvalue) /* Avg of latency averages */
5924 lvalue /= LPFC_HOUR_DAY;
5925 if (mbps) /* Avg of Bandwidth averages */
5926 mvalue = mbps / LPFC_HOUR_DAY;
5928 lptr = &cp->cgn_drvr_day[index];
5929 *lptr = cpu_to_le32(dvalue);
5930 lptr = &cp->cgn_warn_day[index];
5931 *lptr = cpu_to_le32(wvalue);
5932 lptr = &cp->cgn_latency_day[index];
5933 *lptr = cpu_to_le32(lvalue);
5934 mptr = &cp->cgn_bw_day[index];
5935 *mptr = cpu_to_le16(mvalue);
5936 lptr = &cp->cgn_alarm_day[index];
5937 *lptr = cpu_to_le32(avalue);
5939 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5940 "2420 Congestion Info - daily (%d): "
5942 index, dvalue, wvalue, lvalue, mvalue, avalue);
5944 /* We just wrote LPFC_MAX_CGN_DAYS of data,
5945 * so any data written after this point wraps around.
5946 * Save this as the start time for the next day.
5948 if (index == (LPFC_MAX_CGN_DAYS - 1)) {
5949 phba->hba_flag |= HBA_CGN_DAY_WRAP;
5950 ktime_get_real_ts64(&phba->cgn_daily_ts);
5954 /* Use the frequency found in the last rcv'ed FPIN */
5955 value = phba->cgn_fpin_frequency;
5956 cp->cgn_warn_freq = cpu_to_le16(value);
5957 cp->cgn_alarm_freq = cpu_to_le16(value);
5959 lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5960 LPFC_CGN_CRC32_SEED);
5961 cp->cgn_info_crc = cpu_to_le32(lvalue);
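/*
 * Rollup example with hypothetical counters: if each of the 60
 * per-minute slots recorded 2 fabric warnings, the hourly pass sums
 * cgn_warn_min[0..59] to 120 for cgn_warn_hr[index], and 24 such
 * hours sum to 2880 in cgn_warn_day[index]. Latency and bandwidth
 * are treated differently above: they are averages of averages,
 * divided by LPFC_MIN_HOUR or LPFC_HOUR_DAY instead of summed.
 */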
5965 * lpfc_calc_cmf_latency - latency from start of rx rate timer interval
5966 * @phba: The Hba for which this call is being executed.
5968 * The routine calculates the latency from the beginning of the CMF timer
5969 * interval to the current point in time. It is called from IO completion
5970 * when we exceed our Bandwidth limitation for the time interval.
5973 lpfc_calc_cmf_latency(struct lpfc_hba *phba)
5975 struct timespec64 cmpl_time;
5978 ktime_get_real_ts64(&cmpl_time);
5980 /* This routine works on a ms granularity so sec and nsec are
5981 * converted accordingly.
5983 if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
5984 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
5987 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
5988 msec = (cmpl_time.tv_sec -
5989 phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
5990 msec += ((cmpl_time.tv_nsec -
5991 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
5993 msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
5995 msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
5996 cmpl_time.tv_nsec) / NSEC_PER_MSEC);
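/*
 * Worked example with hypothetical timestamps: an interval starting
 * at tv_sec = 100, tv_nsec = 900000000 with completion at
 * tv_sec = 101, tv_nsec = 50000000 takes the third branch above
 * (seconds differ and the completion nsec is smaller), giving
 * msec = (101 - 100 - 1) * 1000
 *      + ((1000000000 - 900000000) + 50000000) / 1000000 = 150 ms.
 */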
6003 * lpfc_cmf_timer - This is the timer function for one congestion rate interval.
6005 * @timer: Pointer to the high resolution timer that expired
6007 static enum hrtimer_restart
6008 lpfc_cmf_timer(struct hrtimer *timer)
6010 struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
6012 struct rxtable_entry *entry;
6014 uint32_t head, tail;
6015 uint32_t busy, max_read;
6016 uint64_t total, rcv, lat, mbpi, extra, cnt;
6017 int timer_interval = LPFC_CMF_INTERVAL;
6019 struct lpfc_cgn_stat *cgs;
6022 /* Only restart the timer if congestion mgmt is on */
6023 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6024 !phba->cmf_latency.tv_sec) {
6025 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
6026 "6224 CMF timer exit: %d %lld\n",
6027 phba->cmf_active_mode,
6028 (uint64_t)phba->cmf_latency.tv_sec);
6029 return HRTIMER_NORESTART;
6032 /* If pport is not ready yet, just exit and wait for
6033 * the next timer cycle to hit.
6038 /* Do not block SCSI IO while in the timer routine since
6039 * total_bytes will be cleared
6041 atomic_set(&phba->cmf_stop_io, 1);
6043 /* First we need to calculate the actual ms between
6044 * the last timer interrupt and this one. We ask for
6045 * LPFC_CMF_INTERVAL, however the actual time may
6046 * vary depending on system overhead.
6048 ms = lpfc_calc_cmf_latency(phba);
6051 /* Immediately after we calculate the time since the last
6052 * timer interrupt, set the start time for the next
6055 ktime_get_real_ts64(&phba->cmf_latency);
6057 phba->cmf_link_byte_count =
6058 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
6060 /* Collect all the stats from the prior timer interval */
6065 for_each_present_cpu(cpu) {
6066 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
6067 total += atomic64_xchg(&cgs->total_bytes, 0);
6068 io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
6069 lat += atomic64_xchg(&cgs->rx_latency, 0);
6070 rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
6073 /* Before we issue another CMF_SYNC_WQE, retrieve the BW
6074 * returned from the last CMF_SYNC_WQE issued, from
6075 * cmf_last_sync_bw. This will be the target BW for
6076 * this next timer interval.
6078 if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
6079 phba->link_state != LPFC_LINK_DOWN &&
6080 phba->hba_flag & HBA_SETUP) {
6081 mbpi = phba->cmf_last_sync_bw;
6082 phba->cmf_last_sync_bw = 0;
6085 /* Calculate any extra bytes needed to account for the
6086 * timer accuracy. If we are less than LPFC_CMF_INTERVAL
6087 * calculate the adjustment needed for total to reflect
6088 * a full LPFC_CMF_INTERVAL.
6090 if (ms && ms < LPFC_CMF_INTERVAL) {
6091 cnt = div_u64(total, ms); /* bytes per ms */
6092 cnt *= LPFC_CMF_INTERVAL; /* what total should be */
6094 /* If the timeout is scheduled to be shorter,
6095 * this value may skew the data, so cap it at mbpi.
6097 if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi)
6100 extra = cnt - total;
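/*
 * Skew example with hypothetical numbers: if total = 90 MB arrived
 * but the timer fired at ms = 270 of an intended 300 ms interval,
 * cnt = (90 MB / 270) * 300 = 100 MB is what a full interval would
 * have carried, so extra = 100 - 90 = 10 MB is credited on the
 * CMF_SYNC_WQE issued below. (300 ms is an illustrative interval
 * length, not necessarily LPFC_CMF_INTERVAL's actual value.)
 */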
6102 lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
6104 /* For Monitor mode or link down we want mbpi
6105 * to be the full link speed
6107 mbpi = phba->cmf_link_byte_count;
6110 phba->cmf_timer_cnt++;
6113 /* Update congestion info buffer latency in us */
6114 atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
6115 atomic64_add(lat, &phba->cgn_latency_evt);
6117 busy = atomic_xchg(&phba->cmf_busy, 0);
6118 max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
6120 /* Calculate MBPI for the next timer interval */
6122 if (mbpi > phba->cmf_link_byte_count ||
6123 phba->cmf_active_mode == LPFC_CFG_MONITOR)
6124 mbpi = phba->cmf_link_byte_count;
6126 /* Change max_bytes_per_interval to what the prior
6127 * CMF_SYNC_WQE cmpl indicated.
6129 if (mbpi != phba->cmf_max_bytes_per_interval)
6130 phba->cmf_max_bytes_per_interval = mbpi;
6133 /* Save rxmonitor information for debug */
6134 if (phba->rxtable) {
6135 head = atomic_xchg(&phba->rxtable_idx_head,
6136 LPFC_RXMONITOR_TABLE_IN_USE);
6137 entry = &phba->rxtable[head];
6138 entry->total_bytes = total;
6139 entry->cmf_bytes = total + extra;
6140 entry->rcv_bytes = rcv;
6141 entry->cmf_busy = busy;
6142 entry->cmf_info = phba->cmf_active_info;
6144 entry->avg_io_latency = div_u64(lat, io_cnt);
6145 entry->avg_io_size = div_u64(rcv, io_cnt);
6147 entry->avg_io_latency = 0;
6148 entry->avg_io_size = 0;
6150 entry->max_read_cnt = max_read;
6151 entry->io_cnt = io_cnt;
6152 entry->max_bytes_per_interval = mbpi;
6153 if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
6154 entry->timer_utilization = phba->cmf_last_ts;
6156 entry->timer_utilization = ms;
6157 entry->timer_interval = ms;
6158 phba->cmf_last_ts = 0;
6160 /* Increment rxtable index */
6161 head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
6162 tail = atomic_read(&phba->rxtable_idx_tail);
6164 tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
6165 atomic_set(&phba->rxtable_idx_tail, tail);
6167 atomic_set(&phba->rxtable_idx_head, head);
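/* Sketch of the ring arithmetic above (table size hypothetical): with
 * LPFC_MAX_RXMONITOR_ENTRY of, say, 500, the head wraps as
 *
 *	head = (499 + 1) % 500;		wraps back to slot 0
 *
 * and when the head catches the tail, the tail is advanced too, so the
 * oldest debug entry is overwritten rather than blocking the producer.
 */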
6170 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
6171 /* If Monitor mode, check if we are oversubscribed
6172 * against the full line rate.
6174 if (mbpi && total > mbpi)
6175 atomic_inc(&phba->cgn_driver_evt_cnt);
6177 phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */
6179 /* Each minute save Fabric and Driver congestion information */
6180 lpfc_cgn_save_evt_cnt(phba);
6182 phba->hba_flag &= ~HBA_SHORT_CMF;
6184 /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the
6185 * minute, adjust our next timer interval, if needed, to ensure a
6186 * 1 minute granularity when we get the next timer interrupt.
6188 if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL),
6189 phba->cgn_evt_timestamp)) {
6190 timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp -
6192 if (timer_interval <= 0)
6193 timer_interval = LPFC_CMF_INTERVAL;
6195 phba->hba_flag |= HBA_SHORT_CMF;
6197 /* If we adjust timer_interval, max_bytes_per_interval
6198 * needs to be adjusted as well.
6200 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
6201 timer_interval, 1000);
6202 if (phba->cmf_active_mode == LPFC_CFG_MONITOR)
6203 phba->cmf_max_bytes_per_interval =
6204 phba->cmf_link_byte_count;
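/* Worked example (hypothetical values): if the next one-minute boundary
 * were only 40 ms away and LPFC_CMF_INTERVAL were 100 ms, then
 *
 *	timer_interval = 40;
 *	phba->cmf_link_byte_count =
 *		div_u64(phba->cmf_max_line_rate * 40, 1000);
 *
 * i.e. the byte budget shrinks to 40% of a full interval so the
 * shortened window is policed proportionally.
 */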
6207 /* Since total_bytes has already been zeroed, it's okay to unblock
6208 * after max_bytes_per_interval is setup.
6210 if (atomic_xchg(&phba->cmf_bw_wait, 0))
6211 queue_work(phba->wq, &phba->unblock_request_work);
6213 /* SCSI IO is now unblocked */
6214 atomic_set(&phba->cmf_stop_io, 0);
6217 hrtimer_forward_now(timer,
6218 ktime_set(0, timer_interval * NSEC_PER_MSEC));
6219 return HRTIMER_RESTART;
6222 #define trunk_link_status(__idx)\
6223 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6224 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
6225 "Link up" : "Link down") : "NA"
6226 /* Did port __idx report an error? */
6227 #define trunk_port_fault(__idx)\
6228 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6229 (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
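/* For reference, trunk_link_status(0) expands via token pasting to:
 *
 *	bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc) ?
 *		((phba->trunk_link.link0.state == LPFC_LINK_UP) ?
 *			"Link up" : "Link down") : "NA"
 *
 * so ports that are not part of the trunk config report "NA".
 */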
6232 lpfc_update_trunk_link_status(struct lpfc_hba *phba,
6233 struct lpfc_acqe_fc_la *acqe_fc)
6235 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
6236 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
6238 phba->sli4_hba.link_state.speed =
6239 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6240 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6242 phba->sli4_hba.link_state.logical_speed =
6243 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6244 /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
6245 phba->fc_linkspeed =
6246 lpfc_async_link_speed_to_read_top(
6248 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6250 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
6251 phba->trunk_link.link0.state =
6252 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
6253 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6254 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
6256 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
6257 phba->trunk_link.link1.state =
6258 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
6259 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6260 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
6262 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
6263 phba->trunk_link.link2.state =
6264 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
6265 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6266 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
6268 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
6269 phba->trunk_link.link3.state =
6270 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
6271 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6272 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
6275 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6276 "2910 Async FC Trunking Event - Speed:%d\n"
6277 "\tLogical speed:%d "
6278 "port0: %s port1: %s port2: %s port3: %s\n",
6279 phba->sli4_hba.link_state.speed,
6280 phba->sli4_hba.link_state.logical_speed,
6281 trunk_link_status(0), trunk_link_status(1),
6282 trunk_link_status(2), trunk_link_status(3));
6284 if (phba->cmf_active_mode != LPFC_CFG_OFF)
6285 lpfc_cmf_signal_init(phba);
6288 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6289 "3202 trunk error:0x%x (%s) seen on port0:%s "
6291 * SLI-4: We have only 0xA error codes
6292 * defined as of now. Print an appropriate
6293 * message in case the driver needs to be updated.
6295 "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
6296 "UNDEFINED. update driver." : trunk_errmsg[err],
6297 trunk_port_fault(0), trunk_port_fault(1),
6298 trunk_port_fault(2), trunk_port_fault(3));
6303 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
6304 * @phba: pointer to lpfc hba data structure.
6305 * @acqe_fc: pointer to the async fc completion queue entry.
6307 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
6308 * that the event was received and then issue a read_topology mailbox command so
6309 * that the rest of the driver will treat it the same as SLI3.
6312 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
6316 struct lpfc_mbx_read_top *la;
6319 if (bf_get(lpfc_trailer_type, acqe_fc) !=
6320 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
6321 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6322 "2895 Non FC link Event detected.(%d)\n",
6323 bf_get(lpfc_trailer_type, acqe_fc));
6327 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6328 LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
6329 lpfc_update_trunk_link_status(phba, acqe_fc);
6333 /* Keep the link status for extra SLI4 state machine reference */
6334 phba->sli4_hba.link_state.speed =
6335 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6336 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6337 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
6338 phba->sli4_hba.link_state.topology =
6339 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
6340 phba->sli4_hba.link_state.status =
6341 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
6342 phba->sli4_hba.link_state.type =
6343 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
6344 phba->sli4_hba.link_state.number =
6345 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
6346 phba->sli4_hba.link_state.fault =
6347 bf_get(lpfc_acqe_link_fault, acqe_fc);
6349 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6350 LPFC_FC_LA_TYPE_LINK_DOWN)
6351 phba->sli4_hba.link_state.logical_speed = 0;
6352 else if (!phba->sli4_hba.conf_trunk)
6353 phba->sli4_hba.link_state.logical_speed =
6354 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6356 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6357 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
6358 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
6359 "%dMbps Fault:%d\n",
6360 phba->sli4_hba.link_state.speed,
6361 phba->sli4_hba.link_state.topology,
6362 phba->sli4_hba.link_state.status,
6363 phba->sli4_hba.link_state.type,
6364 phba->sli4_hba.link_state.number,
6365 phba->sli4_hba.link_state.logical_speed,
6366 phba->sli4_hba.link_state.fault);
6367 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6369 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6370 "2897 The mboxq allocation failed\n");
6373 rc = lpfc_mbox_rsrc_prep(phba, pmb);
6375 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6376 "2898 The mboxq prep failed\n");
6380 /* Cleanup any outstanding ELS commands */
6381 lpfc_els_flush_all_cmd(phba);
6383 /* Block ELS IOCBs until we have processed the link event */
6384 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
6386 /* Update link event statistics */
6387 phba->sli.slistat.link_event++;
6389 /* Create lpfc_handle_latt mailbox command from link ACQE */
6390 lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
6391 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
6392 pmb->vport = phba->pport;
6394 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
6395 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
6397 switch (phba->sli4_hba.link_state.status) {
6398 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
6399 phba->link_flag |= LS_MDS_LINK_DOWN;
6401 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
6402 phba->link_flag |= LS_MDS_LOOPBACK;
6408 /* Initialize completion status */
6410 mb->mbxStatus = MBX_SUCCESS;
6412 /* Parse port fault information field */
6413 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
6415 /* Parse and translate link attention fields */
6416 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
6417 la->eventTag = acqe_fc->event_tag;
6419 if (phba->sli4_hba.link_state.status ==
6420 LPFC_FC_LA_TYPE_UNEXP_WWPN) {
6421 bf_set(lpfc_mbx_read_top_att_type, la,
6422 LPFC_FC_LA_TYPE_UNEXP_WWPN);
6424 bf_set(lpfc_mbx_read_top_att_type, la,
6425 LPFC_FC_LA_TYPE_LINK_DOWN);
6427 /* Invoke the mailbox command callback function */
6428 lpfc_mbx_cmpl_read_topology(phba, pmb);
6433 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
6434 if (rc == MBX_NOT_FINISHED)
6439 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
6443 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
6444 * @phba: pointer to lpfc hba data structure.
6445 * @acqe_sli: pointer to the async SLI completion queue entry.
6447 * This routine is to handle the SLI4 asynchronous SLI events.
6450 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
6456 uint8_t operational = 0;
6457 struct temp_event temp_event_data;
6458 struct lpfc_acqe_misconfigured_event *misconfigured;
6459 struct lpfc_acqe_cgn_signal *cgn_signal;
6460 struct Scsi_Host *shost;
6461 struct lpfc_vport **vports;
6464 evt_type = bf_get(lpfc_trailer_type, acqe_sli);
6466 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6467 "2901 Async SLI event - Type:%d, Event Data: x%08x "
6468 "x%08x x%08x x%08x\n", evt_type,
6469 acqe_sli->event_data1, acqe_sli->event_data2,
6470 acqe_sli->reserved, acqe_sli->trailer);
6472 port_name = phba->Port[0];
6473 if (port_name == 0x00)
6474 port_name = '?'; /* port name is empty */
6477 case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
6478 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6479 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
6480 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6482 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6483 "3190 Over Temperature:%d Celsius- Port Name %c\n",
6484 acqe_sli->event_data1, port_name);
6486 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
6487 shost = lpfc_shost_from_vport(phba->pport);
6488 fc_host_post_vendor_event(shost, fc_get_event_number(),
6489 sizeof(temp_event_data),
6490 (char *)&temp_event_data,
6491 SCSI_NL_VID_TYPE_PCI
6492 | PCI_VENDOR_ID_EMULEX);
6494 case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
6495 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6496 temp_event_data.event_code = LPFC_NORMAL_TEMP;
6497 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6499 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6500 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
6501 acqe_sli->event_data1, port_name);
6503 shost = lpfc_shost_from_vport(phba->pport);
6504 fc_host_post_vendor_event(shost, fc_get_event_number(),
6505 sizeof(temp_event_data),
6506 (char *)&temp_event_data,
6507 SCSI_NL_VID_TYPE_PCI
6508 | PCI_VENDOR_ID_EMULEX);
6510 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
6511 misconfigured = (struct lpfc_acqe_misconfigured_event *)
6512 &acqe_sli->event_data1;
6514 /* fetch the status for this port */
6515 switch (phba->sli4_hba.lnk_info.lnk_no) {
6516 case LPFC_LINK_NUMBER_0:
6517 status = bf_get(lpfc_sli_misconfigured_port0_state,
6518 &misconfigured->theEvent);
6519 operational = bf_get(lpfc_sli_misconfigured_port0_op,
6520 &misconfigured->theEvent);
6522 case LPFC_LINK_NUMBER_1:
6523 status = bf_get(lpfc_sli_misconfigured_port1_state,
6524 &misconfigured->theEvent);
6525 operational = bf_get(lpfc_sli_misconfigured_port1_op,
6526 &misconfigured->theEvent);
6528 case LPFC_LINK_NUMBER_2:
6529 status = bf_get(lpfc_sli_misconfigured_port2_state,
6530 &misconfigured->theEvent);
6531 operational = bf_get(lpfc_sli_misconfigured_port2_op,
6532 &misconfigured->theEvent);
6534 case LPFC_LINK_NUMBER_3:
6535 status = bf_get(lpfc_sli_misconfigured_port3_state,
6536 &misconfigured->theEvent);
6537 operational = bf_get(lpfc_sli_misconfigured_port3_op,
6538 &misconfigured->theEvent);
6541 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6543 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
6544 "event: Invalid link %d",
6545 phba->sli4_hba.lnk_info.lnk_no);
6549 /* Skip if optic state unchanged */
6550 if (phba->sli4_hba.lnk_info.optic_state == status)
6554 case LPFC_SLI_EVENT_STATUS_VALID:
6555 sprintf(message, "Physical Link is functional");
6557 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
6558 sprintf(message, "Optics faulted/incorrectly "
6559 "installed/not installed - Reseat optics, "
6560 "if issue not resolved, replace.");
6562 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
6564 "Optics of two types installed - Remove one "
6565 "optic or install matching pair of optics.");
6567 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
6568 sprintf(message, "Incompatible optics - Replace with "
6569 "compatible optics for card to function.");
6571 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
6572 sprintf(message, "Unqualified optics - Replace with "
6573 "Avago optics for Warranty and Technical "
6574 "Support - Link is%s operational",
6575 (operational) ? " not" : "");
6577 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
6578 sprintf(message, "Uncertified optics - Replace with "
6579 "Avago-certified optics to enable link "
6580 "operation - Link is%s operational",
6581 (operational) ? " not" : "");
6584 /* firmware is reporting a status we don't know about */
6585 sprintf(message, "Unknown event status x%02x", status);
6589 /* Issue READ_CONFIG mbox command to refresh supported speeds */
6590 rc = lpfc_sli4_read_config(phba);
6593 lpfc_printf_log(phba, KERN_ERR,
6595 "3194 Unable to retrieve supported "
6596 "speeds, rc = 0x%x\n", rc);
6598 rc = lpfc_sli4_refresh_params(phba);
6600 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6601 "3174 Unable to update pls support, "
6604 vports = lpfc_create_vport_work_array(phba);
6605 if (vports != NULL) {
6606 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6608 shost = lpfc_shost_from_vport(vports[i]);
6609 lpfc_host_supported_speeds_set(shost);
6612 lpfc_destroy_vport_work_array(phba, vports);
6614 phba->sli4_hba.lnk_info.optic_state = status;
6615 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6616 "3176 Port Name %c %s\n", port_name, message);
6618 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
6619 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6620 "3192 Remote DPort Test Initiated - "
6621 "Event Data1:x%08x Event Data2: x%08x\n",
6622 acqe_sli->event_data1, acqe_sli->event_data2);
6624 case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
6625 /* Call FW to obtain active parms */
6626 lpfc_sli4_cgn_parm_chg_evt(phba);
6628 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
6629 /* Misconfigured WWN. Reports that the SLI Port is configured
6630 * to use FA-WWN, but the attached device doesn't support it.
6631 * Event Data1 - N.A, Event Data2 - N.A
6632 * This event only happens on the physical port.
6634 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY,
6635 "2699 Misconfigured FA-PWWN - Attached device "
6636 "does not support FA-PWWN\n");
6637 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC;
6638 memset(phba->pport->fc_portname.u.wwn, 0,
6639 sizeof(struct lpfc_name));
6641 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
6642 /* EEPROM failure. No driver action is required */
6643 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6644 "2518 EEPROM failure - "
6645 "Event Data1: x%08x Event Data2: x%08x\n",
6646 acqe_sli->event_data1, acqe_sli->event_data2);
6648 case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
6649 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6651 cgn_signal = (struct lpfc_acqe_cgn_signal *)
6652 &acqe_sli->event_data1;
6653 phba->cgn_acqe_cnt++;
6655 cnt = bf_get(lpfc_warn_acqe, cgn_signal);
6656 atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
6657 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
6659 /* no threshold for CMF, even 1 signal will trigger an event */
6661 /* Alarm overrides warning, so check that first */
6662 if (cgn_signal->alarm_cnt) {
6663 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6664 /* Keep track of alarm cnt for CMF_SYNC_WQE */
6665 atomic_add(cgn_signal->alarm_cnt,
6666 &phba->cgn_sync_alarm_cnt);
6669 /* signal action needs to be taken */
6670 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
6671 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6672 /* Keep track of warning cnt for CMF_SYNC_WQE */
6673 atomic_add(cnt, &phba->cgn_sync_warn_cnt);
6678 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6679 "3193 Unrecognized SLI event, type: 0x%x",
6686 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
6687 * @vport: pointer to vport data structure.
6689 * This routine is to perform Clear Virtual Link (CVL) on a vport in
6690 * response to a CVL event.
6692 * Return the pointer to the ndlp with the vport if successful, otherwise NULL.
6695 static struct lpfc_nodelist *
6696 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
6698 struct lpfc_nodelist *ndlp;
6699 struct Scsi_Host *shost;
6700 struct lpfc_hba *phba;
6707 ndlp = lpfc_findnode_did(vport, Fabric_DID);
6709 /* Cannot find existing Fabric ndlp, so allocate a new one */
6710 ndlp = lpfc_nlp_init(vport, Fabric_DID);
6713 /* Set the node type */
6714 ndlp->nlp_type |= NLP_FABRIC;
6715 /* Put ndlp onto node list */
6716 lpfc_enqueue_node(vport, ndlp);
6718 if ((phba->pport->port_state < LPFC_FLOGI) &&
6719 (phba->pport->port_state != LPFC_VPORT_FAILED))
6721 /* If the virtual link is not yet instantiated, ignore CVL */
6722 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
6723 && (vport->port_state != LPFC_VPORT_FAILED))
6725 shost = lpfc_shost_from_vport(vport);
6728 lpfc_linkdown_port(vport);
6729 lpfc_cleanup_pending_mbox(vport);
6730 spin_lock_irq(shost->host_lock);
6731 vport->fc_flag |= FC_VPORT_CVL_RCVD;
6732 spin_unlock_irq(shost->host_lock);
6738 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
6739 * @phba: pointer to lpfc hba data structure.
6741 * This routine is to perform Clear Virtual Link (CVL) on all vports in
6742 * response to a FCF dead event.
6745 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
6747 struct lpfc_vport **vports;
6750 vports = lpfc_create_vport_work_array(phba);
6752 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
6753 lpfc_sli4_perform_vport_cvl(vports[i]);
6754 lpfc_destroy_vport_work_array(phba, vports);
6758 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
6759 * @phba: pointer to lpfc hba data structure.
6760 * @acqe_fip: pointer to the async fcoe completion queue entry.
6762 * This routine is to handle the SLI4 asynchronous fcoe event.
6765 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
6766 struct lpfc_acqe_fip *acqe_fip)
6768 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
6770 struct lpfc_vport *vport;
6771 struct lpfc_nodelist *ndlp;
6772 int active_vlink_present;
6773 struct lpfc_vport **vports;
6776 phba->fc_eventTag = acqe_fip->event_tag;
6777 phba->fcoe_eventtag = acqe_fip->event_tag;
6778 switch (event_type) {
6779 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
6780 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
6781 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
6782 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6783 "2546 New FCF event, evt_tag:x%x, "
6785 acqe_fip->event_tag,
6788 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
6790 "2788 FCF param modified event, "
6791 "evt_tag:x%x, index:x%x\n",
6792 acqe_fip->event_tag,
6794 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6796 * During period of FCF discovery, read the FCF
6797 * table record indexed by the event to update
6798 * FCF roundrobin failover eligible FCF bmask.
6800 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6802 "2779 Read FCF (x%x) for updating "
6803 "roundrobin FCF failover bmask\n",
6805 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
6808 /* If the FCF discovery is in progress, do nothing. */
6809 spin_lock_irq(&phba->hbalock);
6810 if (phba->hba_flag & FCF_TS_INPROG) {
6811 spin_unlock_irq(&phba->hbalock);
6814 /* If fast FCF failover rescan event is pending, do nothing */
6815 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
6816 spin_unlock_irq(&phba->hbalock);
6820 /* If the FCF has been in discovered state, do nothing. */
6821 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
6822 spin_unlock_irq(&phba->hbalock);
6825 spin_unlock_irq(&phba->hbalock);
6827 /* Otherwise, scan the entire FCF table and re-discover SAN */
6828 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6829 "2770 Start FCF table scan per async FCF "
6830 "event, evt_tag:x%x, index:x%x\n",
6831 acqe_fip->event_tag, acqe_fip->index);
6832 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
6833 LPFC_FCOE_FCF_GET_FIRST);
6835 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6836 "2547 Issue FCF scan read FCF mailbox "
6837 "command failed (x%x)\n", rc);
6840 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
6841 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6842 "2548 FCF Table full count 0x%x tag 0x%x\n",
6843 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
6844 acqe_fip->event_tag);
6847 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
6848 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6849 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6850 "2549 FCF (x%x) disconnected from network, "
6851 "tag:x%x\n", acqe_fip->index,
6852 acqe_fip->event_tag);
6854 * If we are in the middle of the FCF failover process, clear
6855 * the corresponding FCF bit in the roundrobin bitmap.
6857 spin_lock_irq(&phba->hbalock);
6858 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
6859 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
6860 spin_unlock_irq(&phba->hbalock);
6861 /* Update FLOGI FCF failover eligible FCF bmask */
6862 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
6865 spin_unlock_irq(&phba->hbalock);
6867 /* If the event is not for the currently used FCF, do nothing */
6868 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
6872 * Otherwise, request the port to rediscover the entire FCF
6873 * table for a fast recovery in case the current FCF
6874 * is no longer valid, as we are not already in the middle
6875 * of the FCF failover process.
6877 spin_lock_irq(&phba->hbalock);
6878 /* Mark the fast failover process in progress */
6879 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
6880 spin_unlock_irq(&phba->hbalock);
6882 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6883 "2771 Start FCF fast failover process due to "
6884 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
6885 "\n", acqe_fip->event_tag, acqe_fip->index);
6886 rc = lpfc_sli4_redisc_fcf_table(phba);
6888 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6890 "2772 Issue FCF rediscover mailbox "
6891 "command failed, fail through to FCF "
6893 spin_lock_irq(&phba->hbalock);
6894 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
6895 spin_unlock_irq(&phba->hbalock);
6897 * Last resort will fail over by treating this
6898 * as a link down to FCF registration.
6900 lpfc_sli4_fcf_dead_failthrough(phba);
6902 /* Reset FCF roundrobin bmask for new discovery */
6903 lpfc_sli4_clear_fcf_rr_bmask(phba);
6905 * Handling fast FCF failover to a DEAD FCF event is
6906 * considered equivalent to receiving CVL on all vports.
6908 lpfc_sli4_perform_all_vport_cvl(phba);
6911 case LPFC_FIP_EVENT_TYPE_CVL:
6912 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6913 lpfc_printf_log(phba, KERN_ERR,
6915 "2718 Clear Virtual Link Received for VPI 0x%x"
6916 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
6918 vport = lpfc_find_vport_by_vpid(phba,
6920 ndlp = lpfc_sli4_perform_vport_cvl(vport);
6923 active_vlink_present = 0;
6925 vports = lpfc_create_vport_work_array(phba);
6927 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6929 if ((!(vports[i]->fc_flag &
6930 FC_VPORT_CVL_RCVD)) &&
6931 (vports[i]->port_state > LPFC_FDISC)) {
6932 active_vlink_present = 1;
6936 lpfc_destroy_vport_work_array(phba, vports);
6940 * Don't re-instantiate if vport is marked for deletion.
6941 * If we are here first then vport_delete is going to wait
6942 * for discovery to complete.
6944 if (!(vport->load_flag & FC_UNLOADING) &&
6945 active_vlink_present) {
6947 * If there are other active VLinks present,
6948 * re-instantiate the Vlink using FDISC.
6950 mod_timer(&ndlp->nlp_delayfunc,
6951 jiffies + msecs_to_jiffies(1000));
6952 spin_lock_irq(&ndlp->lock);
6953 ndlp->nlp_flag |= NLP_DELAY_TMO;
6954 spin_unlock_irq(&ndlp->lock);
6955 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
6956 vport->port_state = LPFC_FDISC;
6959 * Otherwise, we request the port to rediscover
6960 * the entire FCF table for a fast recovery
6961 * in case the current FCF is no longer
6962 * valid, if we are not already
6963 * in the FCF failover process.
6965 spin_lock_irq(&phba->hbalock);
6966 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6967 spin_unlock_irq(&phba->hbalock);
6970 /* Mark the fast failover process in progress */
6971 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
6972 spin_unlock_irq(&phba->hbalock);
6973 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6975 "2773 Start FCF failover per CVL, "
6976 "evt_tag:x%x\n", acqe_fip->event_tag);
6977 rc = lpfc_sli4_redisc_fcf_table(phba);
6979 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6981 "2774 Issue FCF rediscover "
6982 "mailbox command failed, "
6983 "through to CVL event\n");
6984 spin_lock_irq(&phba->hbalock);
6985 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
6986 spin_unlock_irq(&phba->hbalock);
6988 * Last resort will be re-try on the
6989 * currently registered FCF entry.
6991 lpfc_retry_pport_discovery(phba);
6994 * Reset FCF roundrobin bmask for new discovery.
6997 lpfc_sli4_clear_fcf_rr_bmask(phba);
7001 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7002 "0288 Unknown FCoE event type 0x%x event tag "
7003 "0x%x\n", event_type, acqe_fip->event_tag);
7009 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
7010 * @phba: pointer to lpfc hba data structure.
7011 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
7013 * This routine is to handle the SLI4 asynchronous dcbx event.
7016 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
7017 struct lpfc_acqe_dcbx *acqe_dcbx)
7019 phba->fc_eventTag = acqe_dcbx->event_tag;
7020 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7021 "0290 The SLI4 DCBX asynchronous event is not "
7026 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
7027 * @phba: pointer to lpfc hba data structure.
7028 * @acqe_grp5: pointer to the async grp5 completion queue entry.
7030 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
7031 * is an asynchronous notification of a logical link speed change. The Port
7032 * reports the logical link speed in units of 10Mbps.
7035 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
7036 struct lpfc_acqe_grp5 *acqe_grp5)
7038 uint16_t prev_ll_spd;
7040 phba->fc_eventTag = acqe_grp5->event_tag;
7041 phba->fcoe_eventtag = acqe_grp5->event_tag;
7042 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
7043 phba->sli4_hba.link_state.logical_speed =
7044 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
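/* Example of the conversion above: a raw lpfc_acqe_grp5_llink_spd value
 * of 100 (units of 10 Mbps) yields a logical_speed of 1000 Mbps.
 */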
7045 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7046 "2789 GRP5 Async Event: Updating logical link speed "
7047 "from %dMbps to %dMbps\n", prev_ll_spd,
7048 phba->sli4_hba.link_state.logical_speed);
7052 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
7053 * @phba: pointer to lpfc hba data structure.
7055 * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event
7056 * is an asynchronous notification of a request to reset CM stats.
7059 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
7063 lpfc_init_congestion_stat(phba);
7067 * lpfc_cgn_params_val - Validate FW congestion parameters.
7068 * @phba: pointer to lpfc hba data structure.
7069 * @p_cfg_param: pointer to FW provided congestion parameters.
7071 * This routine validates the congestion parameters passed
7072 * by the FW to the driver via an ACQE event.
7075 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
7077 spin_lock_irq(&phba->hbalock);
7079 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
7080 LPFC_CFG_MONITOR)) {
7081 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
7082 "6225 CMF mode param out of range: %d\n",
7083 p_cfg_param->cgn_param_mode);
7084 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
7087 spin_unlock_irq(&phba->hbalock);
7091 * lpfc_cgn_params_parse - Process a FW cong parm change event
7092 * @phba: pointer to lpfc hba data structure.
7093 * @p_cgn_param: pointer to a data buffer with the FW cong params.
7094 * @len: the size of pdata in bytes.
7096 * This routine validates the congestion management buffer signature
7097 * from the FW, validates the contents and makes corrections for
7098 * valid, in-range values. If the signature magic is correct and
7099 * after parameter validation, the contents are copied to the driver's
7100 * @phba structure. If the magic is incorrect, an error message is logged.
7104 lpfc_cgn_params_parse(struct lpfc_hba *phba,
7105 struct lpfc_cgn_param *p_cgn_param, uint32_t len)
7107 struct lpfc_cgn_info *cp;
7108 uint32_t crc, oldmode;
7110 /* Make sure the FW has encoded the correct magic number to
7111 * validate the congestion parameter in FW memory.
7113 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
7114 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7115 "4668 FW cgn parm buffer data: "
7116 "magic 0x%x version %d mode %d "
7117 "level0 %d level1 %d "
7118 "level2 %d byte13 %d "
7119 "byte14 %d byte15 %d "
7120 "byte11 %d byte12 %d activeMode %d\n",
7121 p_cgn_param->cgn_param_magic,
7122 p_cgn_param->cgn_param_version,
7123 p_cgn_param->cgn_param_mode,
7124 p_cgn_param->cgn_param_level0,
7125 p_cgn_param->cgn_param_level1,
7126 p_cgn_param->cgn_param_level2,
7127 p_cgn_param->byte13,
7128 p_cgn_param->byte14,
7129 p_cgn_param->byte15,
7130 p_cgn_param->byte11,
7131 p_cgn_param->byte12,
7132 phba->cmf_active_mode);
7134 oldmode = phba->cmf_active_mode;
7136 /* Any parameters out of range are corrected to defaults
7137 * by this routine. No need to fail.
7139 lpfc_cgn_params_val(phba, p_cgn_param);
7141 /* Parameters are verified, move them into driver storage */
7142 spin_lock_irq(&phba->hbalock);
7143 memcpy(&phba->cgn_p, p_cgn_param,
7144 sizeof(struct lpfc_cgn_param));
7146 /* Update parameters in congestion info buffer now */
7148 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
7149 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
7150 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
7151 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
7152 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
7153 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
7154 LPFC_CGN_CRC32_SEED);
7155 cp->cgn_info_crc = cpu_to_le32(crc);
7157 spin_unlock_irq(&phba->hbalock);
7159 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
7163 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
7164 /* Turning CMF on */
7165 lpfc_cmf_start(phba);
7167 if (phba->link_state >= LPFC_LINK_UP) {
7168 phba->cgn_reg_fpin =
7169 phba->cgn_init_reg_fpin;
7170 phba->cgn_reg_signal =
7171 phba->cgn_init_reg_signal;
7172 lpfc_issue_els_edc(phba->pport, 0);
7176 case LPFC_CFG_MANAGED:
7177 switch (phba->cgn_p.cgn_param_mode) {
7179 /* Turning CMF off */
7180 lpfc_cmf_stop(phba);
7181 if (phba->link_state >= LPFC_LINK_UP)
7182 lpfc_issue_els_edc(phba->pport, 0);
7184 case LPFC_CFG_MONITOR:
7185 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7186 "4661 Switch from MANAGED to "
7188 phba->cmf_max_bytes_per_interval =
7189 phba->cmf_link_byte_count;
7191 /* Resume blocked IO - unblock on workqueue */
7192 queue_work(phba->wq,
7193 &phba->unblock_request_work);
7197 case LPFC_CFG_MONITOR:
7198 switch (phba->cgn_p.cgn_param_mode) {
7200 /* Turning CMF off */
7201 lpfc_cmf_stop(phba);
7202 if (phba->link_state >= LPFC_LINK_UP)
7203 lpfc_issue_els_edc(phba->pport, 0);
7205 case LPFC_CFG_MANAGED:
7206 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7207 "4662 Switch from MONITOR to "
7209 lpfc_cmf_signal_init(phba);
7215 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7216 "4669 FW cgn parm buf wrong magic 0x%x "
7217 "version %d\n", p_cgn_param->cgn_param_magic,
7218 p_cgn_param->cgn_param_version);
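/* Summary of the mode transitions handled above:
 *
 *	old mode -> new mode	action
 *	OFF      -> MANAGED or	lpfc_cmf_start(); restore signal/FPIN
 *	            MONITOR	registrations and issue EDC if link up
 *	MANAGED  -> OFF		lpfc_cmf_stop(); issue EDC if link up
 *	MANAGED  -> MONITOR	open max_bytes_per_interval to the full
 *				link budget and unblock waiting IO
 *	MONITOR  -> OFF		lpfc_cmf_stop(); issue EDC if link up
 *	MONITOR  -> MANAGED	lpfc_cmf_signal_init()
 */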
7223 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
7224 * @phba: pointer to lpfc hba data structure.
7226 * This routine issues a read_object mailbox command to
7227 * get the congestion management parameters from the FW
7228 * parses it and updates the driver maintained values.
7231 * 0 if the object was empty
7232 * -Eval (a negative errno) if an error was encountered
7233 * Count if bytes were read from object
7236 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
7239 struct lpfc_cgn_param *p_cgn_param = NULL;
7243 /* Find out if the FW has a new set of congestion parameters. */
7244 len = sizeof(struct lpfc_cgn_param);
7245 pdata = kzalloc(len, GFP_KERNEL);
if (!pdata)
	return -ENOMEM;
7246 ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
7249 /* 0 means no data. A negative means error. A positive means
7250 * bytes were copied.
7253 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7254 "4670 CGN RD OBJ returns no data\n");
7256 } else if (ret < 0) {
7257 /* Some error. Just exit and return it to the caller.*/
7261 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7262 "6234 READ CGN PARAMS Successful %d\n", len);
7264 /* Parse data pointer over len and update the phba congestion
7265 * parameters with values passed back. The receive rate values
7266 * may have been altered in FW, but take no action here.
7268 p_cgn_param = (struct lpfc_cgn_param *)pdata;
7269 lpfc_cgn_params_parse(phba, p_cgn_param, len);
7277 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
7278 * @phba: pointer to lpfc hba data structure.
7280 * The FW generated Async ACQE SLI event calls this routine when
7281 * the event type is an SLI Internal Port Event and the Event Code
7282 * indicates a change to the FW maintained congestion parameters.
7284 * This routine executes a Read_Object mailbox call to obtain the
7285 * current congestion parameters maintained in FW and corrects
7286 * the driver's active congestion parameters.
7288 * The acqe event is not passed because there is no further data
7291 * Returns nonzero error if event processing encountered an error.
7292 * Zero otherwise for success.
7295 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
7299 if (!phba->sli4_hba.pc_sli4_params.cmf) {
7300 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7301 "4664 Cgn Evt when E2E off. Drop event\n");
7305 /* If the event is claiming an empty object, it's ok. A write
7306 * could have cleared it. The only error is a negative return status.
7309 ret = lpfc_sli4_cgn_params_read(phba);
7311 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7312 "4667 Error reading Cgn Params (%d)\n",
7315 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7316 "4673 CGN Event empty object.\n");
7322 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
7323 * @phba: pointer to lpfc hba data structure.
7325 * This routine is invoked by the worker thread to process all the pending
7326 * SLI4 asynchronous events.
7328 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
7330 struct lpfc_cq_event *cq_event;
7331 unsigned long iflags;
7333 /* First, declare the async event has been handled */
7334 spin_lock_irqsave(&phba->hbalock, iflags);
7335 phba->hba_flag &= ~ASYNC_EVENT;
7336 spin_unlock_irqrestore(&phba->hbalock, iflags);
7338 /* Now, handle all the async events */
7339 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7340 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
7341 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
7342 cq_event, struct lpfc_cq_event, list);
7343 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
7346 /* Process the asynchronous event */
7347 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
7348 case LPFC_TRAILER_CODE_LINK:
7349 lpfc_sli4_async_link_evt(phba,
7350 &cq_event->cqe.acqe_link);
7352 case LPFC_TRAILER_CODE_FCOE:
7353 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
7355 case LPFC_TRAILER_CODE_DCBX:
7356 lpfc_sli4_async_dcbx_evt(phba,
7357 &cq_event->cqe.acqe_dcbx);
7359 case LPFC_TRAILER_CODE_GRP5:
7360 lpfc_sli4_async_grp5_evt(phba,
7361 &cq_event->cqe.acqe_grp5);
7363 case LPFC_TRAILER_CODE_FC:
7364 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
7366 case LPFC_TRAILER_CODE_SLI:
7367 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
7369 case LPFC_TRAILER_CODE_CMSTAT:
7370 lpfc_sli4_async_cmstat_evt(phba);
7373 lpfc_printf_log(phba, KERN_ERR,
7375 "1804 Invalid asynchronous event code: "
7376 "x%x\n", bf_get(lpfc_trailer_code,
7377 &cq_event->cqe.mcqe_cmpl));
7381 /* Free the completion event processed to the free pool */
7382 lpfc_sli4_cq_event_release(phba, cq_event);
7383 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7385 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
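/* The dispatch loop above uses the usual drop-the-lock-to-process
 * pattern; schematically:
 *
 *	spin_lock_irqsave(&lock, iflags);
 *	while (!list_empty(&queue)) {
 *		list_remove_head(&queue, cq_event, ...);
 *		spin_unlock_irqrestore(&lock, iflags);
 *		(dispatch on the trailer code, then release cq_event)
 *		spin_lock_irqsave(&lock, iflags);
 *	}
 *	spin_unlock_irqrestore(&lock, iflags);
 *
 * so no event handler ever runs with the async list lock held.
 */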
7389 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
7390 * @phba: pointer to lpfc hba data structure.
7392 * This routine is invoked by the worker thread to process FCF table
7393 * rediscovery pending completion event.
7395 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
7399 spin_lock_irq(&phba->hbalock);
7400 /* Clear FCF rediscovery timeout event */
7401 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
7402 /* Clear driver fast failover FCF record flag */
7403 phba->fcf.failover_rec.flag = 0;
7404 /* Set state for FCF fast failover */
7405 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
7406 spin_unlock_irq(&phba->hbalock);
7408 /* Scan FCF table from the first entry to re-discover SAN */
7409 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
7410 "2777 Start post-quiescent FCF table scan\n");
7411 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
7413 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7414 "2747 Issue FCF scan read FCF mailbox "
7415 "command failed 0x%x\n", rc);
7419 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
7420 * @phba: pointer to lpfc hba data structure.
7421 * @dev_grp: The HBA PCI-Device group number.
7423 * This routine is invoked to set up the per HBA PCI-Device group function
7424 * API jump table entries.
7426 * Return: 0 if success, otherwise -ENODEV
7429 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7433 /* Set up lpfc PCI-device group */
7434 phba->pci_dev_grp = dev_grp;
7436 /* The LPFC_PCI_DEV_OC uses SLI4 */
7437 if (dev_grp == LPFC_PCI_DEV_OC)
7438 phba->sli_rev = LPFC_SLI_REV4;
7440 /* Set up device INIT API function jump table */
7441 rc = lpfc_init_api_table_setup(phba, dev_grp);
7444 /* Set up SCSI API function jump table */
7445 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
7448 /* Set up SLI API function jump table */
7449 rc = lpfc_sli_api_table_setup(phba, dev_grp);
7452 /* Set up MBOX API function jump table */
7453 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
7461 * lpfc_log_intr_mode - Log the active interrupt mode
7462 * @phba: pointer to lpfc hba data structure.
7463 * @intr_mode: active interrupt mode adopted.
7465 * This routine is invoked to log the currently used active interrupt mode
7468 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
7470 switch (intr_mode) {
7472 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7473 "0470 Enable INTx interrupt mode.\n");
7476 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7477 "0481 Enabled MSI interrupt mode.\n");
7480 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7481 "0480 Enabled MSI-X interrupt mode.\n");
7484 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7485 "0482 Illegal interrupt mode.\n");
7492 * lpfc_enable_pci_dev - Enable a generic PCI device.
7493 * @phba: pointer to lpfc hba data structure.
7495 * This routine is invoked to enable the PCI device that is common to all PCI devices.
7500 * other values - error
7503 lpfc_enable_pci_dev(struct lpfc_hba *phba)
7505 struct pci_dev *pdev;
7507 /* Obtain PCI device reference */
7511 pdev = phba->pcidev;
7512 /* Enable PCI device */
7513 if (pci_enable_device_mem(pdev))
7515 /* Request PCI resource for the device */
7516 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
7517 goto out_disable_device;
7518 /* Set up device as PCI master and save state for EEH */
7519 pci_set_master(pdev);
7520 pci_try_set_mwi(pdev);
7521 pci_save_state(pdev);
7523 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
7524 if (pci_is_pcie(pdev))
7525 pdev->needs_freset = 1;
7530 pci_disable_device(pdev);
7532 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7533 "1401 Failed to enable pci device\n");
7538 * lpfc_disable_pci_dev - Disable a generic PCI device.
7539 * @phba: pointer to lpfc hba data structure.
7541 * This routine is invoked to disable the PCI device that is common to all PCI devices.
7545 lpfc_disable_pci_dev(struct lpfc_hba *phba)
7547 struct pci_dev *pdev;
7549 /* Obtain PCI device reference */
7553 pdev = phba->pcidev;
7554 /* Release PCI resource and disable PCI device */
7555 pci_release_mem_regions(pdev);
7556 pci_disable_device(pdev);
7562 * lpfc_reset_hba - Reset a hba
7563 * @phba: pointer to lpfc hba data structure.
7565 * This routine is invoked to reset a hba device. It brings the HBA
7566 * offline, performs a board restart, and then brings the board back
7567 * online. The lpfc_offline calls lpfc_sli_hba_down, which cleans up
7568 * outstanding mailbox commands.
7571 lpfc_reset_hba(struct lpfc_hba *phba)
7573 /* If resets are disabled then set error state and return. */
7574 if (!phba->cfg_enable_hba_reset) {
7575 phba->link_state = LPFC_HBA_ERROR;
7579 /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
7580 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7581 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7583 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7584 lpfc_sli_flush_io_rings(phba);
7587 lpfc_sli_brdrestart(phba);
7589 lpfc_unblock_mgmt_io(phba);
7593 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7594 * @phba: pointer to lpfc hba data structure.
7596 * This function reads the PCI SR-IOV extended capability to determine
7597 * the maximum number of virtual functions (PCI_SRIOV_TOTAL_VF) that
7598 * the physical function supports. A device that does not expose the
7599 * SR-IOV capability is reported as supporting zero virtual functions.
7603 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
7605 struct pci_dev *pdev = phba->pcidev;
7609 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
7613 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
7618 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
7619 * @phba: pointer to lpfc hba data structure.
7620 * @nr_vfn: number of virtual functions to be enabled.
7622 * This function enables the PCI SR-IOV virtual functions to a physical
7623 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7624 * enable that number of virtual functions on the physical function. As
7625 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
7626 * API call is not considered an error condition for most devices.
7629 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
7631 struct pci_dev *pdev = phba->pcidev;
7632 uint16_t max_nr_vfn;
7635 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
7636 if (nr_vfn > max_nr_vfn) {
7637 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7638 "3057 Requested vfs (%d) greater than "
7639 "supported vfs (%d)", nr_vfn, max_nr_vfn);
7643 rc = pci_enable_sriov(pdev, nr_vfn);
7645 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7646 "2806 Failed to enable sriov on this device "
7647 "with vfn number nr_vf:%d, rc:%d\n",
7650 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7651 "2807 Successful enable sriov on this device "
7652 "with vfn number nr_vf:%d\n", nr_vfn);
7657 lpfc_unblock_requests_work(struct work_struct *work)
7659 struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
7660 unblock_request_work);
7662 lpfc_unblock_requests(phba);
7666 * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources.
7667 * @phba: pointer to lpfc hba data structure.
7669 * This routine is invoked to set up the driver internal resources before the
7670 * device specific resource setup to support the HBA device it is attached to.
7674 * other values - error
7677 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
7679 struct lpfc_sli *psli = &phba->sli;
7682 * Driver resources common to all SLI revisions
7684 atomic_set(&phba->fast_event_count, 0);
7685 atomic_set(&phba->dbg_log_idx, 0);
7686 atomic_set(&phba->dbg_log_cnt, 0);
7687 atomic_set(&phba->dbg_log_dmping, 0);
7688 spin_lock_init(&phba->hbalock);
7690 /* Initialize port_list spinlock */
7691 spin_lock_init(&phba->port_list_lock);
7692 INIT_LIST_HEAD(&phba->port_list);
7694 INIT_LIST_HEAD(&phba->work_list);
7695 init_waitqueue_head(&phba->wait_4_mlo_m_q);
7697 /* Initialize the wait queue head for the kernel thread */
7698 init_waitqueue_head(&phba->work_waitq);
7700 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7701 "1403 Protocols supported %s %s %s\n",
7702 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
7704 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
7706 (phba->nvmet_support ? "NVMET" : " "));
7708 /* Initialize the IO buffer list used by driver for SLI3 SCSI */
7709 spin_lock_init(&phba->scsi_buf_list_get_lock);
7710 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
7711 spin_lock_init(&phba->scsi_buf_list_put_lock);
7712 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
7714 /* Initialize the fabric iocb list */
7715 INIT_LIST_HEAD(&phba->fabric_iocb_list);
7717 /* Initialize list to save ELS buffers */
7718 INIT_LIST_HEAD(&phba->elsbuf);
7720 /* Initialize FCF connection rec list */
7721 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
7723 /* Initialize OAS configuration list */
7724 spin_lock_init(&phba->devicelock);
7725 INIT_LIST_HEAD(&phba->luns);
7727 /* MBOX heartbeat timer */
7728 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
7729 /* Fabric block timer */
7730 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
7731 /* EA polling mode timer */
7732 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
7733 /* Heartbeat timer */
7734 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
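/* Sketch (standard kernel timer API): timers initialized with
 * timer_setup() above are armed later with mod_timer(); e.g. a 1000 ms
 * heartbeat (the interval shown is hypothetical):
 *
 *	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(1000));
 */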
7736 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
7738 INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
7739 lpfc_idle_stat_delay_work);
7740 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
7745 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
7746 * @phba: pointer to lpfc hba data structure.
7748 * This routine is invoked to set up the driver internal resources specific to
7749 * support the SLI-3 HBA device it is attached to.
7753 * other values - error
7756 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
7761 * Initialize timers used by driver
7764 /* FCP polling mode timer */
7765 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
7767 /* Host attention work mask setup */
7768 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
7769 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
7771 /* Get all the module params for configuring this host */
7772 lpfc_get_cfgparam(phba);
7773 /* Set up phase-1 common device driver resources */
7775 rc = lpfc_setup_driver_resource_phase1(phba);
7779 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
7780 phba->menlo_flag |= HBA_MENLO_SUPPORT;
7781 /* check for menlo minimum sg count */
7782 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
7783 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
7786 if (!phba->sli.sli3_ring)
7787 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
7788 sizeof(struct lpfc_sli_ring),
7790 if (!phba->sli.sli3_ring)
7794 * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
7795 * used to create the sg_dma_buf_pool must be dynamically calculated.
7798 if (phba->sli_rev == LPFC_SLI_REV4)
7799 entry_sz = sizeof(struct sli4_sge);
7801 entry_sz = sizeof(struct ulp_bde64);
7803 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
7804 if (phba->cfg_enable_bg) {
7806 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
7807 * the FCP rsp, and a BDE for each. Since we have no control
7808 * over how many protection data segments the SCSI Layer
7809 * will hand us (i.e., there could be one for every block
7810 * in the IO), we just allocate enough BDEs to accommodate
7811 * our max amount and we need to limit lpfc_sg_seg_cnt to
7812 * minimize the risk of running out.
7814 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7815 sizeof(struct fcp_rsp) +
7816 (LPFC_MAX_SG_SEG_CNT * entry_sz);
7818 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7819 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
7821 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
7822 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7825 * The scsi_buf for a regular I/O will hold the FCP cmnd,
7826 * the FCP rsp, a BDE for each, and a BDE for up to
7827 * cfg_sg_seg_cnt data segments.
7829 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7830 sizeof(struct fcp_rsp) +
7831 ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
7833 /* Total BDEs in BPL for scsi_sg_list */
7834 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
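/* Worked sizing example (hypothetical config): with cfg_sg_seg_cnt = 64
 * and entry_sz = sizeof(struct ulp_bde64) (12 bytes), each pool element
 * is sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + (64 + 2) * 12
 * bytes, and cfg_total_seg_cnt = 66 (the data BDEs plus the reserved
 * cmnd/rsp pair).
 */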
7837 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7838 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
7839 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7840 phba->cfg_total_seg_cnt);
7842 phba->max_vpi = LPFC_MAX_VPI;
7843 /* This will be set to correct value after config_port mbox */
7844 phba->max_vports = 0;
7847 * Initialize the SLI Layer to run with lpfc HBAs.
7849 lpfc_sli_setup(phba);
7850 lpfc_sli_queue_init(phba);
7852 /* Allocate device driver memory */
7853 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
7856 phba->lpfc_sg_dma_buf_pool =
7857 dma_pool_create("lpfc_sg_dma_buf_pool",
7858 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7861 if (!phba->lpfc_sg_dma_buf_pool)
7864 phba->lpfc_cmd_rsp_buf_pool =
7865 dma_pool_create("lpfc_cmd_rsp_buf_pool",
7867 sizeof(struct fcp_cmnd) +
7868 sizeof(struct fcp_rsp),
7871 if (!phba->lpfc_cmd_rsp_buf_pool)
7872 goto fail_free_dma_buf_pool;
7875 * Enable sr-iov virtual functions if supported and configured
7876 * through the module parameter.
7878 if (phba->cfg_sriov_nr_virtfn > 0) {
7879 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7880 phba->cfg_sriov_nr_virtfn);
7882 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7883 "2808 Requested number of SR-IOV "
7884 "virtual functions (%d) is not "
7886 phba->cfg_sriov_nr_virtfn);
7887 phba->cfg_sriov_nr_virtfn = 0;
7893 fail_free_dma_buf_pool:
7894 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7895 phba->lpfc_sg_dma_buf_pool = NULL;
7897 lpfc_mem_free(phba);
7902 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
7903 * @phba: pointer to lpfc hba data structure.
7905 * This routine is invoked to unset the driver internal resources set up
7906 * specific for supporting the SLI-3 HBA device it is attached to.
7909 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
7911 /* Free device driver memory allocated */
7912 lpfc_mem_free_all(phba);
7918 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
7919 * @phba: pointer to lpfc hba data structure.
7921 * This routine is invoked to set up the driver internal resources specific to
7922 * support the SLI-4 HBA device it is attached to.
7926 * other values - error
7929 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
7931 LPFC_MBOXQ_t *mboxq;
7933 int rc, i, max_buf_size;
7940 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7941 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
7942 phba->sli4_hba.curr_disp_cpu = 0;
7944 /* Get all the module params for configuring this host */
7945 lpfc_get_cfgparam(phba);
7947 /* Set up phase-1 common device driver resources */
7948 rc = lpfc_setup_driver_resource_phase1(phba);
7952 /* Before proceeding, wait for POST done and device ready */
7953 rc = lpfc_sli4_post_status_check(phba);
7957 /* Allocate all driver workqueues here */
7959 /* The lpfc_wq workqueue for deferred irq use */
7960 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
7963 * Initialize timers used by driver
7966 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
7968 /* FCF rediscover timer */
7969 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
7971 /* CMF congestion timer */
7972 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7973 phba->cmf_timer.function = lpfc_cmf_timer;
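/* Sketch of the hrtimer contract used here (standard hrtimer API): the
 * CMF timer callback earlier in this file re-arms itself by forwarding
 * its expiry and returning HRTIMER_RESTART; arming the timer looks like
 * (interval hypothetical):
 *
 *	hrtimer_start(&phba->cmf_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
 */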
7976 * Control structure for handling external multi-buffer mailbox
7977 * command pass-through.
7979 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
7980 sizeof(struct lpfc_mbox_ext_buf_ctx));
7981 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
7983 phba->max_vpi = LPFC_MAX_VPI;
7985 /* This will be set to correct value after the read_config mbox */
7986 phba->max_vports = 0;
7988 /* Program the default value of vlan_id and fc_map */
7989 phba->valid_vlan = 0;
7990 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
7991 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
7992 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
7995 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
7996 * we will associate a new ring, for each EQ/CQ/WQ tuple.
7997 * The WQ create will allocate the ring.
8000 /* Initialize buffer queue management fields */
8001 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
8002 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
8003 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
8005 /* for VMID idle timeout if VMID is enabled */
8006 if (lpfc_is_vmid_enabled(phba))
8007 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
8010 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
8012 /* Initialize the Abort buffer list used by driver */
8013 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
8014 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
8016 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8017 /* Initialize the Abort nvme buffer list used by driver */
8018 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
8019 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8020 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
8021 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
8022 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
8025 /* This abort list is used by the worker thread */
8026 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
8027 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
8028 spin_lock_init(&phba->sli4_hba.asynce_list_lock);
8029 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
8032 * Initialize driver internal slow-path work queues
8035 /* Driver internal slow-path CQ Event pool */
8036 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
8037 /* Response IOCB work queue list */
8038 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
8039 /* Asynchronous event CQ Event work queue list */
8040 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
8041 /* Slow-path XRI aborted CQ Event work queue list */
8042 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
8043 /* Receive queue CQ Event work queue list */
8044 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
8046 /* Initialize extent block lists. */
8047 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
8048 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
8049 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
8050 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
8052 /* Initialize mboxq lists now so that cleanup works correctly
8053 * even if the early init routines fail.
8055 INIT_LIST_HEAD(&phba->sli.mboxq);
8056 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
8058 /* initialize optic_state to 0xFF */
8059 phba->sli4_hba.lnk_info.optic_state = 0xff;
8061 /* Allocate device driver memory */
8062 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
8066 /* IF Type 2 ports get initialized now. */
8067 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
8068 LPFC_SLI_INTF_IF_TYPE_2) {
8069 rc = lpfc_pci_function_reset(phba);
8074 phba->temp_sensor_support = 1;
8077 /* Create the bootstrap mailbox command */
8078 rc = lpfc_create_bootstrap_mbox(phba);
8082 /* Set up the host's endian order with the device. */
8083 rc = lpfc_setup_endian_order(phba);
8085 goto out_free_bsmbx;
8087 /* Set up the hba's configuration parameters. */
8088 rc = lpfc_sli4_read_config(phba);
8090 goto out_free_bsmbx;
8092 if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
8093 /* Right now the link is down, if FA-PWWN is configured the
8094 * firmware will try FLOGI before the driver gets a link up.
8095 * If it fails, the driver should get a MISCONFIGURED async
8096 * event which will clear this flag. The only notification
8097 * the driver gets is on failure; if it succeeds there is no
8098 * notification. Assume success.
8100 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
8103 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
8105 goto out_free_bsmbx;
8107 /* IF Type 0 ports get initialized now. */
8108 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8109 LPFC_SLI_INTF_IF_TYPE_0) {
8110 rc = lpfc_pci_function_reset(phba);
8112 goto out_free_bsmbx;
8115 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8119 goto out_free_bsmbx;
8122 /* Check for NVMET being configured */
8123 phba->nvmet_support = 0;
8124 if (lpfc_enable_nvmet_cnt) {
8126 /* First get WWN of HBA instance */
8127 lpfc_read_nv(phba, mboxq);
8128 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8129 if (rc != MBX_SUCCESS) {
8130 lpfc_printf_log(phba, KERN_ERR,
8132 "6016 Mailbox failed , mbxCmd x%x "
8133 "READ_NV, mbxStatus x%x\n",
8134 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8135 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
8136 mempool_free(mboxq, phba->mbox_mem_pool);
8138 goto out_free_bsmbx;
8141 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
8143 wwn = cpu_to_be64(wwn);
8144 phba->sli4_hba.wwnn.u.name = wwn;
8145 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
8147 /* wwn is WWPN of HBA instance */
8148 wwn = cpu_to_be64(wwn);
8149 phba->sli4_hba.wwpn.u.name = wwn;
8151 /* Check to see if it matches any module parameter */
8152 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
8153 if (wwn == lpfc_enable_nvmet[i]) {
8154 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
8155 if (lpfc_nvmet_mem_alloc(phba))
8158 phba->nvmet_support = 1; /* a match */
8160 lpfc_printf_log(phba, KERN_ERR,
8162 "6017 NVME Target %016llx\n",
8165 lpfc_printf_log(phba, KERN_ERR,
8167 "6021 Can't enable NVME Target."
8168 " NVME_TARGET_FC infrastructure"
8169 " is not in kernel\n");
8171 /* Not supported for NVMET */
8172 phba->cfg_xri_rebalancing = 0;
8173 if (phba->irq_chann_mode == NHT_MODE) {
8174 phba->cfg_irq_chann =
8175 phba->sli4_hba.num_present_cpu;
8176 phba->cfg_hdw_queue =
8177 phba->sli4_hba.num_present_cpu;
8178 phba->irq_chann_mode = NORMAL_MODE;
8185 lpfc_nvme_mod_param_dep(phba);
8188 * Get sli4 parameters that override parameters from Port capabilities.
8189 * If this call fails, it isn't critical unless the SLI4 parameters come
8192 rc = lpfc_get_sli4_parameters(phba, mboxq);
8194 if_type = bf_get(lpfc_sli_intf_if_type,
8195 &phba->sli4_hba.sli_intf);
8196 if_fam = bf_get(lpfc_sli_intf_sli_family,
8197 &phba->sli4_hba.sli_intf);
8198 if (phba->sli4_hba.extents_in_use &&
8199 phba->sli4_hba.rpi_hdrs_in_use) {
8200 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8201 "2999 Unsupported SLI4 Parameters "
8202 "Extents and RPI headers enabled.\n");
8203 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8204 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
8205 mempool_free(mboxq, phba->mbox_mem_pool);
8207 goto out_free_bsmbx;
8210 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8211 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
8212 mempool_free(mboxq, phba->mbox_mem_pool);
8214 goto out_free_bsmbx;
8219 * 1 for cmd, 1 for rsp, NVME adds an extra one
8220 * for boundary conditions in its max_sgl_segment template.
8223 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8227 * Regardless of what family our adapter is in, we are
8228 * limited to 2 pages (512 SGEs) for our SGL.
8229 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
8231 max_buf_size = (2 * SLI4_PAGE_SIZE);
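/*
 * Editor's note (illustrative arithmetic): with the customary 4 KB
 * SLI4_PAGE_SIZE and a 16-byte struct sli4_sge, max_buf_size is
 * 2 * 4096 = 8192 bytes, and 8192 / 16 = 512 SGEs, which is the
 * 2-page, 512-SGE limit described above.
 */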
8234 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
8235 * used to create the sg_dma_buf_pool must be calculated.
8237 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8238 /* Both cfg_enable_bg and cfg_external_dif code paths */
8241 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
8242 * the FCP rsp, and an SGE. Since we have no control
8243 * over how many protection segments the SCSI layer
8244 * will hand us (i.e., there could be one for every block
8245 * in the IO), just allocate enough SGEs to accommodate
8246 * our maximum, and we need to limit lpfc_sg_seg_cnt
8247 * to minimize the risk of running out.
8249 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8250 sizeof(struct fcp_rsp) + max_buf_size;
8252 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
8253 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8256 * If supporting DIF, reduce the seg count for scsi to
8257 * allow room for the DIF sges.
8259 if (phba->cfg_enable_bg &&
8260 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8261 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8263 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8267 * The scsi_buf for a regular I/O holds the FCP cmnd,
8268 * the FCP rsp, a SGE for each, and a SGE for up to
8269 * cfg_sg_seg_cnt data segments.
8271 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8272 sizeof(struct fcp_rsp) +
8273 ((phba->cfg_sg_seg_cnt + extra) *
8274 sizeof(struct sli4_sge));
8276 /* Total SGEs for scsi_sg_list */
8277 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8278 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8281 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8282 * need to post 1 page for the SGL.
8286 if (phba->cfg_xpsgl && !phba->nvmet_support)
8287 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8288 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
8289 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8291 phba->cfg_sg_dma_buf_size =
8292 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8294 phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8295 sizeof(struct sli4_sge);
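/*
 * Editor's sketch of the sizing logic above. The constants are
 * hypothetical stand-ins, not the driver's real structure sizes; only
 * the shape of the computation (fixed headers plus a per-SGE cost,
 * then page rounding) is the point.
 */
#if 0	/* illustrative only */
enum { FCP_CMND_SZ = 32, FCP_RSP_SZ = 96, SGE_SZ = 16, PAGE_SZ = 4096 };

static size_t demo_sg_dma_buf_size(unsigned int seg_cnt, unsigned int extra)
{
	size_t sz = FCP_CMND_SZ + FCP_RSP_SZ + (seg_cnt + extra) * SGE_SZ;

	/* Round up to a whole SLI4 page, as SLI4_PAGE_ALIGN() does. */
	return (sz + PAGE_SZ - 1) & ~((size_t)PAGE_SZ - 1);
}
#endif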
8297 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
8298 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8299 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
8300 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
8301 "6300 Reducing NVME sg segment "
8303 LPFC_MAX_NVME_SEG_CNT);
8304 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
8306 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
8309 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
8310 "9087 sg_seg_cnt:%d dmabuf_size:%d "
8311 "total:%d scsi:%d nvme:%d\n",
8312 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
8313 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
8314 phba->cfg_nvme_seg_cnt);
8316 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
8317 i = phba->cfg_sg_dma_buf_size;
8321 phba->lpfc_sg_dma_buf_pool =
8322 dma_pool_create("lpfc_sg_dma_buf_pool",
8324 phba->cfg_sg_dma_buf_size,
8326 if (!phba->lpfc_sg_dma_buf_pool)
8327 goto out_free_bsmbx;
8329 phba->lpfc_cmd_rsp_buf_pool =
8330 dma_pool_create("lpfc_cmd_rsp_buf_pool",
8332 sizeof(struct fcp_cmnd) +
8333 sizeof(struct fcp_rsp),
8335 if (!phba->lpfc_cmd_rsp_buf_pool)
8336 goto out_free_sg_dma_buf;
8338 mempool_free(mboxq, phba->mbox_mem_pool);
8340 /* Verify OAS is supported */
8341 lpfc_sli4_oas_verify(phba);
8343 /* Verify RAS support on adapter */
8344 lpfc_sli4_ras_init(phba);
8346 /* Verify all the SLI4 queues */
8347 rc = lpfc_sli4_queue_verify(phba);
8349 goto out_free_cmd_rsp_buf;
8351 /* Create driver internal CQE event pool */
8352 rc = lpfc_sli4_cq_event_pool_create(phba);
8354 goto out_free_cmd_rsp_buf;
8356 /* Initialize sgl lists per host */
8357 lpfc_init_sgl_list(phba);
8359 /* Allocate and initialize active sgl array */
8360 rc = lpfc_init_active_sgl_array(phba);
8362 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8363 "1430 Failed to initialize sgl list.\n");
8364 goto out_destroy_cq_event_pool;
8366 rc = lpfc_sli4_init_rpi_hdrs(phba);
8368 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8369 "1432 Failed to initialize rpi headers.\n");
8370 goto out_free_active_sgl;
8373 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
8374 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
8375 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
8377 if (!phba->fcf.fcf_rr_bmask) {
8378 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8379 "2759 Failed allocate memory for FCF round "
8380 "robin failover bmask\n");
8382 goto out_remove_rpi_hdrs;
8385 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
8386 sizeof(struct lpfc_hba_eq_hdl),
8388 if (!phba->sli4_hba.hba_eq_hdl) {
8389 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8390 "2572 Failed allocate memory for "
8391 "fast-path per-EQ handle array\n");
8393 goto out_free_fcf_rr_bmask;
8396 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
8397 sizeof(struct lpfc_vector_map_info),
8399 if (!phba->sli4_hba.cpu_map) {
8400 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8401 "3327 Failed allocate memory for msi-x "
8402 "interrupt vector mapping\n");
8404 goto out_free_hba_eq_hdl;
8407 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
8408 if (!phba->sli4_hba.eq_info) {
8409 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8410 "3321 Failed allocation for per_cpu stats\n");
8412 goto out_free_hba_cpu_map;
8415 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
8416 sizeof(*phba->sli4_hba.idle_stat),
8418 if (!phba->sli4_hba.idle_stat) {
8419 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8420 "3390 Failed allocation for idle_stat\n");
8422 goto out_free_hba_eq_info;
8425 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8426 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
8427 if (!phba->sli4_hba.c_stat) {
8428 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8429 "3332 Failed allocating per cpu hdwq stats\n");
8431 goto out_free_hba_idle_stat;
8435 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
8436 if (!phba->cmf_stat) {
8437 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8438 "3331 Failed allocating per cpu cgn stats\n");
8440 goto out_free_hba_hdwq_info;
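/*
 * Editor's sketch of the alloc_percpu()/free_percpu() pattern used for
 * eq_info, c_stat and cmf_stat above; struct demo_stat and demo_init()
 * are hypothetical.
 */
#if 0	/* illustrative only */
struct demo_stat { u64 events; };

static int demo_init(void)
{
	struct demo_stat __percpu *demo;

	demo = alloc_percpu(struct demo_stat);	/* one copy per CPU */
	if (!demo)
		return -ENOMEM;
	this_cpu_inc(demo->events);		/* update this CPU's copy */
	free_percpu(demo);
	return 0;
}
#endif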
8444 * Enable sr-iov virtual functions if supported and configured
8445 * through the module parameter.
8447 if (phba->cfg_sriov_nr_virtfn > 0) {
8448 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
8449 phba->cfg_sriov_nr_virtfn);
8451 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8452 "3020 Requested number of SR-IOV "
8453 "virtual functions (%d) is not "
8455 phba->cfg_sriov_nr_virtfn);
8456 phba->cfg_sriov_nr_virtfn = 0;
8462 out_free_hba_hdwq_info:
8463 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8464 free_percpu(phba->sli4_hba.c_stat);
8465 out_free_hba_idle_stat:
8467 kfree(phba->sli4_hba.idle_stat);
8468 out_free_hba_eq_info:
8469 free_percpu(phba->sli4_hba.eq_info);
8470 out_free_hba_cpu_map:
8471 kfree(phba->sli4_hba.cpu_map);
8472 out_free_hba_eq_hdl:
8473 kfree(phba->sli4_hba.hba_eq_hdl);
8474 out_free_fcf_rr_bmask:
8475 kfree(phba->fcf.fcf_rr_bmask);
8476 out_remove_rpi_hdrs:
8477 lpfc_sli4_remove_rpi_hdrs(phba);
8478 out_free_active_sgl:
8479 lpfc_free_active_sgl(phba);
8480 out_destroy_cq_event_pool:
8481 lpfc_sli4_cq_event_pool_destroy(phba);
8482 out_free_cmd_rsp_buf:
8483 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
8484 phba->lpfc_cmd_rsp_buf_pool = NULL;
8485 out_free_sg_dma_buf:
8486 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
8487 phba->lpfc_sg_dma_buf_pool = NULL;
8489 lpfc_destroy_bootstrap_mbox(phba);
8491 lpfc_mem_free(phba);
8496 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
8497 * @phba: pointer to lpfc hba data structure.
8499 * This routine is invoked to unset the driver internal resources set up
8500 * specific for supporting the SLI-4 HBA device it attached to.
8503 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
8505 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
8507 free_percpu(phba->sli4_hba.eq_info);
8508 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8509 free_percpu(phba->sli4_hba.c_stat);
8511 free_percpu(phba->cmf_stat);
8512 kfree(phba->sli4_hba.idle_stat);
8514 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
8515 kfree(phba->sli4_hba.cpu_map);
8516 phba->sli4_hba.num_possible_cpu = 0;
8517 phba->sli4_hba.num_present_cpu = 0;
8518 phba->sli4_hba.curr_disp_cpu = 0;
8519 cpumask_clear(&phba->sli4_hba.irq_aff_mask);
8521 /* Free memory allocated for fast-path work queue handles */
8522 kfree(phba->sli4_hba.hba_eq_hdl);
8524 /* Free the allocated rpi headers. */
8525 lpfc_sli4_remove_rpi_hdrs(phba);
8526 lpfc_sli4_remove_rpis(phba);
8528 /* Free eligible FCF index bmask */
8529 kfree(phba->fcf.fcf_rr_bmask);
8531 /* Free the ELS sgl list */
8532 lpfc_free_active_sgl(phba);
8533 lpfc_free_els_sgl_list(phba);
8534 lpfc_free_nvmet_sgl_list(phba);
8536 /* Free the completion queue EQ event pool */
8537 lpfc_sli4_cq_event_release_all(phba);
8538 lpfc_sli4_cq_event_pool_destroy(phba);
8540 /* Release resource identifiers. */
8541 lpfc_sli4_dealloc_resource_identifiers(phba);
8543 /* Free the bsmbx region. */
8544 lpfc_destroy_bootstrap_mbox(phba);
8546 /* Free the SLI Layer memory with SLI4 HBAs */
8547 lpfc_mem_free_all(phba);
8549 /* Free the current connect table */
8550 list_for_each_entry_safe(conn_entry, next_conn_entry,
8551 &phba->fcf_conn_rec_list, list) {
8552 list_del_init(&conn_entry->list);
8560 * lpfc_init_api_table_setup - Set up init api function jump table
8561 * @phba: The hba struct for which this call is being executed.
8562 * @dev_grp: The HBA PCI-Device group number.
8564 * This routine sets up the device INIT interface API function jump table
8567 * Returns: 0 - success, -ENODEV - failure.
8570 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8572 phba->lpfc_hba_init_link = lpfc_hba_init_link;
8573 phba->lpfc_hba_down_link = lpfc_hba_down_link;
8574 phba->lpfc_selective_reset = lpfc_selective_reset;
8576 case LPFC_PCI_DEV_LP:
8577 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
8578 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
8579 phba->lpfc_stop_port = lpfc_stop_port_s3;
8581 case LPFC_PCI_DEV_OC:
8582 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
8583 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
8584 phba->lpfc_stop_port = lpfc_stop_port_s4;
8587 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8588 "1431 Invalid HBA PCI-device group: 0x%x\n",
8596 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
8597 * @phba: pointer to lpfc hba data structure.
8599 * This routine is invoked to set up the driver internal resources after the
8600 * device specific resource setup to support the HBA device it attached to.
8604 * other values - error
8607 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
8611 /* Startup the kernel thread for this host adapter. */
8612 phba->worker_thread = kthread_run(lpfc_do_work, phba,
8613 "lpfc_worker_%d", phba->brd_no);
8614 if (IS_ERR(phba->worker_thread)) {
8615 error = PTR_ERR(phba->worker_thread);
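/*
 * Editor's sketch of the error-pointer convention used above:
 * kthread_run() returns either a valid task_struct pointer or an
 * ERR_PTR()-encoded errno, so failure is tested with IS_ERR() and
 * decoded with PTR_ERR(). The demo_* names are hypothetical.
 */
#if 0	/* illustrative only */
static int demo_thread_fn(void *arg)
{
	return 0;
}

static int demo_start_worker(void *data)
{
	struct task_struct *t;

	t = kthread_run(demo_thread_fn, data, "demo_worker_%d", 0);
	return IS_ERR(t) ? PTR_ERR(t) : 0;
}
#endif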
8623 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
8624 * @phba: pointer to lpfc hba data structure.
8626 * This routine is invoked to unset the driver internal resources set up after
8627 * the device specific resource setup for supporting the HBA device it
8631 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
8634 destroy_workqueue(phba->wq);
8638 /* Stop kernel worker thread */
8639 if (phba->worker_thread)
8640 kthread_stop(phba->worker_thread);
8644 * lpfc_free_iocb_list - Free iocb list.
8645 * @phba: pointer to lpfc hba data structure.
8647 * This routine is invoked to free the driver's IOCB list and memory.
8650 lpfc_free_iocb_list(struct lpfc_hba *phba)
8652 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
8654 spin_lock_irq(&phba->hbalock);
8655 list_for_each_entry_safe(iocbq_entry, iocbq_next,
8656 &phba->lpfc_iocb_list, list) {
8657 list_del(&iocbq_entry->list);
8659 phba->total_iocbq_bufs--;
8661 spin_unlock_irq(&phba->hbalock);
8667 * lpfc_init_iocb_list - Allocate and initialize iocb list.
8668 * @phba: pointer to lpfc hba data structure.
8669 * @iocb_count: number of requested iocbs
8671 * This routine is invoked to allocate and initialize the driver's IOCB
8672 * list and set up the IOCB tag array accordingly.
8676 * other values - error
8679 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
8681 struct lpfc_iocbq *iocbq_entry = NULL;
8685 /* Initialize and populate the iocb list per host. */
8686 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
8687 for (i = 0; i < iocb_count; i++) {
8688 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
8689 if (iocbq_entry == NULL) {
8690 printk(KERN_ERR "%s: only allocated %d iocbs of "
8691 "expected %d count. Unloading driver.\n",
8692 __func__, i, iocb_count);
8693 goto out_free_iocbq;
8696 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
8699 printk(KERN_ERR "%s: failed to allocate IOTAG. "
8700 "Unloading driver.\n", __func__);
8701 goto out_free_iocbq;
8703 iocbq_entry->sli4_lxritag = NO_XRI;
8704 iocbq_entry->sli4_xritag = NO_XRI;
8706 spin_lock_irq(&phba->hbalock);
8707 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
8708 phba->total_iocbq_bufs++;
8709 spin_unlock_irq(&phba->hbalock);
8715 lpfc_free_iocb_list(phba);
8721 * lpfc_free_sgl_list - Free a given sgl list.
8722 * @phba: pointer to lpfc hba data structure.
8723 * @sglq_list: pointer to the head of sgl list.
8725 * This routine is invoked to free a given sgl list and its memory.
8728 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
8730 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8732 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
8733 list_del(&sglq_entry->list);
8734 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
8740 * lpfc_free_els_sgl_list - Free els sgl list.
8741 * @phba: pointer to lpfc hba data structure.
8743 * This routine is invoked to free the driver's els sgl list and memory.
8746 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
8748 LIST_HEAD(sglq_list);
8750 /* Retrieve all els sgls from driver list */
8751 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
8752 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
8753 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
8755 /* Now free the sgl list */
8756 lpfc_free_sgl_list(phba, &sglq_list);
8760 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
8761 * @phba: pointer to lpfc hba data structure.
8763 * This routine is invoked to free the driver's nvmet sgl list and memory.
8766 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
8768 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8769 LIST_HEAD(sglq_list);
8771 /* Retrieve all nvmet sgls from driver list */
8772 spin_lock_irq(&phba->hbalock);
8773 spin_lock(&phba->sli4_hba.sgl_list_lock);
8774 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
8775 spin_unlock(&phba->sli4_hba.sgl_list_lock);
8776 spin_unlock_irq(&phba->hbalock);
8778 /* Now free the sgl list */
8779 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
8780 list_del(&sglq_entry->list);
8781 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
8785 /* Update the nvmet_xri_cnt to reflect no current sgls.
8786 * The next initialization cycle sets the count and allocates
8787 * the sgls over again.
8789 phba->sli4_hba.nvmet_xri_cnt = 0;
8793 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
8794 * @phba: pointer to lpfc hba data structure.
8796 * This routine is invoked to allocate the driver's active sgl memory.
8797 * This array will hold the sglq_entry's for active IOs.
8800 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
8803 size = sizeof(struct lpfc_sglq *);
8804 size *= phba->sli4_hba.max_cfg_param.max_xri;
8806 phba->sli4_hba.lpfc_sglq_active_list =
8807 kzalloc(size, GFP_KERNEL);
8808 if (!phba->sli4_hba.lpfc_sglq_active_list)
8814 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
8815 * @phba: pointer to lpfc hba data structure.
8817 * This routine is invoked to walk through the array of active sglq entries
8818 * and free all of the resources.
8819 * This is just a placeholder for now.
8822 lpfc_free_active_sgl(struct lpfc_hba *phba)
8824 kfree(phba->sli4_hba.lpfc_sglq_active_list);
8828 * lpfc_init_sgl_list - Allocate and initialize sgl list.
8829 * @phba: pointer to lpfc hba data structure.
8831 * This routine is invoked to allocate and initialize the driver's sgl
8832 * list and set up the sgl xritag tag array accordingly.
8836 lpfc_init_sgl_list(struct lpfc_hba *phba)
8838 /* Initialize and populate the sglq list per host/VF. */
8839 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
8840 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8841 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
8842 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8844 /* els xri-sgl bookkeeping */
8845 phba->sli4_hba.els_xri_cnt = 0;
8847 /* nvme xri-buffer bookkeeping */
8848 phba->sli4_hba.io_xri_cnt = 0;
8852 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
8853 * @phba: pointer to lpfc hba data structure.
8855 * This routine is invoked to post rpi header templates to the
8856 * port for those SLI4 ports that do not support extents. This routine
8857 * posts a PAGE_SIZE memory region to the port to hold up to
8858 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
8859 * and should be called only when interrupts are disabled.
8863 * -ERROR - otherwise.
8866 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
8869 struct lpfc_rpi_hdr *rpi_hdr;
8871 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
8872 if (!phba->sli4_hba.rpi_hdrs_in_use)
8874 if (phba->sli4_hba.extents_in_use)
8877 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
8879 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8880 "0391 Error during rpi post operation\n");
8881 lpfc_sli4_remove_rpis(phba);
8889 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
8890 * @phba: pointer to lpfc hba data structure.
8892 * This routine is invoked to allocate a single 4KB memory region to
8893 * support rpis and store it in the phba. This single region
8894 * provides support for up to 64 rpis. The region is used globally
8898 * A valid rpi hdr on success.
8899 * A NULL pointer on any failure.
8901 struct lpfc_rpi_hdr *
8902 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
8904 uint16_t rpi_limit, curr_rpi_range;
8905 struct lpfc_dmabuf *dmabuf;
8906 struct lpfc_rpi_hdr *rpi_hdr;
8909 * If the SLI4 port supports extents, posting the rpi header isn't
8910 * required. Set the expected maximum count and let the actual value
8911 * get set when extents are fully allocated.
8913 if (!phba->sli4_hba.rpi_hdrs_in_use)
8915 if (phba->sli4_hba.extents_in_use)
8918 /* The limit on the logical index is just the max_rpi count. */
8919 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
8921 spin_lock_irq(&phba->hbalock);
8923 * Establish the starting RPI in this header block. The starting
8924 * rpi is normalized to a zero base because the physical rpi is
8927 curr_rpi_range = phba->sli4_hba.next_rpi;
8928 spin_unlock_irq(&phba->hbalock);
8930 /* Reached full RPI range */
8931 if (curr_rpi_range == rpi_limit)
8935 * First allocate the protocol header region for the port. The
8936 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
8938 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8942 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8943 LPFC_HDR_TEMPLATE_SIZE,
8944 &dmabuf->phys, GFP_KERNEL);
8945 if (!dmabuf->virt) {
8947 goto err_free_dmabuf;
8950 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
8952 goto err_free_coherent;
8955 /* Save the rpi header data for cleanup later. */
8956 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
8958 goto err_free_coherent;
8960 rpi_hdr->dmabuf = dmabuf;
8961 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
8962 rpi_hdr->page_count = 1;
8963 spin_lock_irq(&phba->hbalock);
8965 /* The rpi_hdr stores the logical index only. */
8966 rpi_hdr->start_rpi = curr_rpi_range;
8967 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
8968 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
8970 spin_unlock_irq(&phba->hbalock);
8974 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
8975 dmabuf->virt, dmabuf->phys);
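/*
 * Editor's note (illustrative arithmetic): assuming the usual 4 KB
 * LPFC_HDR_TEMPLATE_SIZE and 64-byte rpi context headers, one region
 * holds 4096 / 64 = 64 rpis, which is why next_rpi advances by
 * LPFC_RPI_HDR_COUNT per header block.
 */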
8982 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
8983 * @phba: pointer to lpfc hba data structure.
8985 * This routine is invoked to remove all memory resources allocated
8986 * to support rpis for SLI4 ports not supporting extents. This routine
8987 * presumes the caller has released all rpis consumed by fabric or port
8988 * logins and is prepared to have the header pages removed.
8991 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
8993 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
8995 if (!phba->sli4_hba.rpi_hdrs_in_use)
8998 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
8999 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
9000 list_del(&rpi_hdr->list);
9001 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
9002 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
9003 kfree(rpi_hdr->dmabuf);
9007 /* There are no rpis available to the port now. */
9008 phba->sli4_hba.next_rpi = 0;
9012 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
9013 * @pdev: pointer to pci device data structure.
9015 * This routine is invoked to allocate the driver hba data structure for an
9016 * HBA device. If the allocation is successful, the phba reference to the
9017 * PCI device data structure is set.
9020 * pointer to @phba - successful
9023 static struct lpfc_hba *
9024 lpfc_hba_alloc(struct pci_dev *pdev)
9026 struct lpfc_hba *phba;
9028 /* Allocate memory for HBA structure */
9029 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
9031 dev_err(&pdev->dev, "failed to allocate hba struct\n");
9035 /* Set reference to PCI device in HBA structure */
9036 phba->pcidev = pdev;
9038 /* Assign an unused board number */
9039 phba->brd_no = lpfc_get_instance();
9040 if (phba->brd_no < 0) {
9044 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
9046 spin_lock_init(&phba->ct_ev_lock);
9047 INIT_LIST_HEAD(&phba->ct_ev_waiters);
9053 * lpfc_hba_free - Free driver hba data structure with a device.
9054 * @phba: pointer to lpfc hba data structure.
9056 * This routine is invoked to free the driver hba data structure with an
9060 lpfc_hba_free(struct lpfc_hba *phba)
9062 if (phba->sli_rev == LPFC_SLI_REV4)
9063 kfree(phba->sli4_hba.hdwq);
9065 /* Release the driver assigned board number */
9066 idr_remove(&lpfc_hba_index, phba->brd_no);
9068 /* Free memory allocated with sli3 rings */
9069 kfree(phba->sli.sli3_ring);
9070 phba->sli.sli3_ring = NULL;
9077 * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
9078 * @vport: pointer to lpfc vport data structure.
9080 * This routine will set up the initial FDMI attribute masks for
9081 * FDMI2 or SmartSAN depending on module parameters. The driver will attempt
9082 * to get these attributes first before falling back; the attribute
9083 * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1.
9086 lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
9088 struct lpfc_hba *phba = vport->phba;
9090 vport->load_flag |= FC_ALLOW_FDMI;
9091 if (phba->cfg_enable_SmartSAN ||
9092 phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
9093 /* Setup appropriate attribute masks */
9094 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
9095 if (phba->cfg_enable_SmartSAN)
9096 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
9098 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
9101 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
9102 "6077 Setup FDMI mask: hba x%x port x%x\n",
9103 vport->fdmi_hba_mask, vport->fdmi_port_mask);
9107 * lpfc_create_shost - Create hba physical port with associated scsi host.
9108 * @phba: pointer to lpfc hba data structure.
9110 * This routine is invoked to create HBA physical port and associate a SCSI
9115 * other values - error
9118 lpfc_create_shost(struct lpfc_hba *phba)
9120 struct lpfc_vport *vport;
9121 struct Scsi_Host *shost;
9123 /* Initialize HBA FC structure */
9124 phba->fc_edtov = FF_DEF_EDTOV;
9125 phba->fc_ratov = FF_DEF_RATOV;
9126 phba->fc_altov = FF_DEF_ALTOV;
9127 phba->fc_arbtov = FF_DEF_ARBTOV;
9129 atomic_set(&phba->sdev_cnt, 0);
9130 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
9134 shost = lpfc_shost_from_vport(vport);
9135 phba->pport = vport;
9137 if (phba->nvmet_support) {
9138 /* Only 1 vport (pport) will support NVME target */
9139 phba->targetport = NULL;
9140 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
9141 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
9142 "6076 NVME Target Found\n");
9145 lpfc_debugfs_initialize(vport);
9146 /* Put reference to SCSI host into the driver's device private data */
9147 pci_set_drvdata(phba->pcidev, shost);
9149 lpfc_setup_fdmi_mask(vport);
9152 * At this point we are fully registered with PSA. In addition,
9153 * any initial discovery should be completed.
9159 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
9160 * @phba: pointer to lpfc hba data structure.
9162 * This routine is invoked to destroy HBA physical port and the associated
9166 lpfc_destroy_shost(struct lpfc_hba *phba)
9168 struct lpfc_vport *vport = phba->pport;
9170 /* Destroy physical port that associated with the SCSI host */
9171 destroy_port(vport);
9177 * lpfc_setup_bg - Setup Block guard structures and debug areas.
9178 * @phba: pointer to lpfc hba data structure.
9179 * @shost: the shost to be used to detect Block guard settings.
9181 * This routine sets up the local Block guard protocol settings for @shost.
9182 * This routine also allocates memory for debugging bg buffers.
9185 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
9190 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9191 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9192 "1478 Registering BlockGuard with the "
9195 old_mask = phba->cfg_prot_mask;
9196 old_guard = phba->cfg_prot_guard;
9198 /* Only allow supported values */
9199 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
9200 SHOST_DIX_TYPE0_PROTECTION |
9201 SHOST_DIX_TYPE1_PROTECTION);
9202 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
9203 SHOST_DIX_GUARD_CRC);
9205 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
9206 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
9207 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
9209 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9210 if ((old_mask != phba->cfg_prot_mask) ||
9211 (old_guard != phba->cfg_prot_guard))
9212 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9213 "1475 Registering BlockGuard with the "
9214 "SCSI layer: mask %d guard %d\n",
9215 phba->cfg_prot_mask,
9216 phba->cfg_prot_guard);
9218 scsi_host_set_prot(shost, phba->cfg_prot_mask);
9219 scsi_host_set_guard(shost, phba->cfg_prot_guard);
9221 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9222 "1479 Not Registering BlockGuard with the SCSI "
9223 "layer, Bad protection parameters: %d %d\n",
9224 old_mask, old_guard);
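/*
 * Editor's note (illustrative): the &= filters above simply drop any
 * requested protection bits the driver does not support, e.g. a
 * request for DIF_TYPE1 | DIX_TYPE2 keeps only DIF_TYPE1, while a
 * DIX_TYPE1-only request has DIF_TYPE1 added so type 1 protection is
 * end to end.
 */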
9229 * lpfc_post_init_setup - Perform necessary device post initialization setup.
9230 * @phba: pointer to lpfc hba data structure.
9232 * This routine is invoked to perform all the necessary post initialization
9233 * setup for the device.
9236 lpfc_post_init_setup(struct lpfc_hba *phba)
9238 struct Scsi_Host *shost;
9239 struct lpfc_adapter_event_header adapter_event;
9241 /* Get the default values for Model Name and Description */
9242 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9245 * hba setup may have changed the hba_queue_depth, so we need to
9246 * adjust the value of can_queue.
9248 shost = pci_get_drvdata(phba->pcidev);
9249 shost->can_queue = phba->cfg_hba_queue_depth - 10;
9251 lpfc_host_attrib_init(shost);
9253 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9254 spin_lock_irq(shost->host_lock);
9255 lpfc_poll_start_timer(phba);
9256 spin_unlock_irq(shost->host_lock);
9259 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9260 "0428 Perform SCSI scan\n");
9261 /* Send board arrival event to upper layer */
9262 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
9263 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
9264 fc_host_post_vendor_event(shost, fc_get_event_number(),
9265 sizeof(adapter_event),
9266 (char *) &adapter_event,
9272 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
9273 * @phba: pointer to lpfc hba data structure.
9275 * This routine is invoked to set up the PCI device memory space for device
9276 * with SLI-3 interface spec.
9280 * other values - error
9283 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
9285 struct pci_dev *pdev = phba->pcidev;
9286 unsigned long bar0map_len, bar2map_len;
9294 /* Set the device DMA mask size */
9295 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9297 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9302 /* Get the bus address of Bar0 and Bar2 and the number of bytes
9303 * required by each mapping.
9305 phba->pci_bar0_map = pci_resource_start(pdev, 0);
9306 bar0map_len = pci_resource_len(pdev, 0);
9308 phba->pci_bar2_map = pci_resource_start(pdev, 2);
9309 bar2map_len = pci_resource_len(pdev, 2);
9311 /* Map HBA SLIM to a kernel virtual address. */
9312 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
9313 if (!phba->slim_memmap_p) {
9314 dev_printk(KERN_ERR, &pdev->dev,
9315 "ioremap failed for SLIM memory.\n");
9319 /* Map HBA Control Registers to a kernel virtual address. */
9320 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
9321 if (!phba->ctrl_regs_memmap_p) {
9322 dev_printk(KERN_ERR, &pdev->dev,
9323 "ioremap failed for HBA control registers.\n");
9324 goto out_iounmap_slim;
9327 /* Allocate memory for SLI-2 structures */
9328 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9329 &phba->slim2p.phys, GFP_KERNEL);
9330 if (!phba->slim2p.virt)
9333 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
9334 phba->mbox_ext = (phba->slim2p.virt +
9335 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
9336 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
9337 phba->IOCBs = (phba->slim2p.virt +
9338 offsetof(struct lpfc_sli2_slim, IOCBs));
9340 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
9341 lpfc_sli_hbq_size(),
9342 &phba->hbqslimp.phys,
9344 if (!phba->hbqslimp.virt)
9347 hbq_count = lpfc_sli_hbq_count();
9348 ptr = phba->hbqslimp.virt;
9349 for (i = 0; i < hbq_count; ++i) {
9350 phba->hbqs[i].hbq_virt = ptr;
9351 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
9352 ptr += (lpfc_hbq_defs[i]->entry_count *
9353 sizeof(struct lpfc_hbq_entry));
9355 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
9356 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
9358 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
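/*
 * Editor's note (illustrative): the loop above carves the single
 * hbqslimp allocation into per-HBQ slices, advancing ptr by
 * entry_count * sizeof(struct lpfc_hbq_entry) bytes for each HBQ.
 */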
9360 phba->MBslimaddr = phba->slim_memmap_p;
9361 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
9362 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
9363 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
9364 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
9369 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9370 phba->slim2p.virt, phba->slim2p.phys);
9372 iounmap(phba->ctrl_regs_memmap_p);
9374 iounmap(phba->slim_memmap_p);
9380 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
9381 * @phba: pointer to lpfc hba data structure.
9383 * This routine is invoked to unset the PCI device memory space for device
9384 * with SLI-3 interface spec.
9387 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
9389 struct pci_dev *pdev;
9391 /* Obtain PCI device reference */
9395 pdev = phba->pcidev;
9397 /* Free coherent DMA memory allocated */
9398 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9399 phba->hbqslimp.virt, phba->hbqslimp.phys);
9400 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9401 phba->slim2p.virt, phba->slim2p.phys);
9403 /* I/O memory unmap */
9404 iounmap(phba->ctrl_regs_memmap_p);
9405 iounmap(phba->slim_memmap_p);
9411 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
9412 * @phba: pointer to lpfc hba data structure.
9414 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
9415 * done and check status.
9417 * Return 0 if successful, otherwise -ENODEV.
9420 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
9422 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
9423 struct lpfc_register reg_data;
9424 int i, port_error = 0;
9427 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
9428 memset(&reg_data, 0, sizeof(reg_data));
9429 if (!phba->sli4_hba.PSMPHRregaddr)
9432 /* Wait up to 30 seconds for the SLI Port POST done and ready */
9433 for (i = 0; i < 3000; i++) {
9434 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9435 &portsmphr_reg.word0) ||
9436 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
9437 /* Port has a fatal POST error, break out */
9438 port_error = -ENODEV;
9441 if (LPFC_POST_STAGE_PORT_READY ==
9442 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
9448 * If there was a port error during POST, then don't proceed with
9449 * other register reads as the data may not be valid. Just exit.
9452 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9453 "1408 Port Failed POST - portsmphr=0x%x, "
9454 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
9455 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
9456 portsmphr_reg.word0,
9457 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
9458 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
9459 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
9460 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
9461 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
9462 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
9463 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
9464 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
9466 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9467 "2534 Device Info: SLIFamily=0x%x, "
9468 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
9469 "SLIHint_2=0x%x, FT=0x%x\n",
9470 bf_get(lpfc_sli_intf_sli_family,
9471 &phba->sli4_hba.sli_intf),
9472 bf_get(lpfc_sli_intf_slirev,
9473 &phba->sli4_hba.sli_intf),
9474 bf_get(lpfc_sli_intf_if_type,
9475 &phba->sli4_hba.sli_intf),
9476 bf_get(lpfc_sli_intf_sli_hint1,
9477 &phba->sli4_hba.sli_intf),
9478 bf_get(lpfc_sli_intf_sli_hint2,
9479 &phba->sli4_hba.sli_intf),
9480 bf_get(lpfc_sli_intf_func_type,
9481 &phba->sli4_hba.sli_intf));
9483 * Check for other Port errors during the initialization
9484 * process. Fail the load if the port did not come up
9487 if_type = bf_get(lpfc_sli_intf_if_type,
9488 &phba->sli4_hba.sli_intf);
9490 case LPFC_SLI_INTF_IF_TYPE_0:
9491 phba->sli4_hba.ue_mask_lo =
9492 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9493 phba->sli4_hba.ue_mask_hi =
9494 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9496 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9498 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
9499 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9500 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9501 lpfc_printf_log(phba, KERN_ERR,
9503 "1422 Unrecoverable Error "
9504 "Detected during POST "
9505 "uerr_lo_reg=0x%x, "
9506 "uerr_hi_reg=0x%x, "
9507 "ue_mask_lo_reg=0x%x, "
9508 "ue_mask_hi_reg=0x%x\n",
9511 phba->sli4_hba.ue_mask_lo,
9512 phba->sli4_hba.ue_mask_hi);
9513 port_error = -ENODEV;
9516 case LPFC_SLI_INTF_IF_TYPE_2:
9517 case LPFC_SLI_INTF_IF_TYPE_6:
9518 /* Final checks. The port status should be clean. */
9519 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9521 (bf_get(lpfc_sliport_status_err, &reg_data) &&
9522 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
9523 phba->work_status[0] =
9524 readl(phba->sli4_hba.u.if_type2.
9526 phba->work_status[1] =
9527 readl(phba->sli4_hba.u.if_type2.
9529 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9530 "2888 Unrecoverable port error "
9531 "following POST: port status reg "
9532 "0x%x, port_smphr reg 0x%x, "
9533 "error 1=0x%x, error 2=0x%x\n",
9535 portsmphr_reg.word0,
9536 phba->work_status[0],
9537 phba->work_status[1]);
9538 port_error = -ENODEV;
9542 if (lpfc_pldv_detect &&
9543 bf_get(lpfc_sli_intf_sli_family,
9544 &phba->sli4_hba.sli_intf) ==
9545 LPFC_SLI_INTF_FAMILY_G6)
9546 pci_write_config_byte(phba->pcidev,
9547 LPFC_SLI_INTF, CFG_PLD);
9549 case LPFC_SLI_INTF_IF_TYPE_1:
9558 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9559 * @phba: pointer to lpfc hba data structure.
9560 * @if_type: The SLI4 interface type getting configured.
9562 * This routine is invoked to set up SLI4 BAR0 PCI config space register
9566 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9569 case LPFC_SLI_INTF_IF_TYPE_0:
9570 phba->sli4_hba.u.if_type0.UERRLOregaddr =
9571 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9572 phba->sli4_hba.u.if_type0.UERRHIregaddr =
9573 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9574 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9575 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9576 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9577 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9578 phba->sli4_hba.SLIINTFregaddr =
9579 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9581 case LPFC_SLI_INTF_IF_TYPE_2:
9582 phba->sli4_hba.u.if_type2.EQDregaddr =
9583 phba->sli4_hba.conf_regs_memmap_p +
9584 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9585 phba->sli4_hba.u.if_type2.ERR1regaddr =
9586 phba->sli4_hba.conf_regs_memmap_p +
9587 LPFC_CTL_PORT_ER1_OFFSET;
9588 phba->sli4_hba.u.if_type2.ERR2regaddr =
9589 phba->sli4_hba.conf_regs_memmap_p +
9590 LPFC_CTL_PORT_ER2_OFFSET;
9591 phba->sli4_hba.u.if_type2.CTRLregaddr =
9592 phba->sli4_hba.conf_regs_memmap_p +
9593 LPFC_CTL_PORT_CTL_OFFSET;
9594 phba->sli4_hba.u.if_type2.STATUSregaddr =
9595 phba->sli4_hba.conf_regs_memmap_p +
9596 LPFC_CTL_PORT_STA_OFFSET;
9597 phba->sli4_hba.SLIINTFregaddr =
9598 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9599 phba->sli4_hba.PSMPHRregaddr =
9600 phba->sli4_hba.conf_regs_memmap_p +
9601 LPFC_CTL_PORT_SEM_OFFSET;
9602 phba->sli4_hba.RQDBregaddr =
9603 phba->sli4_hba.conf_regs_memmap_p +
9604 LPFC_ULP0_RQ_DOORBELL;
9605 phba->sli4_hba.WQDBregaddr =
9606 phba->sli4_hba.conf_regs_memmap_p +
9607 LPFC_ULP0_WQ_DOORBELL;
9608 phba->sli4_hba.CQDBregaddr =
9609 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9610 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9611 phba->sli4_hba.MQDBregaddr =
9612 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9613 phba->sli4_hba.BMBXregaddr =
9614 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9616 case LPFC_SLI_INTF_IF_TYPE_6:
9617 phba->sli4_hba.u.if_type2.EQDregaddr =
9618 phba->sli4_hba.conf_regs_memmap_p +
9619 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9620 phba->sli4_hba.u.if_type2.ERR1regaddr =
9621 phba->sli4_hba.conf_regs_memmap_p +
9622 LPFC_CTL_PORT_ER1_OFFSET;
9623 phba->sli4_hba.u.if_type2.ERR2regaddr =
9624 phba->sli4_hba.conf_regs_memmap_p +
9625 LPFC_CTL_PORT_ER2_OFFSET;
9626 phba->sli4_hba.u.if_type2.CTRLregaddr =
9627 phba->sli4_hba.conf_regs_memmap_p +
9628 LPFC_CTL_PORT_CTL_OFFSET;
9629 phba->sli4_hba.u.if_type2.STATUSregaddr =
9630 phba->sli4_hba.conf_regs_memmap_p +
9631 LPFC_CTL_PORT_STA_OFFSET;
9632 phba->sli4_hba.PSMPHRregaddr =
9633 phba->sli4_hba.conf_regs_memmap_p +
9634 LPFC_CTL_PORT_SEM_OFFSET;
9635 phba->sli4_hba.BMBXregaddr =
9636 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9638 case LPFC_SLI_INTF_IF_TYPE_1:
9640 dev_printk(KERN_ERR, &phba->pcidev->dev,
9641 "FATAL - unsupported SLI4 interface type - %d\n",
9648 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
9649 * @phba: pointer to lpfc hba data structure.
9650 * @if_type: sli if type to operate on.
9652 * This routine is invoked to set up SLI4 BAR1 register memory map.
9655 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9658 case LPFC_SLI_INTF_IF_TYPE_0:
9659 phba->sli4_hba.PSMPHRregaddr =
9660 phba->sli4_hba.ctrl_regs_memmap_p +
9661 LPFC_SLIPORT_IF0_SMPHR;
9662 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9664 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9666 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9669 case LPFC_SLI_INTF_IF_TYPE_6:
9670 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9671 LPFC_IF6_RQ_DOORBELL;
9672 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9673 LPFC_IF6_WQ_DOORBELL;
9674 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9675 LPFC_IF6_CQ_DOORBELL;
9676 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9677 LPFC_IF6_EQ_DOORBELL;
9678 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9679 LPFC_IF6_MQ_DOORBELL;
9681 case LPFC_SLI_INTF_IF_TYPE_2:
9682 case LPFC_SLI_INTF_IF_TYPE_1:
9684 dev_err(&phba->pcidev->dev,
9685 "FATAL - unsupported SLI4 interface type - %d\n",
9692 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
9693 * @phba: pointer to lpfc hba data structure.
9694 * @vf: virtual function number
9696 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
9697 * based on the given virtual function number, @vf.
9699 * Return 0 if successful, otherwise -ENODEV.
9702 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9704 if (vf > LPFC_VIR_FUNC_MAX)
9707 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9708 vf * LPFC_VFR_PAGE_SIZE +
9709 LPFC_ULP0_RQ_DOORBELL);
9710 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9711 vf * LPFC_VFR_PAGE_SIZE +
9712 LPFC_ULP0_WQ_DOORBELL);
9713 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9714 vf * LPFC_VFR_PAGE_SIZE +
9715 LPFC_EQCQ_DOORBELL);
9716 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9717 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9718 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
9719 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9720 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
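/*
 * Editor's sketch of the per-VF doorbell arithmetic above; the helper
 * and its page-size parameter are hypothetical, the point is only the
 * base + vf * page + register-offset layout.
 */
#if 0	/* illustrative only */
static void __iomem *demo_vf_doorbell(void __iomem *bar2, u32 vf,
				      u32 reg_offset, u32 vf_page_size)
{
	return bar2 + (size_t)vf * vf_page_size + reg_offset;
}
#endif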
9725 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
9726 * @phba: pointer to lpfc hba data structure.
9728 * This routine is invoked to create the bootstrap mailbox
9729 * region consistent with the SLI-4 interface spec. This
9730 * routine allocates all memory necessary to communicate
9731 * mailbox commands to the port and sets up all alignment
9732 * needs. No locks are expected to be held when calling
9737 * -ENOMEM - could not allocate memory.
9740 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
9743 struct lpfc_dmabuf *dmabuf;
9744 struct dma_address *dma_address;
9748 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9753 * The bootstrap mailbox region consists of 2 parts
9754 * plus an alignment restriction of 16 bytes.
9756 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
9757 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9758 &dmabuf->phys, GFP_KERNEL);
9759 if (!dmabuf->virt) {
9765 * Initialize the bootstrap mailbox pointers now so that the register
9766 * operations are simple later. The mailbox dma address is required
9767 * to be 16-byte aligned. Also align the virtual memory as each
9768 * mailbox is copied into the bmbx mailbox region before issuing the
9769 * command to the port.
9771 phba->sli4_hba.bmbx.dmabuf = dmabuf;
9772 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9774 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9775 LPFC_ALIGN_16_BYTE);
9776 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9777 LPFC_ALIGN_16_BYTE);
9780 * Set the high and low physical addresses now. The SLI4 alignment
9781 * requirement is 16 bytes and the mailbox is posted to the port
9782 * as two 30-bit addresses. The other data is a bit marking whether
9783 * the 30-bit address is the high or low address.
9784 * Upcast bmbx aphys to 64 bits so the shift instruction compiles
9785 * cleanly on 32-bit machines.
9787 dma_address = &phba->sli4_hba.bmbx.dma_address;
9788 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
9789 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
9790 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
9791 LPFC_BMBX_BIT1_ADDR_HI);
9793 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
9794 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
9795 LPFC_BMBX_BIT1_ADDR_LO);
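/*
 * Editor's sketch of the address split above: the 16-byte-aligned
 * physical address is posted as two 30-bit halves, each shifted left
 * by 2 with a flag bit marking high vs. low. DEMO_BIT1_HI/LO stand in
 * for the LPFC_BMBX_BIT1_ADDR_* flags.
 */
#if 0	/* illustrative only */
static void demo_bmbx_split(u64 phys, u32 *hi, u32 *lo)
{
	enum { DEMO_BIT1_HI = 1, DEMO_BIT1_LO = 0 };

	*hi = (u32)(((phys >> 34) & 0x3fffffff) << 2) | DEMO_BIT1_HI;
	*lo = (u32)(((phys >> 4) & 0x3fffffff) << 2) | DEMO_BIT1_LO;
}
#endif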
9800 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
9801 * @phba: pointer to lpfc hba data structure.
9803 * This routine is invoked to teardown the bootstrap mailbox
9804 * region and release all host resources. This routine requires
9805 * the caller to ensure all mailbox commands are recovered, that no
9806 * additional mailbox commands are sent, and that interrupts are disabled
9807 * before calling this routine.
9811 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
9813 dma_free_coherent(&phba->pcidev->dev,
9814 phba->sli4_hba.bmbx.bmbx_size,
9815 phba->sli4_hba.bmbx.dmabuf->virt,
9816 phba->sli4_hba.bmbx.dmabuf->phys);
9818 kfree(phba->sli4_hba.bmbx.dmabuf);
9819 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
9822 static const char * const lpfc_topo_to_str[] = {
9832 #define LINK_FLAGS_DEF 0x0
9833 #define LINK_FLAGS_P2P 0x1
9834 #define LINK_FLAGS_LOOP 0x2
9836 * lpfc_map_topology - Map the topology read from READ_CONFIG
9837 * @phba: pointer to lpfc hba data structure.
9838 * @rd_config: pointer to read config data
9840 * This routine is invoked to map the topology values as read
9841 * from the read config mailbox command. If the persistent
9842 * topology feature is supported, the firmware will provide the
9843 * saved topology information to be used in INIT_LINK
9846 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
9850 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
9851 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
9852 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
9854 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9855 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
9858 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9859 "2019 FW does not support persistent topology "
9860 "Using driver parameter defined value [%s]",
9861 lpfc_topo_to_str[phba->cfg_topology]);
9864 /* FW supports persistent topology - override module parameter value */
9865 phba->hba_flag |= HBA_PERSISTENT_TOPO;
9867 /* if ASIC_GEN_NUM >= 0xC */
9868 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9869 LPFC_SLI_INTF_IF_TYPE_6) ||
9870 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
9871 LPFC_SLI_INTF_FAMILY_G6)) {
9873 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
9874 ? FLAGS_TOPOLOGY_MODE_LOOP
9875 : FLAGS_TOPOLOGY_MODE_PT_PT);
9877 phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
9881 /* If topology failover is set, pt is '0' or '1' */
9882 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
9883 FLAGS_TOPOLOGY_MODE_LOOP_PT);
9885 phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
9886 ? FLAGS_TOPOLOGY_MODE_PT_PT
9887 : FLAGS_TOPOLOGY_MODE_LOOP);
9890 if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
9891 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9892 "2020 Using persistent topology value [%s]",
9893 lpfc_topo_to_str[phba->cfg_topology]);
9895 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9896 "2021 Invalid topology values from FW "
9897 "Using driver parameter defined value [%s]",
9898 lpfc_topo_to_str[phba->cfg_topology]);
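/*
 * Editor's summary of the mapping visible above (some branches are
 * elided or firmware dependent):
 *
 *   ptv=0       -> no persistent topology; keep the driver parameter
 *   ptv=1, tf=1 -> failover mode; pt picks PT_LOOP vs. LOOP_PT
 *   ptv=1, tf=0 -> fixed mode;    pt picks PT_PT vs. LOOP
 */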
9903 * lpfc_sli4_read_config - Get the config parameters.
9904 * @phba: pointer to lpfc hba data structure.
9906 * This routine is invoked to read the configuration parameters from the HBA.
9907 * The configuration parameters are used to set the base and maximum values
9908 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
9909 * allocation for the port.
9913 * -ENOMEM - No available memory
9914 * -EIO - The mailbox failed to complete successfully.
9917 lpfc_sli4_read_config(struct lpfc_hba *phba)
9920 struct lpfc_mbx_read_config *rd_config;
9921 union lpfc_sli4_cfg_shdr *shdr;
9922 uint32_t shdr_status, shdr_add_status;
9923 struct lpfc_mbx_get_func_cfg *get_func_cfg;
9924 struct lpfc_rsrc_desc_fcfcoe *desc;
9926 uint16_t forced_link_speed;
9927 uint32_t if_type, qmin, fawwpn;
9928 int length, i, rc = 0, rc2;
9930 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9932 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9933 "2011 Unable to allocate memory for issuing "
9934 "SLI_CONFIG_SPECIAL mailbox command\n");
9938 lpfc_read_config(phba, pmb);
9940 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9941 if (rc != MBX_SUCCESS) {
9942 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9943 "2012 Mailbox failed , mbxCmd x%x "
9944 "READ_CONFIG, mbxStatus x%x\n",
9945 bf_get(lpfc_mqe_command, &pmb->u.mqe),
9946 bf_get(lpfc_mqe_status, &pmb->u.mqe));
9949 rd_config = &pmb->u.mqe.un.rd_config;
9950 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
9951 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
9952 phba->sli4_hba.lnk_info.lnk_tp =
9953 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
9954 phba->sli4_hba.lnk_info.lnk_no =
9955 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
9956 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9957 "3081 lnk_type:%d, lnk_numb:%d\n",
9958 phba->sli4_hba.lnk_info.lnk_tp,
9959 phba->sli4_hba.lnk_info.lnk_no);
9961 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9962 "3082 Mailbox (x%x) returned ldv:x0\n",
9963 bf_get(lpfc_mqe_command, &pmb->u.mqe));
9964 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
9965 phba->bbcredit_support = 1;
9966 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
9969 fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config);
9972 lpfc_printf_log(phba, KERN_INFO,
9973 LOG_INIT | LOG_DISCOVERY,
9974 "2702 READ_CONFIG: FA-PWWN is "
9976 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
9978 phba->sli4_hba.fawwpn_flag = 0;
9981 phba->sli4_hba.conf_trunk =
9982 bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
9983 phba->sli4_hba.extents_in_use =
9984 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
9986 phba->sli4_hba.max_cfg_param.max_xri =
9987 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
9988 /* Reduce resource usage in kdump environment */
9989 if (is_kdump_kernel() &&
9990 phba->sli4_hba.max_cfg_param.max_xri > 512)
9991 phba->sli4_hba.max_cfg_param.max_xri = 512;
9992 phba->sli4_hba.max_cfg_param.xri_base =
9993 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
9994 phba->sli4_hba.max_cfg_param.max_vpi =
9995 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
9996 /* Limit the max we support */
9997 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
9998 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
9999 phba->sli4_hba.max_cfg_param.vpi_base =
10000 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
10001 phba->sli4_hba.max_cfg_param.max_rpi =
10002 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
10003 phba->sli4_hba.max_cfg_param.rpi_base =
10004 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
10005 phba->sli4_hba.max_cfg_param.max_vfi =
10006 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
10007 phba->sli4_hba.max_cfg_param.vfi_base =
10008 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
10009 phba->sli4_hba.max_cfg_param.max_fcfi =
10010 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
10011 phba->sli4_hba.max_cfg_param.max_eq =
10012 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
10013 phba->sli4_hba.max_cfg_param.max_rq =
10014 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
10015 phba->sli4_hba.max_cfg_param.max_wq =
10016 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
10017 phba->sli4_hba.max_cfg_param.max_cq =
10018 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
10019 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
10020 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
10021 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
10022 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
10023 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
10024 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
10025 phba->max_vports = phba->max_vpi;
10027 /* Next decide on FPIN or Signal E2E CGN support
10028 * For congestion alarms and warnings valid combination are:
10029 * 1. FPIN alarms / FPIN warnings
10030 * 2. Signal alarms / Signal warnings
10031 * 3. FPIN alarms / Signal warnings
10032 * 4. Signal alarms / FPIN warnings
10034 * Initialize the adapter frequency to 100 mSecs
10036 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10037 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
10038 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
10040 if (lpfc_use_cgn_signal) {
10041 if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
10042 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
10043 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
10045 if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
10046 /* MUST support both alarm and warning
10047 * because EDC does not support alarm alone.
10049 if (phba->cgn_reg_signal !=
10050 EDC_CG_SIG_WARN_ONLY) {
10051 /* Must support both or none */
10052 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10053 phba->cgn_reg_signal =
10054 EDC_CG_SIG_NOTSUPPORTED;
10056 phba->cgn_reg_signal =
10057 EDC_CG_SIG_WARN_ALARM;
10058 phba->cgn_reg_fpin =
10059 LPFC_CGN_FPIN_NONE;
10064 /* Set the congestion initial signal and fpin values. */
10065 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
10066 phba->cgn_init_reg_signal = phba->cgn_reg_signal;
10068 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
10069 "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
10070 phba->cgn_reg_signal, phba->cgn_reg_fpin);
10072 lpfc_map_topology(phba, rd_config);
10073 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10074 "2003 cfg params Extents? %d "
10079 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
10080 phba->sli4_hba.extents_in_use,
10081 phba->sli4_hba.max_cfg_param.xri_base,
10082 phba->sli4_hba.max_cfg_param.max_xri,
10083 phba->sli4_hba.max_cfg_param.vpi_base,
10084 phba->sli4_hba.max_cfg_param.max_vpi,
10085 phba->sli4_hba.max_cfg_param.vfi_base,
10086 phba->sli4_hba.max_cfg_param.max_vfi,
10087 phba->sli4_hba.max_cfg_param.rpi_base,
10088 phba->sli4_hba.max_cfg_param.max_rpi,
10089 phba->sli4_hba.max_cfg_param.max_fcfi,
10090 phba->sli4_hba.max_cfg_param.max_eq,
10091 phba->sli4_hba.max_cfg_param.max_cq,
10092 phba->sli4_hba.max_cfg_param.max_wq,
10093 phba->sli4_hba.max_cfg_param.max_rq,
10097 * Calculate queue resources based on how
10098 * many WQ/CQ/EQs are available.
10100 qmin = phba->sli4_hba.max_cfg_param.max_wq;
10101 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
10102 qmin = phba->sli4_hba.max_cfg_param.max_cq;
10103 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
10104 qmin = phba->sli4_hba.max_cfg_param.max_eq;
10106 * Whats left after this can go toward NVME / FCP.
10107 * The minus 4 accounts for ELS, NVME LS, MBOX
10108 * plus one extra. When configured for
10109 * NVMET, FCP io channel WQs are not created.
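		/*
		 * Worked example with hypothetical firmware limits: if
		 * READ_CONFIG reports max_wq = 128, max_cq = 160 and
		 * max_eq = 32, then qmin = 32 - 4 = 28, and any request
		 * for more than 28 IRQ channels or hardware queues is
		 * trimmed to 28 below.
		 */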
		/* Check to see if there is enough for NVME */
		if ((phba->cfg_irq_chann > qmin) ||
		    (phba->cfg_hdw_queue > qmin)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2005 Reducing Queues - "
					"FW resource limitation: "
					"WQ %d CQ %d EQ %d: min %d: "
					"IRQ %d HDWQ %d\n",
					phba->sli4_hba.max_cfg_param.max_wq,
					phba->sli4_hba.max_cfg_param.max_cq,
					phba->sli4_hba.max_cfg_param.max_eq,
					qmin, phba->cfg_irq_chann,
					phba->cfg_hdw_queue);

			if (phba->cfg_irq_chann > qmin)
				phba->cfg_irq_chann = qmin;
			if (phba->cfg_hdw_queue > qmin)
				phba->cfg_hdw_queue = qmin;
		}
	}

	if (rc)
		goto read_cfg_out;

	/* Update link speed if forced link speed is supported */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
		forced_link_speed =
			bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
		if (forced_link_speed) {
			phba->hba_flag |= HBA_FORCED_LINK_SPEED;

			switch (forced_link_speed) {
			case LINK_SPEED_1G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_1G;
				break;
			case LINK_SPEED_2G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_2G;
				break;
			case LINK_SPEED_4G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_4G;
				break;
			case LINK_SPEED_8G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_8G;
				break;
			case LINK_SPEED_10G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_10G;
				break;
			case LINK_SPEED_16G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_16G;
				break;
			case LINK_SPEED_32G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_32G;
				break;
			case LINK_SPEED_64G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_64G;
				break;
			case 0xffff:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
				break;
			default:
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"0047 Unrecognized link "
						"speed : %d\n",
						forced_link_speed);
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
			}
		}
	}

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	length = phba->sli4_hba.max_cfg_param.max_xri -
			lpfc_sli4_get_els_iocb_cnt(phba);
	if (phba->cfg_hba_queue_depth > length) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3361 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, length);
		phba->cfg_hba_queue_depth = length;
	}

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* get the pf# and vf# for SLI4 if_type 2 port */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc2 || shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3026 Mailbox failed , mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		goto read_cfg_out;
	}

	/* search for fc_fcoe resource descriptor */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;

	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
		goto read_cfg_out;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resource Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to setup the port-side endian order when
 * the port if_type is 0. This routine has no function for other
 * if_types.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_setup_endian_order(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t if_type, rc = 0;
	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
				      HOST_ENDIAN_HIGH_WORD1};

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0492 Unable to allocate memory for "
					"issuing SLI_CONFIG_SPECIAL mailbox "
					"command\n");
			return -ENOMEM;
		}

		/*
		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
		 * two words to contain special data values and no other data.
		 */
		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0493 SLI_CONFIG_SPECIAL mailbox "
					"failed with status x%x\n",
					rc);
			rc = -EIO;
		}
		mempool_free(mboxq, phba->mbox_mem_pool);
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	return rc;
}
/**
 * lpfc_sli4_queue_verify - Verify and update EQ counts
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to check the user settable queue counts for EQs.
 * After this routine is called the counts will be set to valid values that
 * adhere to the constraints of the system's interrupt vectors and the port's
 * queue resources.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 **/
static int
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
{
	/*
	 * Sanity check for configured queue parameters against the run-time
	 * device parameters
	 */

	if (phba->nvmet_support) {
		if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
			phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
		if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
			phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
			phba->cfg_hdw_queue, phba->cfg_irq_chann,
			phba->cfg_nvmet_mrq);

	/* Get EQ depth from module parameter, fake the default for now */
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;

	/* Get CQ depth from module parameter, fake the default for now */
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
	return 0;
}
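/* Allocate one fast-path IO CQ/WQ pair for hardware queue @idx and hang
 * the pair off phba->sli4_hba.hdwq[idx]. Returns 0 on success, 1 on any
 * allocation failure.
 */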
static int
lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
{
	struct lpfc_queue *qdesc;
	u32 wqesize;
	int cpu;

	cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
	/* Create Fast Path IO CQs */
	if (phba->enab_exp_wqcq_pages)
		/* Increase the CQ size when WQEs contain an embedded cdb */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
					      phba->sli4_hba.cq_esize,
					      LPFC_CQE_EXP_COUNT, cpu);
	else
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0499 Failed allocate fast-path IO CQ (%d)\n",
				idx);
		return 1;
	}
	qdesc->qe_valid = 1;
	qdesc->hdwq = idx;
	qdesc->chann = cpu;
	phba->sli4_hba.hdwq[idx].io_cq = qdesc;

	/* Create Fast Path IO WQs */
	if (phba->enab_exp_wqcq_pages) {
		/* Increase the WQ size when WQEs contain an embedded cdb */
		wqesize = (phba->fcp_embed_io) ?
			LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
					      wqesize,
					      LPFC_WQE_EXP_COUNT, cpu);
	} else
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount, cpu);

	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0503 Failed allocate fast-path IO WQ (%d)\n",
				idx);
		return 1;
	}
	qdesc->hdwq = idx;
	qdesc->chann = cpu;
	phba->sli4_hba.hdwq[idx].io_wq = qdesc;
	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	return 0;
}
/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as placeholder.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	int idx, cpu, eqcpu;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_vector_map_info *eqcpup;
	struct lpfc_eq_intr_info *eqi;

	/*
	 * Create HBA Record arrays.
	 * Both NVME and FCP will share the same vectors / EQs
	 */
	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;

	if (!phba->sli4_hba.hdwq) {
		phba->sli4_hba.hdwq = kcalloc(
			phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
			GFP_KERNEL);
		if (!phba->sli4_hba.hdwq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6427 Failed allocate memory for "
					"fast-path Hardware Queue array\n");
			goto out_error;
		}
		/* Prepare hardware queues to take IO buffers */
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			qp = &phba->sli4_hba.hdwq[idx];
			spin_lock_init(&qp->io_buf_list_get_lock);
			spin_lock_init(&qp->io_buf_list_put_lock);
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->get_io_bufs = 0;
			qp->put_io_bufs = 0;
			qp->total_io_bufs = 0;
			spin_lock_init(&qp->abts_io_buf_list_lock);
			INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
			qp->abts_scsi_io_bufs = 0;
			qp->abts_nvme_io_bufs = 0;
			INIT_LIST_HEAD(&qp->sgl_list);
			INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
			spin_lock_init(&qp->hdwq_lock);
		}
	}

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		if (phba->nvmet_support) {
			phba->sli4_hba.nvmet_cqset = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_cqset) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3121 Fail allocate memory for "
					"fast-path CQ set array\n");
				goto out_error;
			}
			phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_mrq_hdr) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3122 Fail allocate memory for "
					"fast-path RQ set hdr array\n");
				goto out_error;
			}
			phba->sli4_hba.nvmet_mrq_data = kcalloc(
					phba->cfg_nvmet_mrq,
					sizeof(struct lpfc_queue *),
					GFP_KERNEL);
			if (!phba->sli4_hba.nvmet_mrq_data) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3124 Fail allocate memory for "
					"fast-path RQ set data array\n");
				goto out_error;
			}
		}
	}

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);

	/* Create HBA Event Queues (EQs) */
	for_each_present_cpu(cpu) {
		/* We only want to create 1 EQ per vector, even though
		 * multiple CPUs might be using that vector, so we only
		 * select the CPUs that are marked LPFC_CPU_FIRST_IRQ.
		 */
		cpup = &phba->sli4_hba.cpu_map[cpu];
		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
			continue;

		/* Get a ptr to the Hardware Queue associated with this CPU */
		qp = &phba->sli4_hba.hdwq[cpup->hdwq];

		/* Allocate an EQ */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount, cpu);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0497 Failed allocate EQ (%d)\n",
					cpup->hdwq);
			goto out_error;
		}
		qdesc->qe_valid = 1;
		qdesc->hdwq = cpup->hdwq;
		qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
		qdesc->last_cpu = qdesc->chann;

		/* Save the allocated EQ in the Hardware Queue */
		qp->hba_eq = qdesc;

		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
		list_add(&qdesc->cpu_list, &eqi->list);
	}

	/* Now we need to populate the other Hardware Queues, that share
	 * an IRQ vector, with the associated EQ ptr.
	 */
	for_each_present_cpu(cpu) {
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Check for EQ already allocated in previous loop */
		if (cpup->flag & LPFC_CPU_FIRST_IRQ)
			continue;

		/* Check for multiple CPUs per hdwq */
		qp = &phba->sli4_hba.hdwq[cpup->hdwq];
		if (qp->hba_eq)
			continue;

		/* We need to share an EQ for this hdwq */
		eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
		eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
		qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
	}

	/* Allocate IO Path SLI4 CQ/WQs */
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		if (lpfc_alloc_io_wq_cq(phba, idx))
			goto out_error;
	}

	if (phba->nvmet_support) {
		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
			cpu = lpfc_find_cpu_handle(phba, idx,
						   LPFC_FIND_BY_HDWQ);
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.cq_esize,
						      phba->sli4_hba.cq_ecount,
						      cpu);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3142 Failed allocate NVME "
						"CQ Set (%d)\n", idx);
				goto out_error;
			}
			qdesc->qe_valid = 1;
			qdesc->hdwq = idx;
			qdesc->chann = cpu;
			phba->sli4_hba.nvmet_cqset[idx] = qdesc;
		}
	}

	/*
	 * Create Slow Path Completion Queues (CQs)
	 */

	cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
	/* Create slow-path Mailbox Command Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_error;
	}
	qdesc->qe_valid = 1;
	phba->sli4_hba.mbx_cq = qdesc;

	/* Create slow-path ELS Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_error;
	}
	qdesc->qe_valid = 1;
	qdesc->chann = cpu;
	phba->sli4_hba.els_cq = qdesc;

	/*
	 * Create Slow Path Work Queues (WQs)
	 */

	/* Create Mailbox Command Queue */

	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_error;
	}
	qdesc->chann = cpu;
	phba->sli4_hba.mbx_wq = qdesc;

	/*
	 * Create ELS Work Queues
	 */

	/* Create slow-path ELS Work Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.wq_esize,
				      phba->sli4_hba.wq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_error;
	}
	qdesc->chann = cpu;
	phba->sli4_hba.els_wq = qdesc;
	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Create NVME LS Complete Queue */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount, cpu);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6079 Failed allocate NVME LS CQ\n");
			goto out_error;
		}
		qdesc->chann = cpu;
		qdesc->qe_valid = 1;
		phba->sli4_hba.nvmels_cq = qdesc;

		/* Create NVME LS Work Queue */
		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
					      phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount, cpu);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6080 Failed allocate NVME LS WQ\n");
			goto out_error;
		}
		qdesc->chann = cpu;
		phba->sli4_hba.nvmels_wq = qdesc;
		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	}

	/*
	 * Create Receive Queue (RQ)
	 */

	/* Create Receive Queue for header */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0506 Failed allocate receive HRQ\n");
		goto out_error;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* Create Receive Queue for data */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount, cpu);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0507 Failed allocate receive DRQ\n");
		goto out_error;
	}
	phba->sli4_hba.dat_rq = qdesc;

	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
	    phba->nvmet_support) {
		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
			cpu = lpfc_find_cpu_handle(phba, idx,
						   LPFC_FIND_BY_HDWQ);
			/* Create NVMET Receive Queue for header */
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.rq_esize,
						      LPFC_NVMET_RQE_DEF_COUNT,
						      cpu);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3146 Failed allocate "
						"receive HRQ\n");
				goto out_error;
			}
			qdesc->hdwq = idx;
			phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;

			/* Only needed for header of RQ pair */
			qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
						   GFP_KERNEL,
						   cpu_to_node(cpu));
			if (qdesc->rqbp == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6131 Failed allocate "
						"Header RQBP\n");
				goto out_error;
			}

			/* Put list in known state in case driver load fails. */
			INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);

			/* Create NVMET Receive Queue for data */
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.rq_esize,
						      LPFC_NVMET_RQE_DEF_COUNT,
						      cpu);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3156 Failed allocate "
						"receive DRQ\n");
				goto out_error;
			}
			qdesc->hdwq = idx;
			phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
		}
	}

	/* Clear NVME stats */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
			       sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
		}
	}

	/* Clear SCSI stats */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
			memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
			       sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
		}
	}

	return 0;

out_error:
	lpfc_sli4_queue_destroy(phba);
	return -ENOMEM;
}
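/* Free a single queue and clear the caller's pointer so a subsequent
 * release pass cannot double-free it.
 */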
static inline void
__lpfc_sli4_release_queue(struct lpfc_queue **qp)
{
	if (*qp != NULL) {
		lpfc_sli4_queue_free(*qp);
		*qp = NULL;
	}
}
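/* Release an array of @max queue pointers created by queue_create and
 * then free the array itself.
 */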
static inline void
lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
{
	int idx;

	if (*qs == NULL)
		return;

	for (idx = 0; idx < max; idx++)
		__lpfc_sli4_release_queue(&(*qs)[idx]);

	kfree(*qs);
	*qs = NULL;
}
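/* Free the IO CQ/WQ pairs hung off every hardware queue and the EQs
 * tracked in hba_eq_hdl, clearing each pointer as it goes.
 */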
static void
lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_queue *eq;
	int idx;

	hdwq = phba->sli4_hba.hdwq;

	/* Loop thru all Hardware Queues */
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		/* Free the CQ/WQ corresponding to the Hardware Queue */
		lpfc_sli4_queue_free(hdwq[idx].io_cq);
		lpfc_sli4_queue_free(hdwq[idx].io_wq);
		hdwq[idx].hba_eq = NULL;
		hdwq[idx].io_cq = NULL;
		hdwq[idx].io_wq = NULL;
		if (phba->cfg_xpsgl && !phba->nvmet_support)
			lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
		lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
	}
	/* Loop thru all IRQ vectors */
	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
		/* Free the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
		lpfc_sli4_queue_free(eq);
		phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
	}
}
/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
 * operation.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	/*
	 * Set FREE_INIT before beginning to free the queues.
	 * Wait until all users of the queues have acknowledged the
	 * release by clearing FREE_WAIT.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
	while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
		spin_unlock_irq(&phba->hbalock);
		msleep(20);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_sli4_cleanup_poll_list(phba);

	/* Release HBA eqs */
	if (phba->sli4_hba.hdwq)
		lpfc_sli4_release_hdwq(phba);

	if (phba->nvmet_support) {
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
					 phba->cfg_nvmet_mrq);

		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
					 phba->cfg_nvmet_mrq);
		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
					 phba->cfg_nvmet_mrq);
	}

	/* Release mailbox command work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);

	/* Release ELS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);

	/* Release NVME LS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);

	/* Release unsolicited receive queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
	__lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);

	/* Release ELS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);

	/* Release NVME LS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);

	/* Release mailbox command complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);

	/* Everything on this list has been freed */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);

	/* Done with freeing the queues */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
	spin_unlock_irq(&phba->hbalock);
}
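/* Drain an RQ's posted-buffer list, returning each buffer through the
 * queue's rqb_free_buffer callback and keeping buffer_count in step.
 */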
static void
lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_dmabuf *h_buf;
	struct rqb_dmabuf *rqb_buffer;

	rqbp = rq->rqbp;
	while (!list_empty(&rqbp->rqb_buffer_list)) {
		list_remove_head(&rqbp->rqb_buffer_list, h_buf,
				 struct lpfc_dmabuf, list);

		rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
		(rqbp->rqb_free_buffer)(phba, rqb_buffer);
		rqbp->buffer_count--;
	}
}
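/* Set up a CQ on the port and then its child WQ (or MQ for the mailbox
 * qtype) against the given EQ. For non-mailbox queues the new CQ id is
 * recorded in @cq_map for fast-path lookup.
 */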
static int
lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		  struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
		  int qidx, uint32_t qtype)
{
	struct lpfc_sli_ring *pring;
	int rc;

	if (!eq || !cq || !wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6085 Fast-path %s (%d) not allocated\n",
				((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
		return -ENOMEM;
	}

	/* create the CQ first */
	rc = lpfc_cq_create(phba, cq, eq,
			    (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6086 Failed setup of CQ (%d), rc = 0x%x\n",
				qidx, (uint32_t)rc);
		return rc;
	}

	if (qtype != LPFC_MBOX) {
		/* Setup cq_map for fast lookup */
		if (cq_map)
			*cq_map = cq->queue_id;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
			qidx, cq->queue_id, qidx, eq->queue_id);

		/* create the WQ */
		rc = lpfc_wq_create(phba, wq, cq, qtype);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
				qidx, (uint32_t)rc);
			/* no need to tear down cq - caller will do so */
			return rc;
		}

		/* Bind this CQ/WQ to the NVME ring */
		pring = wq->pring;
		pring->sli.sli4.wqp = (void *)wq;
		cq->pring = pring;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
			qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
	} else {
		rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0539 Failed setup of slow-path MQ: "
					"rc = 0x%x\n", rc);
			/* no need to tear down cq - caller will do so */
			return rc;
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.mbx_wq->queue_id,
			phba->sli4_hba.mbx_cq->queue_id);
	}

	return 0;
}
/**
 * lpfc_setup_cq_lookup - Setup the CQ lookup table
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will populate the cq_lookup table with all
 * available CQ queue_id's.
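 * The table provides a direct queue_id to struct lpfc_queue mapping, so
 * completion handling elsewhere in the driver can resolve a CQ by id
 * without walking each EQ's child list.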
 **/
static void
lpfc_setup_cq_lookup(struct lpfc_hba *phba)
{
	struct lpfc_queue *eq, *childq;
	int qidx;

	memset(phba->sli4_hba.cq_lookup, 0,
	       (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
	/* Loop thru all IRQ vectors */
	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
		if (!eq)
			continue;
		/* Loop through all CQs associated with that EQ */
		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id > phba->sli4_hba.cq_max)
				continue;
			if (childq->subtype == LPFC_IO)
				phba->sli4_hba.cq_lookup[childq->queue_id] =
					childq;
		}
	}
}
/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
 * operation.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_vector_map_info *cpup;
	struct lpfc_sli4_hdw_queue *qp;
	LPFC_MBOXQ_t *mboxq;
	int qidx, cpu;
	uint32_t length, usdelay;
	int rc = -ENOMEM;

	/* Check for dual-ULP support */
	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3249 Unable to allocate memory for "
				"QUERY_FW_CFG mailbox command\n");
		return -ENOMEM;
	}
	length = (sizeof(struct lpfc_mbx_query_fw_config) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3250 QUERY_FW_CFG mailbox failed with status "
				"x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		mempool_free(mboxq, phba->mbox_mem_pool);
		rc = -ENXIO;
		goto out_error;
	}

	phba->sli4_hba.fw_func_mode =
			mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
	phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
	phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
	phba->sli4_hba.physical_port =
			mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
			"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
			phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);

	mempool_free(mboxq, phba->mbox_mem_pool);

	/*
	 * Set up HBA Event Queues (EQs)
	 */
	qp = phba->sli4_hba.hdwq;

	/* Set up HBA event queue */
	if (!qp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3147 Fast-path EQs not allocated\n");
		rc = -ENOMEM;
		goto out_error;
	}

	/* Loop thru all IRQ vectors */
	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
		/* Create HBA Event Queues (EQs) in order */
		for_each_present_cpu(cpu) {
			cpup = &phba->sli4_hba.cpu_map[cpu];

			/* Look for the CPU thats using that vector with
			 * LPFC_CPU_FIRST_IRQ set.
			 */
			if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
				continue;
			if (qidx != cpup->eq)
				continue;

			/* Create an EQ for that vector */
			rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
					    phba->cfg_fcp_imax);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"0523 Failed setup of fast-path"
						" EQ (%d), rc = 0x%x\n",
						cpup->eq, (uint32_t)rc);
				goto out_destroy;
			}

			/* Save the EQ for that vector in the hba_eq_hdl */
			phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
				qp[cpup->hdwq].hba_eq;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2584 HBA EQ setup: queue[%d]-id=%d\n",
					cpup->eq,
					qp[cpup->hdwq].hba_eq->queue_id);
		}
	}

	/* Loop thru all Hardware Queues */
	for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
		cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
		cpup = &phba->sli4_hba.cpu_map[cpu];

		/* Create the CQ/WQ corresponding to the Hardware Queue */
		rc = lpfc_create_wq_cq(phba,
				       phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
				       phba->sli4_hba.hdwq[qidx].io_cq,
				       phba->sli4_hba.hdwq[qidx].io_wq,
				       &phba->sli4_hba.hdwq[qidx].io_cq_map,
				       qidx,
				       LPFC_IO);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0535 Failed to setup fastpath "
					"IO WQ/CQ (%d), rc = 0x%x\n",
					qidx, (uint32_t)rc);
			goto out_destroy;
		}
	}

	/*
	 * Set up Slow Path Complete Queues (CQs)
	 */

	/* Set up slow-path MBOX CQ/MQ */

	if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0528 %s not allocated\n",
				phba->sli4_hba.mbx_cq ?
				"Mailbox WQ" : "Mailbox CQ");
		rc = -ENOMEM;
		goto out_destroy;
	}

	rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
			       phba->sli4_hba.mbx_cq,
			       phba->sli4_hba.mbx_wq,
			       NULL, 0, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
			(uint32_t)rc);
		goto out_destroy;
	}
	if (phba->nvmet_support) {
		if (!phba->sli4_hba.nvmet_cqset) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3165 Fast-path NVME CQ Set "
					"array not allocated\n");
			rc = -ENOMEM;
			goto out_destroy;
		}
		if (phba->cfg_nvmet_mrq > 1) {
			rc = lpfc_cq_create_set(phba,
					phba->sli4_hba.nvmet_cqset,
					qp,
					LPFC_WCQ, LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3164 Failed setup of NVME CQ "
						"Set, rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}
		} else {
			/* Set up NVMET Receive Complete Queue */
			rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
					    qp[0].hba_eq,
					    LPFC_WCQ, LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6089 Failed setup NVMET CQ: "
						"rc = 0x%x\n", (uint32_t)rc);
				goto out_destroy;
			}
			phba->sli4_hba.nvmet_cqset[0]->chann = 0;

			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"6090 NVMET CQ setup: cq-id=%d, "
					"parent eq-id=%d\n",
					phba->sli4_hba.nvmet_cqset[0]->queue_id,
					qp[0].hba_eq->queue_id);
		}
	}

	/* Set up slow-path ELS WQ/CQ */
	if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0530 ELS %s not allocated\n",
				phba->sli4_hba.els_cq ? "WQ" : "CQ");
		rc = -ENOMEM;
		goto out_destroy;
	}
	rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
			       phba->sli4_hba.els_cq,
			       phba->sli4_hba.els_wq,
			       NULL, 0, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
				(uint32_t)rc);
		goto out_destroy;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.els_wq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Set up NVME LS Complete Queue */
		if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6091 LS %s not allocated\n",
					phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
			rc = -ENOMEM;
			goto out_destroy;
		}
		rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
				       phba->sli4_hba.nvmels_cq,
				       phba->sli4_hba.nvmels_wq,
				       NULL, 0, LPFC_NVME_LS);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0526 Failed setup of NVME LS WQ/CQ: "
					"rc = 0x%x\n", (uint32_t)rc);
			goto out_destroy;
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"6096 NVME LS WQ setup: wq-id=%d, "
				"parent cq-id=%d\n",
				phba->sli4_hba.nvmels_wq->queue_id,
				phba->sli4_hba.nvmels_cq->queue_id);
	}

	/*
	 * Create NVMET Receive Queue (RQ)
	 */
	if (phba->nvmet_support) {
		if ((!phba->sli4_hba.nvmet_cqset) ||
		    (!phba->sli4_hba.nvmet_mrq_hdr) ||
		    (!phba->sli4_hba.nvmet_mrq_data)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6130 MRQ CQ Queues not "
					"allocated\n");
			rc = -ENOMEM;
			goto out_destroy;
		}
		if (phba->cfg_nvmet_mrq > 1) {
			rc = lpfc_mrq_create(phba,
					     phba->sli4_hba.nvmet_mrq_hdr,
					     phba->sli4_hba.nvmet_mrq_data,
					     phba->sli4_hba.nvmet_cqset,
					     LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6098 Failed setup of NVMET "
						"MRQ: rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}

		} else {
			rc = lpfc_rq_create(phba,
					    phba->sli4_hba.nvmet_mrq_hdr[0],
					    phba->sli4_hba.nvmet_mrq_data[0],
					    phba->sli4_hba.nvmet_cqset[0],
					    LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6057 Failed setup of NVMET "
						"Receive Queue: rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}

			lpfc_printf_log(
				phba, KERN_INFO, LOG_INIT,
				"6099 NVMET RQ setup: hdr-rq-id=%d, "
				"dat-rq-id=%d parent cq-id=%d\n",
				phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
				phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
				phba->sli4_hba.nvmet_cqset[0]->queue_id);

		}
	}

	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0540 Receive Queue not allocated\n");
		rc = -ENOMEM;
		goto out_destroy;
	}

	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			    phba->sli4_hba.els_cq, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0541 Failed setup of Receive Queue: "
				"rc = 0x%x\n", (uint32_t)rc);
		goto out_destroy;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
			"parent cq-id=%d\n",
			phba->sli4_hba.hdr_rq->queue_id,
			phba->sli4_hba.dat_rq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	if (phba->cfg_fcp_imax)
		usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
	else
		usdelay = 0;

	for (qidx = 0; qidx < phba->cfg_irq_chann;
	     qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
		lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
					 usdelay);

	if (phba->sli4_hba.cq_max) {
		kfree(phba->sli4_hba.cq_lookup);
		phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
			sizeof(struct lpfc_queue *), GFP_KERNEL);
		if (!phba->sli4_hba.cq_lookup) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0549 Failed setup of CQ Lookup table: "
					"size 0x%x\n", phba->sli4_hba.cq_max);
			rc = -ENOMEM;
			goto out_destroy;
		}
		lpfc_setup_cq_lookup(phba);
	}
	return 0;

out_destroy:
	lpfc_sli4_queue_unset(phba);
out_error:
	return rc;
}
/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
 * operation.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_queue *eq;
	int qidx;

	/* Unset mailbox command work queue */
	if (phba->sli4_hba.mbx_wq)
		lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);

	/* Unset NVME LS work queue */
	if (phba->sli4_hba.nvmels_wq)
		lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);

	/* Unset ELS work queue */
	if (phba->sli4_hba.els_wq)
		lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);

	/* Unset unsolicited receive queue */
	if (phba->sli4_hba.hdr_rq)
		lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
				phba->sli4_hba.dat_rq);

	/* Unset mailbox command complete queue */
	if (phba->sli4_hba.mbx_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);

	/* Unset ELS complete queue */
	if (phba->sli4_hba.els_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);

	/* Unset NVME LS complete queue */
	if (phba->sli4_hba.nvmels_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);

	if (phba->nvmet_support) {
		/* Unset NVMET MRQ queue */
		if (phba->sli4_hba.nvmet_mrq_hdr) {
			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
				lpfc_rq_destroy(
					phba,
					phba->sli4_hba.nvmet_mrq_hdr[qidx],
					phba->sli4_hba.nvmet_mrq_data[qidx]);
		}

		/* Unset NVMET CQ Set complete queue */
		if (phba->sli4_hba.nvmet_cqset) {
			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
				lpfc_cq_destroy(
					phba, phba->sli4_hba.nvmet_cqset[qidx]);
		}
	}

	/* Unset fast-path SLI4 queues */
	if (phba->sli4_hba.hdwq) {
		/* Loop thru all Hardware Queues */
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			/* Destroy the CQ/WQ corresponding to Hardware Queue */
			qp = &phba->sli4_hba.hdwq[qidx];
			lpfc_wq_destroy(phba, qp->io_wq);
			lpfc_cq_destroy(phba, qp->io_cq);
		}
		/* Loop thru all IRQ vectors */
		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
			/* Destroy the EQ corresponding to the IRQ vector */
			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
			lpfc_eq_destroy(phba, eq);
		}
	}

	kfree(phba->sli4_hba.cq_lookup);
	phba->sli4_hba.cq_lookup = NULL;
	phba->sli4_hba.cq_max = 0;
}
/**
 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and set up a pool of completion queue
 * events. The body of the completion queue event is a completion queue entry
 * CQE. For now, this pool is used for the interrupt service routine to queue
 * the following HBA completion queue events for the worker thread to process:
 * - Mailbox asynchronous events
 * - Receive queue completion unsolicited events
 * Later, this can be used for all the slow-path events.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 **/
static int
lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	int i;

	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
		if (!cq_event)
			goto out_pool_create_fail;
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_cqe_event_pool);
	}
	return 0;

out_pool_create_fail:
	lpfc_sli4_cq_event_pool_destroy(phba);
	return -ENOMEM;
}

/**
 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the pool of completion queue events at
 * driver unload time. Note that it is the responsibility of the driver
 * cleanup routine to free all the outstanding completion-queue events
 * allocated from this pool back into the pool before invoking this routine
 * to destroy the pool.
 **/
static void
lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event, *next_cq_event;

	list_for_each_entry_safe(cq_event, next_cq_event,
				 &phba->sli4_hba.sp_cqe_event_pool, list) {
		list_del(&cq_event->list);
		kfree(cq_event);
	}
}

/**
 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock free version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event = NULL;

	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
			 struct lpfc_cq_event, list);
	return cq_event;
}

/**
 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}

/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock free version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}

/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns all the pending completion-queue events back into
 * the free pool for device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cq_event_list);
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */

	/* Pending ELS XRI abort events */
	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cq_event_list);
	spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);

	/* Pending async events */
	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cq_event_list);
	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);

	while (!list_empty(&cq_event_list)) {
		list_remove_head(&cq_event_list, cq_event,
				 struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It destroys
 * all resources assigned to the PCI function which originates this request.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_pci_function_reset(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0, if_type;
	uint32_t shdr_status, shdr_add_status;
	uint32_t rdy_chk;
	uint32_t port_reset = 0;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_register reg_data;
	uint16_t devid;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0494 Unable to allocate memory for "
					"issuing SLI_FUNCTION_RESET mailbox "
					"command\n");
			return -ENOMEM;
		}

		/* Setup PCI function reset mailbox-ioctl command */
		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
				 LPFC_SLI4_MBX_EMBED);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		mempool_free(mboxq, phba->mbox_mem_pool);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0495 SLI_FUNCTION_RESET mailbox "
					"failed with status x%x add_status x%x,"
					" mbx status x%x\n",
					shdr_status, shdr_add_status, rc);
			rc = -ENXIO;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
wait:
		/*
		 * Poll the Port Status Register and wait for RDY for
		 * up to 30 seconds. If the port doesn't respond, treat
		 * it as an error.
		 */
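		/* 1500 polls with a 20 msec sleep between reads gives the
		 * ~30 second budget noted above.
		 */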
		for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
			if (lpfc_readl(phba->sli4_hba.u.if_type2.
				STATUSregaddr, &reg_data.word0)) {
				rc = -ENODEV;
				goto out;
			}
			if (bf_get(lpfc_sliport_status_rdy, &reg_data))
				break;
			msleep(20);
		}

		if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
			phba->work_status[0] = readl(
				phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] = readl(
				phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2890 Port not ready, port status reg "
					"0x%x error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					phba->work_status[0],
					phba->work_status[1]);
			rc = -ENODEV;
			goto out;
		}

		if (bf_get(lpfc_sliport_status_pldv, &reg_data))
			lpfc_pldv_detect = true;

		if (!port_reset) {
			/*
			 * Reset the port now
			 */
			reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
			       CTRLregaddr);
			/* flush */
			pci_read_config_word(phba->pcidev,
					     PCI_DEVICE_ID, &devid);

			port_reset = 1;
			msleep(20);
			goto wait;
		} else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
			rc = -ENODEV;
			goto out;
		}
		break;

	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}

out:
	/* Catch the not-ready port failure after a port reset. */
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3317 HBA not functional: IP Reset Failed "
				"try: echo fw_reset > board_mode\n");
		rc = -ENODEV;
	}

	return rc;
}
/**
 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-4 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	unsigned long bar0map_len, bar1map_len, bar2map_len;
	int error;
	uint32_t if_type;

	if (!pdev)
		return -ENODEV;

	/* Set the device DMA mask size */
	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error)
		return error;

	/*
	 * The BARs and register set definitions and offset locations are
	 * dependent on the if_type.
	 */
	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
				  &phba->sli4_hba.sli_intf.word0)) {
		return -ENODEV;
	}

	/* There is no SLI3 failback for SLI4 devices. */
	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_VALID) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2894 SLI_INTF reg contents invalid "
				"sli_intf reg 0x%x\n",
				phba->sli4_hba.sli_intf.word0);
		return -ENODEV;
	}

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	/*
	 * Get the bus address of SLI4 device Bar regions and the
	 * number of bytes required by each mapping. The mapping of the
	 * particular PCI BARs regions is dependent on the type of
	 * SLI4 device.
	 */
	if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
		phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
		bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);

		/*
		 * Map SLI4 PCI Config Space Register base to a kernel virtual
		 * addr
		 */
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			return -ENODEV;
		}
		phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
		/* Set up BAR0 PCI config space register memory map */
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	} else {
		phba->pci_bar0_map = pci_resource_start(pdev, 1);
		bar0map_len = pci_resource_len(pdev, 1);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
			return -ENODEV;
		}
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			return -ENODEV;
		}
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	}

	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
		if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
			/*
			 * Map SLI4 if type 0 HBA Control Register base to a
			 * kernel virtual address and setup the registers.
			 */
			phba->pci_bar1_map = pci_resource_start(pdev,
								PCI_64BIT_BAR2);
			bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
			phba->sli4_hba.ctrl_regs_memmap_p =
				ioremap(phba->pci_bar1_map,
					bar1map_len);
			if (!phba->sli4_hba.ctrl_regs_memmap_p) {
				dev_err(&pdev->dev,
					"ioremap failed for SLI4 HBA "
					"control registers.\n");
				error = -ENOMEM;
				goto out_iounmap_conf;
			}
			phba->pci_bar2_memmap_p =
					phba->sli4_hba.ctrl_regs_memmap_p;
			lpfc_sli4_bar1_register_memmap(phba, if_type);
		} else {
			error = -ENOMEM;
			goto out_iounmap_conf;
		}
	}

	if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
	    (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
		/*
		 * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
		bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
		phba->sli4_hba.drbl_regs_memmap_p =
			ioremap(phba->pci_bar1_map, bar1map_len);
		if (!phba->sli4_hba.drbl_regs_memmap_p) {
			dev_err(&pdev->dev,
				"ioremap failed for SLI4 HBA doorbell registers.\n");
			error = -ENOMEM;
			goto out_iounmap_conf;
		}
		phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
		lpfc_sli4_bar1_register_memmap(phba, if_type);
	}

	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
		if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
			/*
			 * Map SLI4 if type 0 HBA Doorbell Register base to
			 * a kernel virtual address and setup the registers.
			 */
			phba->pci_bar2_map = pci_resource_start(pdev,
								PCI_64BIT_BAR4);
			bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
			phba->sli4_hba.drbl_regs_memmap_p =
				ioremap(phba->pci_bar2_map,
					bar2map_len);
			if (!phba->sli4_hba.drbl_regs_memmap_p) {
				dev_err(&pdev->dev,
					"ioremap failed for SLI4 HBA"
					" doorbell registers.\n");
				error = -ENOMEM;
				goto out_iounmap_ctrl;
			}
			phba->pci_bar4_memmap_p =
					phba->sli4_hba.drbl_regs_memmap_p;
			error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
			if (error)
				goto out_iounmap_all;
		} else {
			error = -ENOMEM;
			goto out_iounmap_all;
		}
	}

	if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
	    pci_resource_start(pdev, PCI_64BIT_BAR4)) {
		/*
		 * Map SLI4 if type 6 HBA DPP Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
		bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
		phba->sli4_hba.dpp_regs_memmap_p =
			ioremap(phba->pci_bar2_map, bar2map_len);
		if (!phba->sli4_hba.dpp_regs_memmap_p) {
			dev_err(&pdev->dev,
				"ioremap failed for SLI4 HBA dpp registers.\n");
			error = -ENOMEM;
			goto out_iounmap_ctrl;
		}
		phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
	}

	/* Set up the EQ/CQ register handling functions now */
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
	case LPFC_SLI_INTF_IF_TYPE_2:
		phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
		phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
		phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
		phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
		phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
		break;
	default:
		break;
	}

	return 0;

out_iounmap_all:
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
	iounmap(phba->sli4_hba.conf_regs_memmap_p);

	return error;
}
/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-4 interface spec.
 **/
static void
lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
{
	uint32_t if_type;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);

	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_6:
		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		if (phba->sli4_hba.dpp_regs_memmap_p)
			iounmap(phba->sli4_hba.dpp_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}
/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-3 interface specs.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
	int rc;
	LPFC_MBOXQ_t *pmb;
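	/* SLI-3 MSI-X uses exactly two vectors: vector 0 is wired to the
	 * slow-path handler and vector 1 to the fast-path handler.
	 */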
12085 /* Set up MSI-X multi-message vectors */
12086 rc = pci_alloc_irq_vectors(phba->pcidev,
12087 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
12088 if (rc < 0) {
12089 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12090 "0420 PCI enable MSI-X failed (%d)\n", rc);
12091 goto vec_fail_out;
12092 }
12094 /*
12095 * Assign MSI-X vectors to interrupt handlers
12096 */
12098 /* vector-0 is associated to slow-path handler */
12099 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
12100 &lpfc_sli_sp_intr_handler, 0,
12101 LPFC_SP_DRIVER_HANDLER_NAME, phba);
12102 if (rc) {
12103 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12104 "0421 MSI-X slow-path request_irq failed "
12105 "(%d)\n", rc);
12106 goto msi_fail_out;
12107 }
12109 /* vector-1 is associated to fast-path handler */
12110 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
12111 &lpfc_sli_fp_intr_handler, 0,
12112 LPFC_FP_DRIVER_HANDLER_NAME, phba);
12114 if (rc) {
12115 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12116 "0429 MSI-X fast-path request_irq failed "
12117 "(%d)\n", rc);
12118 goto irq_fail_out;
12119 }
12121 /*
12122 * Configure HBA MSI-X attention conditions to messages
12123 */
12124 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12125 if (!pmb) {
12126 rc = -ENOMEM;
12128 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12129 "0474 Unable to allocate memory for issuing "
12130 "MBOX_CONFIG_MSI command\n");
12131 goto mem_fail_out;
12132 }
12133 rc = lpfc_config_msi(phba, pmb);
12134 if (rc)
12135 goto mem_fail_out;
12136 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
12137 if (rc != MBX_SUCCESS) {
12138 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
12139 "0351 Config MSI mailbox command failed, "
12140 "mbxCmd x%x, mbxStatus x%x\n",
12141 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
12142 goto mem_fail_out;
12143 }
12145 /* Free memory allocated for mailbox command */
12146 mempool_free(pmb, phba->mbox_mem_pool);
12147 return rc;
12149 mem_fail_out:
12150 /* Free memory allocated for mailbox command */
12151 mempool_free(pmb, phba->mbox_mem_pool);
12153 msi_fail_out:
12154 /* free the irq already requested */
12155 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
12157 irq_fail_out:
12158 /* free the irq already requested */
12159 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
12161 vec_fail_out:
12162 /* Unconfigure MSI-X capability structure */
12163 pci_free_irq_vectors(phba->pcidev);
12164 return rc;
12165 }
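/*
 * Illustrative sketch (hypothetical names, not lpfc API) of the fixed
 * two-vector MSI-X pattern above: allocate exactly two vectors, attach one
 * handler per vector via pci_irq_vector(), and release in reverse order on
 * failure. <linux/pci.h> and <linux/interrupt.h> are already included.
 */
static irqreturn_t example_slow_handler(int irq, void *data) { return IRQ_HANDLED; }
static irqreturn_t example_fast_handler(int irq, void *data) { return IRQ_HANDLED; }

static int example_enable_two_msix(struct pci_dev *pdev, void *drvdata)
{
	int rc = pci_alloc_irq_vectors(pdev, 2, 2, PCI_IRQ_MSIX);

	if (rc < 0)
		return rc;
	rc = request_irq(pci_irq_vector(pdev, 0), example_slow_handler, 0,
			 "example-sp", drvdata);
	if (rc)
		goto free_vectors;
	rc = request_irq(pci_irq_vector(pdev, 1), example_fast_handler, 0,
			 "example-fp", drvdata);
	if (rc)
		goto free_irq0;
	return 0;

free_irq0:
	free_irq(pci_irq_vector(pdev, 0), drvdata);
free_vectors:
	pci_free_irq_vectors(pdev);
	return rc;
}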
12169 /**
12170 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
12171 * @phba: pointer to lpfc hba data structure.
12173 * This routine is invoked to enable the MSI interrupt mode to device with
12174 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
12175 * enable the MSI vector. The device driver is responsible for calling
12176 * request_irq() to register the MSI vector with an interrupt handler, which
12177 * is done in this function.
12179 * Return codes
12180 * 0 - successful
12181 * other values - error
12182 **/
12183 static int
12184 lpfc_sli_enable_msi(struct lpfc_hba *phba)
12185 {
12186 int rc;
12188 rc = pci_enable_msi(phba->pcidev);
12189 if (!rc)
12190 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12191 "0012 PCI enable MSI mode success.\n");
12192 else {
12193 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12194 "0471 PCI enable MSI mode failed (%d)\n", rc);
12195 return rc;
12196 }
12198 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12199 0, LPFC_DRIVER_NAME, phba);
12200 if (rc) {
12201 pci_disable_msi(phba->pcidev);
12202 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12203 "0478 MSI request_irq failed (%d)\n", rc);
12204 }
12205 return rc;
12206 }
12208 /**
12209 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
12210 * @phba: pointer to lpfc hba data structure.
12211 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
12213 * This routine is invoked to enable device interrupt and associate driver's
12214 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
12215 * spec. Depending on the interrupt mode configured for the driver, the
12216 * driver will try to fall back from the configured interrupt mode to an
12217 * interrupt mode which is supported by the platform, kernel, and device, in
12218 * the order of:
12219 * MSI-X -> MSI -> IRQ.
12221 * Return codes
12222 * 0 - successful
12223 * other values - error
12224 **/
12225 static uint32_t
12226 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
12227 {
12228 uint32_t intr_mode = LPFC_INTR_ERROR;
12229 int retval;
12231 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
12232 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
12233 if (retval)
12234 return intr_mode;
12235 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
12237 if (cfg_mode == 2) {
12238 /* Now, try to enable MSI-X interrupt mode */
12239 retval = lpfc_sli_enable_msix(phba);
12240 if (!retval) {
12241 /* Indicate initialization to MSI-X mode */
12242 phba->intr_type = MSIX;
12243 intr_mode = 2;
12244 }
12245 }
12247 /* Fallback to MSI if MSI-X initialization failed */
12248 if (cfg_mode >= 1 && phba->intr_type == NONE) {
12249 retval = lpfc_sli_enable_msi(phba);
12250 if (!retval) {
12251 /* Indicate initialization to MSI mode */
12252 phba->intr_type = MSI;
12253 intr_mode = 1;
12254 }
12255 }
12257 /* Fallback to INTx if both MSI-X/MSI initialization failed */
12258 if (phba->intr_type == NONE) {
12259 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12260 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
12261 if (!retval) {
12262 /* Indicate initialization to INTx mode */
12263 phba->intr_type = INTx;
12264 intr_mode = 0;
12265 }
12266 }
12267 return intr_mode;
12268 }
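/*
 * Sketch (hypothetical helpers, not lpfc API) of how a caller consumes the
 * MSI-X -> MSI -> INTx fallback above: start from the configured mode and
 * step down one level after each failed active-interrupt test, mirroring
 * the probe-time retry loop later in this file.
 */
static bool example_intr_test_passed(struct lpfc_hba *phba, uint32_t intr_mode)
{
	/* hypothetical stand-in for the sli_intr activity check in probe */
	return intr_mode == 0 ||
	       phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS;
}

static uint32_t example_bring_up_intr(struct lpfc_hba *phba)
{
	uint32_t cfg_mode = phba->cfg_use_msi;	/* 2=MSI-X, 1=MSI, 0=INTx */
	uint32_t intr_mode;

	for (;;) {
		lpfc_stop_port(phba);
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR)
			break;			/* all modes exhausted */
		if (example_intr_test_passed(phba, intr_mode))
			break;			/* keep this mode */
		lpfc_sli_disable_intr(phba);
		cfg_mode = --intr_mode;		/* step down one level */
	}
	return intr_mode;
}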
12270 /**
12271 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
12272 * @phba: pointer to lpfc hba data structure.
12274 * This routine is invoked to disable device interrupt and disassociate the
12275 * driver's interrupt handler(s) from interrupt vector(s) to device with
12276 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
12277 * release the interrupt vector(s) for the message signaled interrupt.
12278 */
12279 static void
12280 lpfc_sli_disable_intr(struct lpfc_hba *phba)
12281 {
12282 int nr_irqs, i;
12284 if (phba->intr_type == MSIX)
12285 nr_irqs = LPFC_MSIX_VECTORS;
12286 else
12287 nr_irqs = 1;
12289 for (i = 0; i < nr_irqs; i++)
12290 free_irq(pci_irq_vector(phba->pcidev, i), phba);
12291 pci_free_irq_vectors(phba->pcidev);
12293 /* Reset interrupt management states */
12294 phba->intr_type = NONE;
12295 phba->sli.slistat.sli_intr = 0;
12296 }
12298 /**
12299 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
12300 * @phba: pointer to lpfc hba data structure.
12301 * @id: EQ vector index or Hardware Queue index
12302 * @match: LPFC_FIND_BY_EQ = match by EQ
12303 * LPFC_FIND_BY_HDWQ = match by Hardware Queue
12304 * Return the CPU that matches the selection criteria
12305 */
12306 static uint16_t
12307 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
12308 {
12309 struct lpfc_vector_map_info *cpup;
12312 /* Loop through all CPUs */
12313 for_each_present_cpu(cpu) {
12314 cpup = &phba->sli4_hba.cpu_map[cpu];
12316 /* If we are matching by EQ, there may be multiple CPUs
12317 * using the same vector, so select the one with
12318 * LPFC_CPU_FIRST_IRQ set.
12319 */
12320 if ((match == LPFC_FIND_BY_EQ) &&
12321 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
12322 (cpup->eq == id))
12323 return cpu;
12325 /* If matching by HDWQ, select the first CPU that matches */
12326 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
12327 return cpu;
12328 }
12330 return 0;
12331 }
12333 /**
12334 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
12335 * @phba: pointer to lpfc hba data structure.
12336 * @cpu: CPU map index
12337 * @phys_id: CPU package physical id
12338 * @core_id: CPU core id
12339 */
12340 static int
12341 lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
12342 uint16_t phys_id, uint16_t core_id)
12343 {
12344 struct lpfc_vector_map_info *cpup;
12347 for_each_present_cpu(idx) {
12348 cpup = &phba->sli4_hba.cpu_map[idx];
12349 /* Does the cpup match the one we are looking for */
12350 if ((cpup->phys_id == phys_id) &&
12351 (cpup->core_id == core_id) &&
12352 (idx != cpu))
12353 return 1;
12354 }
12355 return 0;
12356 }
12359 /**
12360 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
12361 * @phba: pointer to lpfc hba data structure.
12362 * @eqidx: index for eq and irq vector
12363 * @flag: flags to set for vector_map structure
12364 * @cpu: cpu used to index vector_map structure
12366 * The routine assigns eq info into vector_map structure
12367 */
12368 static inline void
12369 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
12370 unsigned int cpu)
12371 {
12372 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
12373 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
12375 cpup->eq = eqidx;
12376 cpup->flag |= flag;
12378 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12379 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
12380 cpu, eqhdl->irq, cpup->eq, cpup->flag);
12381 }
12383 /**
12384 * lpfc_cpu_map_array_init - Initialize cpu_map structure
12385 * @phba: pointer to lpfc hba data structure.
12387 * The routine initializes the cpu_map array structure
12388 */
12389 static void
12390 lpfc_cpu_map_array_init(struct lpfc_hba *phba)
12391 {
12392 struct lpfc_vector_map_info *cpup;
12393 struct lpfc_eq_intr_info *eqi;
12396 for_each_possible_cpu(cpu) {
12397 cpup = &phba->sli4_hba.cpu_map[cpu];
12398 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
12399 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
12400 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
12401 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
12402 cpup->flag = 0;
12403 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
12404 INIT_LIST_HEAD(&eqi->list);
12405 eqi->icnt = 0;
12406 }
12407 }
12409 /**
12410 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
12411 * @phba: pointer to lpfc hba data structure.
12413 * The routine initializes the hba_eq_hdl array structure
12414 */
12415 static void
12416 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
12417 {
12418 struct lpfc_hba_eq_hdl *eqhdl;
12421 for (i = 0; i < phba->cfg_irq_chann; i++) {
12422 eqhdl = lpfc_get_eq_hdl(i);
12423 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
12424 eqhdl->phba = phba;
12425 }
12426 }
12428 /**
12429 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
12430 * @phba: pointer to lpfc hba data structure.
12431 * @vectors: number of msix vectors allocated.
12433 * The routine will figure out the CPU affinity assignment for every
12434 * MSI-X vector allocated for the HBA.
12435 * In addition, the CPU to IO channel mapping will be calculated
12436 * and the phba->sli4_hba.cpu_map array will reflect this.
12437 */
12438 static void
12439 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
12440 {
12441 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
12442 int max_phys_id, min_phys_id;
12443 int max_core_id, min_core_id;
12444 struct lpfc_vector_map_info *cpup;
12445 struct lpfc_vector_map_info *new_cpup;
12446 #ifdef CONFIG_X86
12447 struct cpuinfo_x86 *cpuinfo;
12448 #endif
12449 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12450 struct lpfc_hdwq_stat *c_stat;
12451 #endif
12453 max_phys_id = 0;
12454 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
12455 max_core_id = 0;
12456 min_core_id = LPFC_VECTOR_MAP_EMPTY;
12458 /* Update CPU map with physical id and core id of each CPU */
12459 for_each_present_cpu(cpu) {
12460 cpup = &phba->sli4_hba.cpu_map[cpu];
12461 #ifdef CONFIG_X86
12462 cpuinfo = &cpu_data(cpu);
12463 cpup->phys_id = cpuinfo->phys_proc_id;
12464 cpup->core_id = cpuinfo->cpu_core_id;
12465 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
12466 cpup->flag |= LPFC_CPU_MAP_HYPER;
12467 #else
12468 /* No distinction between CPUs for other platforms */
12469 cpup->phys_id = 0;
12470 cpup->core_id = cpu;
12471 #endif
12473 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12474 "3328 CPU %d physid %d coreid %d flag x%x\n",
12475 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
12477 if (cpup->phys_id > max_phys_id)
12478 max_phys_id = cpup->phys_id;
12479 if (cpup->phys_id < min_phys_id)
12480 min_phys_id = cpup->phys_id;
12482 if (cpup->core_id > max_core_id)
12483 max_core_id = cpup->core_id;
12484 if (cpup->core_id < min_core_id)
12485 min_core_id = cpup->core_id;
12488 /* After looking at each irq vector assigned to this pcidev, it's
12489 * possible to see that not ALL CPUs have been accounted for.
12490 * Next we will set any unassigned (unaffinitized) cpu map
12491 * entries to an IRQ on the same phys_id.
12492 */
12493 first_cpu = cpumask_first(cpu_present_mask);
12494 start_cpu = first_cpu;
12496 for_each_present_cpu(cpu) {
12497 cpup = &phba->sli4_hba.cpu_map[cpu];
12499 /* Is this CPU entry unassigned */
12500 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12501 /* Mark CPU as IRQ not assigned by the kernel */
12502 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12504 /* If so, find a new_cpup that's on the SAME
12505 * phys_id as cpup. start_cpu will start where we
12506 * left off so all unassigned entries don't get assigned
12507 * the IRQ of the first entry.
12508 */
12509 new_cpu = start_cpu;
12510 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12511 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12512 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12513 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
12514 (new_cpup->phys_id == cpup->phys_id))
12515 goto found_same;
12516 new_cpu = cpumask_next(
12517 new_cpu, cpu_present_mask);
12518 if (new_cpu == nr_cpumask_bits)
12519 new_cpu = first_cpu;
12520 }
12521 /* At this point, we leave the CPU as unassigned */
12522 continue;
12523 found_same:
12524 /* We found a matching phys_id, so copy the IRQ info */
12525 cpup->eq = new_cpup->eq;
12527 /* Bump start_cpu to the next slot to minimize the
12528 * chance of having multiple unassigned CPU entries
12529 * selecting the same IRQ.
12530 */
12531 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12532 if (start_cpu == nr_cpumask_bits)
12533 start_cpu = first_cpu;
12535 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12536 "3337 Set Affinity: CPU %d "
12537 "eq %d from peer cpu %d same "
12539 cpu, cpup->eq, new_cpu,
12544 /* Set any unassigned cpu map entries to a IRQ on any phys_id */
12545 start_cpu = first_cpu;
12547 for_each_present_cpu(cpu) {
12548 cpup = &phba->sli4_hba.cpu_map[cpu];
12550 /* Is this entry unassigned */
12551 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12552 /* Mark it as IRQ not assigned by the kernel */
12553 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12555 /* If so, find a new_cpup that's on ANY phys_id
12556 * as the cpup. start_cpu will start where we
12557 * left off so all unassigned entries don't get
12558 * assigned the IRQ of the first entry.
12559 */
12560 new_cpu = start_cpu;
12561 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12562 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12563 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12564 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
12565 goto found_any;
12566 new_cpu = cpumask_next(
12567 new_cpu, cpu_present_mask);
12568 if (new_cpu == nr_cpumask_bits)
12569 new_cpu = first_cpu;
12570 }
12571 /* We should never leave an entry unassigned */
12572 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12573 "3339 Set Affinity: CPU %d "
12574 "eq %d UNASSIGNED\n",
12575 cpup->hdwq, cpup->eq);
12576 continue;
12577 found_any:
12578 /* We found an available entry, copy the IRQ info */
12579 cpup->eq = new_cpup->eq;
12581 /* Bump start_cpu to the next slot to minimize the
12582 * chance of having multiple unassigned CPU entries
12583 * selecting the same IRQ.
12584 */
12585 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12586 if (start_cpu == nr_cpumask_bits)
12587 start_cpu = first_cpu;
12589 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12590 "3338 Set Affinity: CPU %d "
12591 "eq %d from peer cpu %d (%d/%d)\n",
12592 cpu, cpup->eq, new_cpu,
12593 new_cpup->phys_id, new_cpup->core_id);
12594 }
12595 }
12597 /* Assign hdwq indices that are unique across all cpus in the map
12598 * that are also FIRST_CPUs.
12599 */
12600 idx = 0;
12601 for_each_present_cpu(cpu) {
12602 cpup = &phba->sli4_hba.cpu_map[cpu];
12604 /* Only FIRST IRQs get a hdwq index assignment. */
12605 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12606 continue;
12608 /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
12609 cpup->hdwq = idx;
12610 idx++;
12611 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12612 "3333 Set Affinity: CPU %d (phys %d core %d): "
12613 "hdwq %d eq %d flg x%x\n",
12614 cpu, cpup->phys_id, cpup->core_id,
12615 cpup->hdwq, cpup->eq, cpup->flag);
12616 }
12617 /* Associate a hdwq with each cpu_map entry
12618 * This will be 1 to 1 - hdwq to cpu, unless there are fewer
12619 * hardware queues than CPUs. For that case we will just round-robin
12620 * the available hardware queues as they get assigned to CPUs.
12621 * The next_idx is the idx from the FIRST_CPU loop above to account
12622 * for irq_chann < hdwq. The idx is used for round-robin assignments
12623 * and needs to start at 0.
12624 */
12625 next_idx = idx;
12626 start_cpu = 0;
12627 idx = 0;
12628 for_each_present_cpu(cpu) {
12629 cpup = &phba->sli4_hba.cpu_map[cpu];
12631 /* FIRST cpus are already mapped. */
12632 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
12633 continue;
12635 /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
12636 * of the unassigned cpus to the next idx so that all
12637 * hdw queues are fully utilized.
12638 */
12639 if (next_idx < phba->cfg_hdw_queue) {
12640 cpup->hdwq = next_idx;
12641 next_idx++;
12642 continue;
12643 }
12645 /* Not a First CPU and all hdw_queues are used. Reuse a
12646 * Hardware Queue for another CPU, so be smart about it
12647 * and pick one that has its IRQ/EQ mapped to the same phys_id
12648 * (CPU package) and core_id.
12650 new_cpu = start_cpu;
12651 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12652 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12653 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12654 new_cpup->phys_id == cpup->phys_id &&
12655 new_cpup->core_id == cpup->core_id) {
12656 goto found_hdwq;
12657 }
12658 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12659 if (new_cpu == nr_cpumask_bits)
12660 new_cpu = first_cpu;
12661 }
12663 /* If we can't match both phys_id and core_id,
12664 * settle for just a phys_id match.
12666 new_cpu = start_cpu;
12667 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12668 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12669 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12670 new_cpup->phys_id == cpup->phys_id)
12671 goto found_hdwq;
12673 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12674 if (new_cpu == nr_cpumask_bits)
12675 new_cpu = first_cpu;
12676 }
12678 /* Otherwise just round robin on cfg_hdw_queue */
12679 cpup->hdwq = idx % phba->cfg_hdw_queue;
12680 idx++;
12681 goto logit;
12682 found_hdwq:
12683 /* We found an available entry, copy the IRQ info */
12684 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12685 if (start_cpu == nr_cpumask_bits)
12686 start_cpu = first_cpu;
12687 cpup->hdwq = new_cpup->hdwq;
12688 logit:
12689 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12690 "3335 Set Affinity: CPU %d (phys %d core %d): "
12691 "hdwq %d eq %d flg x%x\n",
12692 cpu, cpup->phys_id, cpup->core_id,
12693 cpup->hdwq, cpup->eq, cpup->flag);
12694 }
12696 /*
12697 * Initialize the cpu_map slots for not-present cpus in case
12698 * a cpu is hot-added. Perform a simple hdwq round robin assignment.
12699 */
12700 idx = 0;
12701 for_each_possible_cpu(cpu) {
12702 cpup = &phba->sli4_hba.cpu_map[cpu];
12703 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12704 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
12705 c_stat->hdwq_no = cpup->hdwq;
12706 #endif
12707 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
12708 continue;
12710 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
12711 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12712 c_stat->hdwq_no = cpup->hdwq;
12713 #endif
12714 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12715 "3340 Set Affinity: not present "
12716 "CPU %d hdwq %d\n",
12717 cpu, cpup->hdwq);
12718 }
12720 /* The cpu_map array will be used later during initialization
12721 * when EQ / CQ / WQs are allocated and configured.
12722 */
12723 }
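/*
 * The search loops above share one idiom: walk cpu_present_mask from
 * start_cpu and wrap around to the first present CPU so each entry is
 * visited at most once. A minimal sketch of that wrap step (hypothetical
 * helper; equivalent to the cpumask_next()/nr_cpumask_bits checks above):
 */
static inline unsigned int example_next_present_cpu_wrap(unsigned int cur,
							 unsigned int first_cpu)
{
	unsigned int next = cpumask_next(cur, cpu_present_mask);

	return (next >= nr_cpumask_bits) ? first_cpu : next;
}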
12726 /**
12727 * lpfc_cpuhp_get_eq
12729 * @phba: pointer to lpfc hba data structure.
12730 * @cpu: cpu going offline
12731 * @eqlist: eq list to append to
12732 */
12733 static int
12734 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
12735 struct list_head *eqlist)
12736 {
12737 const struct cpumask *maskp;
12738 struct lpfc_queue *eq;
12739 struct cpumask *tmp;
12740 uint16_t idx;
12742 tmp = kzalloc(cpumask_size(), GFP_KERNEL);
12743 if (!tmp)
12744 return -ENOMEM;
12746 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12747 maskp = pci_irq_get_affinity(phba->pcidev, idx);
12748 if (!maskp)
12749 continue;
12750 /*
12751 * if irq is not affinitized to the cpu going
12752 * offline then we don't need to poll the eq attached
12753 * to it.
12754 */
12755 if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
12756 continue;
12757 /* get the cpus that are online and are affini-
12758 * tized to this irq vector. If the count is
12759 * more than 1 then cpuhp is not going to shut-
12760 * down this vector. Since this cpu has not
12761 * gone offline yet, we need >1.
12763 cpumask_and(tmp, maskp, cpu_online_mask);
12764 if (cpumask_weight(tmp) > 1)
12765 continue;
12767 /* Now that we have an irq to shutdown, get the eq
12768 * mapped to this irq. Note: multiple hdwq's in
12769 * the software can share an eq, but eventually
12770 * only one eq will be mapped to this vector
12771 */
12772 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12773 list_add(&eq->_poll_list, eqlist);
12774 }
12775 kfree(tmp);
12776 return 0;
12777 }
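/*
 * A sketch of the test above (hypothetical helper): a vector needs
 * driver-side polling only when the CPU going offline is in its affinity
 * mask and no more than one CPU in that mask is still online. "tmp" must
 * point to a cpumask_size() allocation owned by the caller.
 */
static bool example_vector_going_down(const struct cpumask *affinity,
				      struct cpumask *tmp, unsigned int cpu)
{
	/* vector not affinitized to this cpu at all: unaffected */
	if (!cpumask_and(tmp, affinity, cpumask_of(cpu)))
		return false;
	/* more than one online CPU left means cpuhp keeps the vector */
	cpumask_and(tmp, affinity, cpu_online_mask);
	return cpumask_weight(tmp) <= 1;
}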
12779 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
12780 {
12781 if (phba->sli_rev != LPFC_SLI_REV4)
12782 return;
12784 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
12785 &phba->cpuhp);
12786 /*
12787 * unregistering the instance doesn't stop the polling
12788 * timer. Wait for the poll timer to retire.
12789 */
12790 synchronize_rcu();
12791 del_timer_sync(&phba->cpuhp_poll_timer);
12792 }
12794 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
12796 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
12797 return;
12799 __lpfc_cpuhp_remove(phba);
12800 }
12802 static void lpfc_cpuhp_add(struct lpfc_hba *phba)
12803 {
12804 if (phba->sli_rev != LPFC_SLI_REV4)
12805 return;
12807 rcu_read_lock();
12809 if (!list_empty(&phba->poll_list))
12810 mod_timer(&phba->cpuhp_poll_timer,
12811 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
12813 rcu_read_unlock();
12815 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
12816 &phba->cpuhp);
12817 }
12819 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
12820 {
12821 if (phba->pport->load_flag & FC_UNLOADING) {
12822 *retval = -EAGAIN;
12823 return 1;
12824 }
12826 if (phba->sli_rev != LPFC_SLI_REV4) {
12827 *retval = 0;
12828 return 1;
12829 }
12831 /* proceed with the hotplug */
12832 return 0;
12833 }
12835 /**
12836 * lpfc_irq_set_aff - set IRQ affinity
12837 * @eqhdl: EQ handle
12838 * @cpu: cpu to set affinity
12839 */
12840 static inline void
12842 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
12843 {
12844 cpumask_clear(&eqhdl->aff_mask);
12845 cpumask_set_cpu(cpu, &eqhdl->aff_mask);
12846 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12847 irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
12848 }
12850 /**
12851 * lpfc_irq_clear_aff - clear IRQ affinity
12852 * @eqhdl: EQ handle
12853 */
12855 static inline void
12856 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
12857 {
12858 cpumask_clear(&eqhdl->aff_mask);
12859 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12860 }
12862 /**
12863 * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
12864 * @phba: pointer to HBA context object.
12865 * @cpu: cpu going offline/online
12866 * @offline: true, cpu is going offline. false, cpu is coming online.
12868 * If cpu is going offline, we'll try our best effort to find the next
12869 * online cpu on the phba's original_mask and migrate all offlining IRQ
12870 * affinities.
12872 * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
12874 * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
12875 * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
12876 **/
12877 static void
12879 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
12880 {
12881 struct lpfc_vector_map_info *cpup;
12882 struct cpumask *aff_mask;
12883 unsigned int cpu_select, cpu_next, idx;
12884 const struct cpumask *orig_mask;
12886 if (phba->irq_chann_mode == NORMAL_MODE)
12887 return;
12889 orig_mask = &phba->sli4_hba.irq_aff_mask;
12891 if (!cpumask_test_cpu(cpu, orig_mask))
12892 return;
12894 cpup = &phba->sli4_hba.cpu_map[cpu];
12896 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12897 return;
12899 if (offline) {
12900 /* Find next online CPU on original mask */
12901 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
12902 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
12904 /* Found a valid CPU */
12905 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
12906 /* Go through each eqhdl and ensure offlining
12907 * cpu aff_mask is migrated
12909 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12910 aff_mask = lpfc_get_aff_mask(idx);
12912 /* Migrate affinity */
12913 if (cpumask_test_cpu(cpu, aff_mask))
12914 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
12915 cpu_select);
12916 }
12917 } else {
12918 /* Rely on irqbalance if no online CPUs left on NUMA */
12919 for (idx = 0; idx < phba->cfg_irq_chann; idx++)
12920 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
12921 }
12922 } else {
12923 /* Migrate affinity back to this CPU */
12924 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
12925 }
12926 }
12928 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
12929 {
12930 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12931 struct lpfc_queue *eq, *next;
12932 LIST_HEAD(eqlist);
12933 int retval;
12935 if (!phba) {
12936 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12937 return 0;
12938 }
12940 if (__lpfc_cpuhp_checks(phba, &retval))
12941 return retval;
12943 lpfc_irq_rebalance(phba, cpu, true);
12945 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
12946 if (retval)
12947 return retval;
12949 /* start polling on these eq's */
12950 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
12951 list_del_init(&eq->_poll_list);
12952 lpfc_sli4_start_polling(eq);
12953 }
12955 return 0;
12956 }
12958 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
12959 {
12960 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12961 struct lpfc_queue *eq, *next;
12962 unsigned int n;
12963 int retval;
12965 if (!phba) {
12966 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12967 return 0;
12968 }
12970 if (__lpfc_cpuhp_checks(phba, &retval))
12971 return retval;
12973 lpfc_irq_rebalance(phba, cpu, false);
12975 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
12976 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
12977 if (n == cpu)
12978 lpfc_sli4_stop_polling(eq);
12979 }
12981 return 0;
12982 }
12984 /**
12985 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
12986 * @phba: pointer to lpfc hba data structure.
12988 * This routine is invoked to enable the MSI-X interrupt vectors to device
12989 * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
12990 * to cpus on the system.
12992 * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
12993 * the number of cpus on the same numa node as this adapter. The vectors are
12994 * allocated without requesting OS affinity mapping. A vector will be
12995 * allocated and assigned to each online and offline cpu. If the cpu is
12996 * online, then affinity will be set to that cpu. If the cpu is offline, then
12997 * affinity will be set to the nearest peer cpu within the numa node that is
12998 * online. If there are no online cpus within the numa node, affinity is not
12999 * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
13000 * is consistent with the way cpu online/offline is handled when cfg_irq_numa
13001 * is enabled.
13003 * If numa mode is not enabled and there is more than 1 vector allocated, then
13004 * the driver relies on the managed irq interface where the OS assigns vector
13005 * to cpu affinity. The driver will then use that affinity mapping to setup
13006 * its cpu mapping table.
13008 * Return codes
13009 * 0 - successful
13010 * other values - error
13011 **/
13012 static int
13013 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
13014 {
13015 int vectors, rc, index;
13016 char *name;
13017 const struct cpumask *aff_mask = NULL;
13018 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
13019 struct lpfc_vector_map_info *cpup;
13020 struct lpfc_hba_eq_hdl *eqhdl;
13021 const struct cpumask *maskp;
13022 unsigned int flags = PCI_IRQ_MSIX;
13024 /* Set up MSI-X multi-message vectors */
13025 vectors = phba->cfg_irq_chann;
13027 if (phba->irq_chann_mode != NORMAL_MODE)
13028 aff_mask = &phba->sli4_hba.irq_aff_mask;
13030 if (aff_mask) {
13031 cpu_cnt = cpumask_weight(aff_mask);
13032 vectors = min(phba->cfg_irq_chann, cpu_cnt);
13034 /* cpu: iterates over aff_mask including offline or online
13035 * cpu_select: iterates over online aff_mask to set affinity
13036 */
13037 cpu = cpumask_first(aff_mask);
13038 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13039 } else {
13040 flags |= PCI_IRQ_AFFINITY;
13041 }
13043 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
13044 if (rc < 0) {
13045 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13046 "0484 PCI enable MSI-X failed (%d)\n", rc);
13047 goto vec_fail_out;
13048 }
13049 vectors = rc;
13051 /* Assign MSI-X vectors to interrupt handlers */
13052 for (index = 0; index < vectors; index++) {
13053 eqhdl = lpfc_get_eq_hdl(index);
13054 name = eqhdl->handler_name;
13055 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
13056 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
13057 LPFC_DRIVER_HANDLER_NAME"%d", index);
13059 eqhdl->idx = index;
13060 rc = request_irq(pci_irq_vector(phba->pcidev, index),
13061 &lpfc_sli4_hba_intr_handler, 0,
13062 name, eqhdl);
13063 if (rc) {
13064 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13065 "0486 MSI-X fast-path (%d) "
13066 "request_irq failed (%d)\n", index, rc);
13067 goto cfg_fail_out;
13068 }
13070 eqhdl->irq = pci_irq_vector(phba->pcidev, index);
13072 if (aff_mask) {
13073 /* If found a neighboring online cpu, set affinity */
13074 if (cpu_select < nr_cpu_ids)
13075 lpfc_irq_set_aff(eqhdl, cpu_select);
13077 /* Assign EQ to cpu_map */
13078 lpfc_assign_eq_map_info(phba, index,
13079 LPFC_CPU_FIRST_IRQ,
13080 cpu);
13082 /* Iterate to next offline or online cpu in aff_mask */
13083 cpu = cpumask_next(cpu, aff_mask);
13085 /* Find next online cpu in aff_mask to set affinity */
13086 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13087 } else if (vectors == 1) {
13088 cpu = cpumask_first(cpu_present_mask);
13089 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
13090 cpu);
13091 } else {
13092 maskp = pci_irq_get_affinity(phba->pcidev, index);
13094 /* Loop through all CPUs associated with vector index */
13095 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
13096 cpup = &phba->sli4_hba.cpu_map[cpu];
13098 /* If this is the first CPU thats assigned to
13099 * this vector, set LPFC_CPU_FIRST_IRQ.
13101 * With certain platforms its possible that irq
13102 * vectors are affinitized to all the cpu's.
13103 * This can result in each cpu_map.eq to be set
13104 * to the last vector, resulting in overwrite
13105 * of all the previous cpu_map.eq. Ensure that
13106 * each vector receives a place in cpu_map.
13107 * Later call to lpfc_cpu_affinity_check will
13108 * ensure we are nicely balanced out.
13110 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
13111 continue;
13112 lpfc_assign_eq_map_info(phba, index,
13113 LPFC_CPU_FIRST_IRQ,
13114 cpu);
13115 break;
13116 }
13117 }
13118 }
13120 if (vectors != phba->cfg_irq_chann) {
13121 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13122 "3238 Reducing IO channels to match number of "
13123 "MSI-X vectors, requested %d got %d\n",
13124 phba->cfg_irq_chann, vectors);
13125 if (phba->cfg_irq_chann > vectors)
13126 phba->cfg_irq_chann = vectors;
13127 }
13129 return rc;
13131 cfg_fail_out:
13132 /* free the irq already requested */
13133 for (--index; index >= 0; index--) {
13134 eqhdl = lpfc_get_eq_hdl(index);
13135 lpfc_irq_clear_aff(eqhdl);
13136 free_irq(eqhdl->irq, eqhdl);
13137 }
13139 /* Unconfigure MSI-X capability structure */
13140 pci_free_irq_vectors(phba->pcidev);
13142 vec_fail_out:
13143 return rc;
13144 }
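/*
 * Sketch of the managed-affinity path above (hypothetical names): with
 * PCI_IRQ_AFFINITY the PCI core spreads vectors across CPUs, and the driver
 * reads the resulting masks back with pci_irq_get_affinity() to build its
 * own cpu-to-vector table, as lpfc_sli4_enable_msix() does for cpu_map.
 */
static int example_alloc_affinity_vectors(struct pci_dev *pdev, int want,
					  u16 *cpu_to_vec)
{
	int nvec, idx, cpu;
	const struct cpumask *maskp;

	nvec = pci_alloc_irq_vectors(pdev, 1, want,
				     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		return nvec;

	for (idx = 0; idx < nvec; idx++) {
		maskp = pci_irq_get_affinity(pdev, idx);
		if (!maskp)
			continue;
		/* record this vector for every present CPU in its mask */
		for_each_cpu_and(cpu, maskp, cpu_present_mask)
			cpu_to_vec[cpu] = idx;
	}
	return nvec;
}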
13146 /**
13147 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
13148 * @phba: pointer to lpfc hba data structure.
13150 * This routine is invoked to enable the MSI interrupt mode to device with
13151 * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
13152 * called to enable the MSI vector. The device driver is responsible for
13153 * calling request_irq() to register the MSI vector with an interrupt
13154 * handler, which is done in this function.
13156 * Return codes
13157 * 0 - successful
13158 * other values - error
13159 **/
13160 static int
13161 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
13162 {
13163 int rc, index;
13164 unsigned int cpu;
13165 struct lpfc_hba_eq_hdl *eqhdl;
13167 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
13168 PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
13169 if (rc > 0)
13170 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13171 "0487 PCI enable MSI mode success.\n");
13172 else {
13173 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13174 "0488 PCI enable MSI mode failed (%d)\n", rc);
13175 return rc ? rc : -1;
13176 }
13178 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13179 0, LPFC_DRIVER_NAME, phba);
13180 if (rc) {
13181 pci_free_irq_vectors(phba->pcidev);
13182 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13183 "0490 MSI request_irq failed (%d)\n", rc);
13184 return rc;
13185 }
13187 eqhdl = lpfc_get_eq_hdl(0);
13188 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
13190 cpu = cpumask_first(cpu_present_mask);
13191 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
13193 for (index = 0; index < phba->cfg_irq_chann; index++) {
13194 eqhdl = lpfc_get_eq_hdl(index);
13195 eqhdl->idx = index;
13196 }
13198 return 0;
13199 }
13201 /**
13202 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
13203 * @phba: pointer to lpfc hba data structure.
13204 * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
13206 * This routine is invoked to enable device interrupt and associate driver's
13207 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
13208 * interface spec. Depending on the interrupt mode configured for the
13209 * driver, the driver will try to fall back from the configured interrupt
13210 * mode to an interrupt mode which is supported by the platform, kernel,
13211 * and device, in the order of:
13212 * MSI-X -> MSI -> IRQ.
13214 * Return codes
13215 * 0 - successful
13216 * other values - error
13217 **/
13218 static uint32_t
13219 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
13220 {
13221 uint32_t intr_mode = LPFC_INTR_ERROR;
13222 int retval, idx;
13224 if (cfg_mode == 2) {
13225 /* Preparation before conf_msi mbox cmd */
13226 retval = 0;
13227 if (!retval) {
13228 /* Now, try to enable MSI-X interrupt mode */
13229 retval = lpfc_sli4_enable_msix(phba);
13230 if (!retval) {
13231 /* Indicate initialization to MSI-X mode */
13232 phba->intr_type = MSIX;
13233 intr_mode = 2;
13234 }
13235 }
13236 }
13238 /* Fallback to MSI if MSI-X initialization failed */
13239 if (cfg_mode >= 1 && phba->intr_type == NONE) {
13240 retval = lpfc_sli4_enable_msi(phba);
13241 if (!retval) {
13242 /* Indicate initialization to MSI mode */
13243 phba->intr_type = MSI;
13244 intr_mode = 1;
13245 }
13246 }
13248 /* Fallback to INTx if both MSI-X/MSI initialization failed */
13249 if (phba->intr_type == NONE) {
13250 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13251 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
13252 if (!retval) {
13253 struct lpfc_hba_eq_hdl *eqhdl;
13254 unsigned int cpu;
13256 /* Indicate initialization to INTx mode */
13257 phba->intr_type = INTx;
13258 intr_mode = 0;
13260 eqhdl = lpfc_get_eq_hdl(0);
13261 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
13263 cpu = cpumask_first(cpu_present_mask);
13264 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
13265 cpu);
13266 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
13267 eqhdl = lpfc_get_eq_hdl(idx);
13268 eqhdl->idx = idx;
13269 }
13270 }
13271 }
13272 return intr_mode;
13273 }
13275 /**
13276 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
13277 * @phba: pointer to lpfc hba data structure.
13279 * This routine is invoked to disable device interrupt and disassociate
13280 * the driver's interrupt handler(s) from interrupt vector(s) to device
13281 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
13282 * will release the interrupt vector(s) for the message signaled interrupt.
13283 */
13284 static void
13285 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
13286 {
13287 /* Disable the currently initialized interrupt mode */
13288 if (phba->intr_type == MSIX) {
13289 int index;
13290 struct lpfc_hba_eq_hdl *eqhdl;
13292 /* Free up MSI-X multi-message vectors */
13293 for (index = 0; index < phba->cfg_irq_chann; index++) {
13294 eqhdl = lpfc_get_eq_hdl(index);
13295 lpfc_irq_clear_aff(eqhdl);
13296 free_irq(eqhdl->irq, eqhdl);
13297 }
13298 } else {
13299 free_irq(phba->pcidev->irq, phba);
13300 }
13302 pci_free_irq_vectors(phba->pcidev);
13304 /* Reset interrupt management states */
13305 phba->intr_type = NONE;
13306 phba->sli.slistat.sli_intr = 0;
13307 }
13309 /**
13310 * lpfc_unset_hba - Unset SLI3 hba device initialization
13311 * @phba: pointer to lpfc hba data structure.
13313 * This routine is invoked to unset the HBA device initialization steps to
13314 * a device with SLI-3 interface spec.
13315 */
13316 static void
13317 lpfc_unset_hba(struct lpfc_hba *phba)
13318 {
13319 struct lpfc_vport *vport = phba->pport;
13320 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
13322 spin_lock_irq(shost->host_lock);
13323 vport->load_flag |= FC_UNLOADING;
13324 spin_unlock_irq(shost->host_lock);
13326 kfree(phba->vpi_bmask);
13327 kfree(phba->vpi_ids);
13329 lpfc_stop_hba_timers(phba);
13331 phba->pport->work_port_events = 0;
13333 lpfc_sli_hba_down(phba);
13335 lpfc_sli_brdrestart(phba);
13337 lpfc_sli_disable_intr(phba);
13339 return;
13340 }
13342 /**
13343 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
13344 * @phba: Pointer to HBA context object.
13346 * This function is called in the SLI4 code path to wait for completion
13347 * of device's XRIs exchange busy. It will check the XRI exchange busy
13348 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
13349 * that, it will check the XRI exchange busy on outstanding FCP and ELS
13350 * I/Os every 30 seconds, log error message, and wait forever. Only when
13351 * all XRI exchange busy complete, the driver unload shall proceed with
13352 * invoking the function reset ioctl mailbox command to the CNA and
13353 * the rest of the driver unload resource release.
13354 */
13355 static void
13356 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
13357 {
13358 struct lpfc_sli4_hdw_queue *qp;
13359 int idx, ccnt;
13360 int wait_time = 0;
13361 int io_xri_cmpl = 1;
13362 int nvmet_xri_cmpl = 1;
13363 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13365 /* Driver just aborted IOs during the hba_unset process. Pause
13366 * here to give the HBA time to complete the IO and get entries
13367 * into the abts lists.
13369 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
13371 /* Wait for NVME pending IO to flush back to transport. */
13372 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13373 lpfc_nvme_wait_for_io_drain(phba);
13375 ccnt = 0;
13376 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13377 qp = &phba->sli4_hba.hdwq[idx];
13378 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
13379 if (!io_xri_cmpl) /* if list is NOT empty */
13380 ccnt++;
13381 }
13382 if (ccnt)
13383 io_xri_cmpl = 0;
13385 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13386 nvmet_xri_cmpl =
13387 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13388 }
13390 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
13391 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
13392 if (!nvmet_xri_cmpl)
13393 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13394 "6424 NVMET XRI exchange busy "
13395 "wait time: %d seconds.\n",
13398 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13399 "6100 IO XRI exchange busy "
13400 "wait time: %d seconds.\n",
13403 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13404 "2878 ELS XRI exchange busy "
13405 "wait time: %d seconds.\n",
13407 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
13408 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
13410 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
13411 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
13415 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13416 qp = &phba->sli4_hba.hdwq[idx];
13417 io_xri_cmpl = list_empty(
13418 &qp->lpfc_abts_io_buf_list);
13419 if (!io_xri_cmpl) /* if list is NOT empty */
13420 ccnt++;
13421 }
13422 if (ccnt)
13423 io_xri_cmpl = 0;
13425 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13426 nvmet_xri_cmpl = list_empty(
13427 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13428 }
13429 els_xri_cmpl =
13430 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13431 }
13432 }
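/*
 * Sketch of the two-tier wait policy implemented above (hypothetical
 * condition callback): poll quickly (10 ms) for the first 10 seconds, then
 * drop to a slow 30-second poll with a log message each round, never
 * giving up. The T1/T2/TMO constants mirror the ones used above.
 */
static void example_two_tier_wait(bool (*done)(void *), void *arg)
{
	int wait_time = 0;

	while (!done(arg)) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			pr_err("example: still busy after %d seconds\n",
			       wait_time / 1000);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}
	}
}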
13435 /**
13436 * lpfc_sli4_hba_unset - Unset the fcoe hba
13437 * @phba: Pointer to HBA context object.
13439 * This function is called in the SLI4 code path to reset the HBA's FCoE
13440 * function. The caller is not required to hold any lock. This routine
13441 * issues PCI function reset mailbox command to reset the FCoE function.
13442 * At the end of the function, it calls lpfc_hba_down_post function to
13443 * free any pending commands.
13444 */
13445 static void
13446 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
13447 {
13448 int wait_cnt = 0;
13449 LPFC_MBOXQ_t *mboxq;
13450 struct pci_dev *pdev = phba->pcidev;
13452 lpfc_stop_hba_timers(phba);
13453 hrtimer_cancel(&phba->cmf_timer);
13455 if (phba->pport)
13456 phba->sli4_hba.intr_enable = 0;
13458 /*
13459 * Gracefully wait out the potential current outstanding asynchronous
13460 * mailbox command.
13461 */
13463 /* First, block any pending async mailbox command from posted */
13464 spin_lock_irq(&phba->hbalock);
13465 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13466 spin_unlock_irq(&phba->hbalock);
13467 /* Now, trying to wait it out if we can */
13468 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13469 msleep(10);
13470 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
13471 break;
13472 }
13473 /* Forcefully release the outstanding mailbox command if timed out */
13474 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13475 spin_lock_irq(&phba->hbalock);
13476 mboxq = phba->sli.mbox_active;
13477 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
13478 __lpfc_mbox_cmpl_put(phba, mboxq);
13479 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13480 phba->sli.mbox_active = NULL;
13481 spin_unlock_irq(&phba->hbalock);
13484 /* Abort all iocbs associated with the hba */
13485 lpfc_sli_hba_iocb_abort(phba);
13487 if (!pci_channel_offline(phba->pcidev))
13488 /* Wait for completion of device XRI exchange busy */
13489 lpfc_sli4_xri_exchange_busy_wait(phba);
13491 /* per-phba callback de-registration for hotplug event */
13493 lpfc_cpuhp_remove(phba);
13495 /* Disable PCI subsystem interrupt */
13496 lpfc_sli4_disable_intr(phba);
13498 /* Disable SR-IOV if enabled */
13499 if (phba->cfg_sriov_nr_virtfn)
13500 pci_disable_sriov(pdev);
13502 /* Stop kthread signal shall trigger work_done one more time */
13503 kthread_stop(phba->worker_thread);
13505 /* Disable FW logging to host memory */
13506 lpfc_ras_stop_fwlog(phba);
13508 /* Reset SLI4 HBA FCoE function */
13509 lpfc_pci_function_reset(phba);
13511 /* release all queue allocated resources. */
13512 lpfc_sli4_queue_destroy(phba);
13514 /* Free RAS DMA memory */
13515 if (phba->ras_fwlog.ras_enabled)
13516 lpfc_sli4_ras_dma_free(phba);
13518 /* Stop the SLI4 device port */
13519 if (phba->pport)
13520 phba->pport->work_port_events = 0;
13521 }
13523 static uint32_t
13524 lpfc_cgn_crc32(uint32_t crc, u8 byte)
13525 {
13526 uint32_t msb;
13527 uint8_t bit;
13529 for (bit = 0; bit < 8; bit++) {
13530 msb = (crc >> 31) & 1;
13531 crc <<= 1;
13533 if (msb ^ (byte & 1)) {
13534 crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
13535 crc |= 1;
13536 }
13537 byte >>= 1;
13538 }
13539 return crc;
13540 }
13542 static uint32_t
13543 lpfc_cgn_reverse_bits(uint32_t wd)
13544 {
13545 uint32_t result = 0;
13546 uint32_t i;
13548 for (i = 0; i < 32; i++) {
13549 result <<= 1;
13550 result |= (1 & (wd >> i));
13551 }
13552 return result;
13553 }
13555 /*
13556 * The routine corresponds with the algorithm the HBA firmware
13557 * uses to validate the data integrity.
13558 */
13559 static uint32_t
13560 lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
13561 {
13562 uint32_t i;
13563 uint32_t result;
13564 uint8_t *data = (uint8_t *)ptr;
13566 for (i = 0; i < byteLen; ++i)
13567 crc = lpfc_cgn_crc32(crc, data[i]);
13569 result = ~lpfc_cgn_reverse_bits(crc);
13570 return result;
13571 }
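/*
 * Usage sketch for the CRC helpers above (hypothetical, mirroring the
 * sealing step in lpfc_init_congestion_buf()/lpfc_init_congestion_stat()):
 * run every byte through lpfc_cgn_crc32() from the seed, bit-reverse and
 * invert the result, and store it little-endian in the buffer.
 */
static void example_seal_cgn_buf(struct lpfc_cgn_info *cp)
{
	uint32_t crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
					   LPFC_CGN_CRC32_SEED);

	cp->cgn_info_crc = cpu_to_le32(crc);
}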
13573 void
13574 lpfc_init_congestion_buf(struct lpfc_hba *phba)
13575 {
13576 struct lpfc_cgn_info *cp;
13577 struct timespec64 cmpl_time;
13578 struct tm broken;
13579 uint16_t size;
13580 uint32_t crc;
13582 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13583 "6235 INIT Congestion Buffer %p\n", phba->cgn_i);
13585 if (!phba->cgn_i)
13586 return;
13587 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13589 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
13590 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
13591 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
13592 atomic_set(&phba->cgn_sync_warn_cnt, 0);
13594 atomic_set(&phba->cgn_driver_evt_cnt, 0);
13595 atomic_set(&phba->cgn_latency_evt_cnt, 0);
13596 atomic64_set(&phba->cgn_latency_evt, 0);
13597 phba->cgn_evt_minute = 0;
13598 phba->hba_flag &= ~HBA_CGN_DAY_WRAP;
13600 memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat));
13601 cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
13602 cp->cgn_info_version = LPFC_CGN_INFO_V3;
13604 /* cgn parameters */
13605 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
13606 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
13607 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
13608 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
13610 ktime_get_real_ts64(&cmpl_time);
13611 time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13613 cp->cgn_info_month = broken.tm_mon + 1;
13614 cp->cgn_info_day = broken.tm_mday;
13615 cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */
13616 cp->cgn_info_hour = broken.tm_hour;
13617 cp->cgn_info_minute = broken.tm_min;
13618 cp->cgn_info_second = broken.tm_sec;
13620 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13621 "2643 CGNInfo Init: Start Time "
13622 "%d/%d/%d %d:%d:%d\n",
13623 cp->cgn_info_day, cp->cgn_info_month,
13624 cp->cgn_info_year, cp->cgn_info_hour,
13625 cp->cgn_info_minute, cp->cgn_info_second);
13627 /* Fill in default LUN qdepth */
13628 if (phba->pport) {
13629 size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
13630 cp->cgn_lunq = cpu_to_le16(size);
13631 }
13633 /* last used Index initialized to 0xff already */
13635 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13636 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13637 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13638 cp->cgn_info_crc = cpu_to_le32(crc);
13640 phba->cgn_evt_timestamp = jiffies +
13641 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
13642 }
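/*
 * Sketch of the timestamp packing used above (hypothetical helper):
 * wall-clock time is taken with ktime_get_real_ts64(), broken down with
 * time64_to_tm(), and the year stored relative to 2000 (tm_year counts
 * from 1900, hence the -100).
 */
static void example_fill_cgn_time(u8 *month, u8 *day, u8 *year,
				  u8 *hour, u8 *minute, u8 *second)
{
	struct timespec64 ts;
	struct tm broken;

	ktime_get_real_ts64(&ts);
	time64_to_tm(ts.tv_sec, 0, &broken);
	*month = broken.tm_mon + 1;		/* tm_mon is 0-11 */
	*day = broken.tm_mday;
	*year = broken.tm_year - 100;		/* relative to 2000 */
	*hour = broken.tm_hour;
	*minute = broken.tm_min;
	*second = broken.tm_sec;
}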
13644 void
13645 lpfc_init_congestion_stat(struct lpfc_hba *phba)
13646 {
13647 struct lpfc_cgn_info *cp;
13648 struct timespec64 cmpl_time;
13649 struct tm broken;
13650 uint32_t crc;
13652 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13653 "6236 INIT Congestion Stat %p\n", phba->cgn_i);
13655 if (!phba->cgn_i)
13656 return;
13658 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13659 memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
13661 ktime_get_real_ts64(&cmpl_time);
13662 time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13664 cp->cgn_stat_month = broken.tm_mon + 1;
13665 cp->cgn_stat_day = broken.tm_mday;
13666 cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */
13667 cp->cgn_stat_hour = broken.tm_hour;
13668 cp->cgn_stat_minute = broken.tm_min;
13670 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13671 "2647 CGNstat Init: Start Time "
13672 "%d/%d/%d %d:%d\n",
13673 cp->cgn_stat_day, cp->cgn_stat_month,
13674 cp->cgn_stat_year, cp->cgn_stat_hour,
13675 cp->cgn_stat_minute);
13677 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13678 cp->cgn_info_crc = cpu_to_le32(crc);
13679 }
13681 /**
13682 * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
13683 * @phba: Pointer to hba context object.
13684 * @reg: flag to determine register or unregister.
13685 */
13686 static int
13687 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
13688 {
13689 struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
13690 union lpfc_sli4_cfg_shdr *shdr;
13691 uint32_t shdr_status, shdr_add_status;
13692 LPFC_MBOXQ_t *mboxq;
13693 int length, rc;
13695 if (!phba->cgn_i)
13696 return -ENXIO;
13698 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13699 if (!mboxq) {
13700 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13701 "2641 REG_CONGESTION_BUF mbox allocation fail: "
13702 "HBA state x%x reg %d\n",
13703 phba->pport->port_state, reg);
13704 return -ENOMEM;
13705 }
13707 length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
13708 sizeof(struct lpfc_sli4_cfg_mhdr));
13709 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13710 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
13711 LPFC_SLI4_MBX_EMBED);
13712 reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
13713 bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
13714 if (reg)
13715 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
13716 else
13717 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
13718 reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
13719 reg_congestion_buf->addr_lo =
13720 putPaddrLow(phba->cgn_i->phys);
13721 reg_congestion_buf->addr_hi =
13722 putPaddrHigh(phba->cgn_i->phys);
13724 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13725 shdr = (union lpfc_sli4_cfg_shdr *)
13726 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
13727 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13728 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13729 &shdr->response);
13730 mempool_free(mboxq, phba->mbox_mem_pool);
13731 if (shdr_status || shdr_add_status || rc) {
13732 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13733 "2642 REG_CONGESTION_BUF mailbox "
13734 "failed with status x%x add_status x%x,"
13735 " mbx status x%x reg %d\n",
13736 shdr_status, shdr_add_status, rc, reg);
13737 return -ENXIO;
13738 }
13740 return 0;
13741 }
13742 int
13743 lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
13744 {
13745 lpfc_cmf_stop(phba);
13746 return __lpfc_reg_congestion_buf(phba, 0);
13747 }
13749 int
13750 lpfc_reg_congestion_buf(struct lpfc_hba *phba)
13751 {
13752 return __lpfc_reg_congestion_buf(phba, 1);
13753 }
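/*
 * Usage sketch for the wrappers above (hypothetical caller): the congestion
 * buffer is registered once the DMA buffer (phba->cgn_i) exists, and must
 * be unregistered (which also stops CMF via lpfc_cmf_stop()) before that
 * buffer is freed. Error handling is abbreviated.
 */
static int example_cgn_buf_cycle(struct lpfc_hba *phba)
{
	int rc = lpfc_reg_congestion_buf(phba);

	if (rc)
		return rc;
	/* ... congestion data is now DMA-updated by the HBA ... */
	return lpfc_unreg_congestion_buf(phba);
}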
13755 /**
13756 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
13757 * @phba: Pointer to HBA context object.
13758 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
13760 * This function is called in the SLI4 code path to read the port's
13761 * sli4 capabilities.
13763 * This function may be called from any context that can block-wait
13764 * for the completion. The expectation is that this routine is called
13765 * typically from probe_one or from the online routine.
13766 */
13767 int
13768 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
13769 {
13770 int rc;
13771 struct lpfc_mqe *mqe = &mboxq->u.mqe;
13772 struct lpfc_pc_sli4_params *sli4_params;
13773 uint32_t mbox_tmo;
13774 int length;
13775 bool exp_wqcq_pages = true;
13776 struct lpfc_sli4_parameters *mbx_sli4_parameters;
13778 /*
13779 * By default, the driver assumes the SLI4 port requires RPI
13780 * header postings. The SLI4_PARAM response will correct this
13781 * assumption.
13782 */
13783 phba->sli4_hba.rpi_hdrs_in_use = 1;
13785 /* Read the port's SLI4 Config Parameters */
13786 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
13787 sizeof(struct lpfc_sli4_cfg_mhdr));
13788 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13789 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
13790 length, LPFC_SLI4_MBX_EMBED);
13791 if (!phba->sli4_hba.intr_enable)
13792 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13793 else {
13794 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
13795 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
13796 }
13797 if (unlikely(rc))
13798 return rc;
13799 sli4_params = &phba->sli4_hba.pc_sli4_params;
13800 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
13801 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
13802 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
13803 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
13804 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
13805 mbx_sli4_parameters);
13806 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
13807 mbx_sli4_parameters);
13808 if (bf_get(cfg_phwq, mbx_sli4_parameters))
13809 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
13811 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
13812 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
13813 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
13814 mbx_sli4_parameters);
13815 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
13816 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
13817 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
13818 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
13819 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
13820 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
13821 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
13822 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
13823 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
13824 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
13825 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
13826 mbx_sli4_parameters);
13827 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
13828 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
13829 mbx_sli4_parameters);
13830 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
13831 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
13833 /* Check for Extended Pre-Registered SGL support */
13834 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
13836 /* Check for firmware nvme support */
13837 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
13838 bf_get(cfg_xib, mbx_sli4_parameters));
13839 if (rc) {
13841 /* Save this to indicate the Firmware supports NVME */
13842 sli4_params->nvme = 1;
13844 /* Firmware NVME support, check driver FC4 NVME support */
13845 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
13846 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13847 "6133 Disabling NVME support: "
13848 "FC4 type not supported: x%x\n",
13849 phba->cfg_enable_fc4_type);
13850 goto fcponly;
13851 }
13852 } else {
13853 /* No firmware NVME support, check driver FC4 NVME support */
13854 sli4_params->nvme = 0;
13855 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13856 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
13857 "6101 Disabling NVME support: Not "
13858 "supported by firmware (%d %d) x%x\n",
13859 bf_get(cfg_nvme, mbx_sli4_parameters),
13860 bf_get(cfg_xib, mbx_sli4_parameters),
13861 phba->cfg_enable_fc4_type);
13862 fcponly:
13863 phba->nvmet_support = 0;
13864 phba->cfg_nvmet_mrq = 0;
13865 phba->cfg_nvme_seg_cnt = 0;
13867 /* If no FC4 type support, move to just SCSI support */
13868 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
13869 return -ENODEV;
13870 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
13871 }
13872 }
13874 /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
13875 * accommodate 512K and 1M IOs in a single nvme buf.
13877 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13878 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
13880 /* Enable embedded Payload BDE if support is indicated */
13881 if (bf_get(cfg_pbde, mbx_sli4_parameters))
13882 phba->cfg_enable_pbde = 1;
13883 else
13884 phba->cfg_enable_pbde = 0;
13887 * To support Suppress Response feature we must satisfy 3 conditions.
13888 * lpfc_suppress_rsp module parameter must be set (default).
13889 * In SLI4-Parameters Descriptor:
13890 * Extended Inline Buffers (XIB) must be supported.
13891 * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
13892 * (double negative).
13894 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
13895 !(bf_get(cfg_nosr, mbx_sli4_parameters)))
13896 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
13897 else
13898 phba->cfg_suppress_rsp = 0;
13900 if (bf_get(cfg_eqdr, mbx_sli4_parameters))
13901 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
13903 /* Make sure that sge_supp_len can be handled by the driver */
13904 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
13905 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
13907 /*
13908 * Check whether the adapter supports an embedded copy of the
13909 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
13910 * to use this option, 128-byte WQEs must be used.
13911 */
13912 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
13913 phba->fcp_embed_io = 1;
13915 phba->fcp_embed_io = 0;
13917 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13918 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
13919 bf_get(cfg_xib, mbx_sli4_parameters),
13920 phba->cfg_enable_pbde,
13921 phba->fcp_embed_io, sli4_params->nvme,
13922 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
13924 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
13925 LPFC_SLI_INTF_IF_TYPE_2) &&
13926 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
13927 LPFC_SLI_INTF_FAMILY_LNCR_A0))
13928 exp_wqcq_pages = false;
13930 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
13931 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
13932 exp_wqcq_pages &&
13933 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
13934 phba->enab_exp_wqcq_pages = 1;
13935 else
13936 phba->enab_exp_wqcq_pages = 0;
13937 /*
13938 * Check if the SLI port supports MDS Diagnostics
13939 */
13940 if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
13941 phba->mds_diags_support = 1;
13942 else
13943 phba->mds_diags_support = 0;
13945 /*
13946 * Check if the SLI port supports NSLER
13947 */
13948 if (bf_get(cfg_nsler, mbx_sli4_parameters))
13949 phba->nsler = 1;
13950 else
13951 phba->nsler = 0;
13953 return 0;
13954 }
13956 /**
13957 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
13958 * @pdev: pointer to PCI device
13959 * @pid: pointer to PCI device identifier
13961 * This routine is to be called to attach a device with SLI-3 interface spec
13962 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
13963 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
13964 * information of the device and driver to see if the driver states that it
13965 * can support this kind of device. If the match is successful, the driver
13966 * core invokes this routine. If this routine determines it can claim the
13967 * HBA, it does all the initialization that it needs to do to handle the
13968 * HBA properly.
13969 * Return code
13970 * 0 - driver can claim the device
13971 * negative value - driver can not claim the device
13972 **/
13973 static int
13974 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
13975 {
13976 struct lpfc_hba *phba;
13977 struct lpfc_vport *vport = NULL;
13978 struct Scsi_Host *shost = NULL;
13979 int error;
13980 uint32_t cfg_mode, intr_mode;
13982 /* Allocate memory for HBA structure */
13983 phba = lpfc_hba_alloc(pdev);
13984 if (!phba)
13985 return -ENOMEM;
13987 /* Perform generic PCI device enabling operation */
13988 error = lpfc_enable_pci_dev(phba);
13989 if (error)
13990 goto out_free_phba;
13992 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
13993 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
13994 if (error)
13995 goto out_disable_pci_dev;
13997 /* Set up SLI-3 specific device PCI memory space */
13998 error = lpfc_sli_pci_mem_setup(phba);
13999 if (error) {
14000 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14001 "1402 Failed to set up pci memory space.\n");
14002 goto out_disable_pci_dev;
14003 }
14005 /* Set up SLI-3 specific device driver resources */
14006 error = lpfc_sli_driver_resource_setup(phba);
14007 if (error) {
14008 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14009 "1404 Failed to set up driver resource.\n");
14010 goto out_unset_pci_mem_s3;
14011 }
14013 /* Initialize and populate the iocb list per host */
14015 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
14016 if (error) {
14017 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14018 "1405 Failed to initialize iocb list.\n");
14019 goto out_unset_driver_resource_s3;
14020 }
14022 /* Set up common device driver resources */
14023 error = lpfc_setup_driver_resource_phase2(phba);
14024 if (error) {
14025 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14026 "1406 Failed to set up driver resource.\n");
14027 goto out_free_iocb_list;
14028 }
14030 /* Get the default values for Model Name and Description */
14031 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14033 /* Create SCSI host to the physical port */
14034 error = lpfc_create_shost(phba);
14035 if (error) {
14036 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14037 "1407 Failed to create scsi host.\n");
14038 goto out_unset_driver_resource;
14039 }
14041 /* Configure sysfs attributes */
14042 vport = phba->pport;
14043 error = lpfc_alloc_sysfs_attr(vport);
14044 if (error) {
14045 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14046 "1476 Failed to allocate sysfs attr\n");
14047 goto out_destroy_shost;
14048 }
14050 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
14051 /* Now, trying to enable interrupt and bring up the device */
14052 cfg_mode = phba->cfg_use_msi;
14053 while (true) {
14054 /* Put device to a known state before enabling interrupt */
14055 lpfc_stop_port(phba);
14056 /* Configure and enable interrupt */
14057 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
14058 if (intr_mode == LPFC_INTR_ERROR) {
14059 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14060 "0431 Failed to enable interrupt.\n");
14061 error = -ENODEV;
14062 goto out_free_sysfs_attr;
14063 }
14064 /* SLI-3 HBA setup */
14065 if (lpfc_sli_hba_setup(phba)) {
14066 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14067 "1477 Failed to set up hba\n");
14068 error = -ENODEV;
14069 goto out_remove_device;
14070 }
14072 /* Wait 50ms for the interrupts of previous mailbox commands */
14073 msleep(50);
14074 /* Check active interrupts on message signaled interrupts */
14075 if (intr_mode == 0 ||
14076 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
14077 /* Log the current active interrupt mode */
14078 phba->intr_mode = intr_mode;
14079 lpfc_log_intr_mode(phba, intr_mode);
14080 break;
14081 } else {
14082 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14083 "0447 Configure interrupt mode (%d) "
14084 "failed active interrupt test.\n",
14085 intr_mode);
14086 /* Disable the current interrupt mode */
14087 lpfc_sli_disable_intr(phba);
14088 /* Try next level of interrupt mode */
14089 cfg_mode = --intr_mode;
14090 }
14091 }
14093 /* Perform post initialization setup */
14094 lpfc_post_init_setup(phba);
14096 /* Check if there are static vports to be created. */
14097 lpfc_create_static_vport(phba);
14099 return 0;
14101 out_remove_device:
14102 lpfc_unset_hba(phba);
14103 out_free_sysfs_attr:
14104 lpfc_free_sysfs_attr(vport);
14106 lpfc_destroy_shost(phba);
14107 out_unset_driver_resource:
14108 lpfc_unset_driver_resource_phase2(phba);
14109 out_free_iocb_list:
14110 lpfc_free_iocb_list(phba);
14111 out_unset_driver_resource_s3:
14112 lpfc_sli_driver_resource_unset(phba);
14113 out_unset_pci_mem_s3:
14114 lpfc_sli_pci_mem_unset(phba);
14115 out_disable_pci_dev:
14116 lpfc_disable_pci_dev(phba);
14118 scsi_host_put(shost);
14120 lpfc_hba_free(phba);
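/*
 * Editor's note: the probe path above is the kernel's staged-init idiom --
 * each setup step has a matching teardown label, and a failure at step N
 * jumps to the label that unwinds steps N-1..1 in reverse order. A minimal
 * self-contained sketch of the pattern (step/undo names are hypothetical,
 * not lpfc functions):
 *
 *	static int example_probe(struct pci_dev *pdev)
 *	{
 *		int error;
 *
 *		error = step_a(pdev);		// e.g. enable the PCI device
 *		if (error)
 *			return error;
 *		error = step_b(pdev);		// e.g. request regions
 *		if (error)
 *			goto out_undo_a;
 *		error = step_c(pdev);		// e.g. map BARs
 *		if (error)
 *			goto out_undo_b;
 *		return 0;
 *
 *	out_undo_b:
 *		undo_b(pdev);
 *	out_undo_a:
 *		undo_a(pdev);
 *		return error;
 *	}
 */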
/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec
 * is removed from the PCI bus, it performs all the necessary cleanup for the
 * HBA device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Clean up all nodes, mailboxes and IOs. */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_free_iocb_list(phba);

	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) for a device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that the driver implements only the
 * minimum PM requirements of a power-aware driver: all possible PM messages
 * (SUSPEND, HIBERNATE, FREEZE) to the suspend() method call are treated as
 * SUSPEND, and the driver fully reinitializes its device during the resume()
 * method call. The driver therefore sets the device to PCI_D3hot state in
 * PCI config space instead of setting it according to the @msg provided by
 * the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one_s3(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	return 0;
}
/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) for a device with SLI-3 interface spec. When
 * PM invokes this method, it restores the device's PCI config space state
 * and fully reinitializes the device and brings it online. Note that the
 * driver implements only the minimum PM requirements of a power-aware
 * driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
 * suspend() method call are treated as SUSPEND, and the driver fully
 * reinitializes its device during the resume() method call. The device is
 * therefore set to PCI_D0 directly in PCI config space before restoring
 * the state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one_s3(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Init cpu_map array */
	lpfc_cpu_map_array_init(phba);
	/* Init hba_eq_hdl array */
	lpfc_hba_eq_hdl_array_init(phba);
	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2710 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}
/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2711 PCI channel permanent disable for failure\n");
	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);
	lpfc_sli4_prep_dev_for_reset(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_io_rings(phba);
}
/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * a device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it stops all the I/Os and interrupt(s)
 * to the device. Once that is done, it returns
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
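/*
 * Editor's note: every pci_error_handlers::error_detected callback follows
 * the same contract as the function above: map the pci_channel_state_t it
 * is handed to one of the pci_ers_result_t values. A minimal generic sketch
 * (not lpfc code; a real handler also quiesces I/O first):
 *
 *	static pci_ers_result_t
 *	example_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 *	{
 *		switch (state) {
 *		case pci_channel_io_normal:	// non-fatal, I/O still possible
 *			return PCI_ERS_RESULT_CAN_RECOVER;
 *		case pci_channel_io_frozen:	// MMIO/DMA blocked, want reset
 *			return PCI_ERS_RESULT_NEED_RESET;
 *		case pci_channel_io_perm_failure: // device is gone for good
 *			return PCI_ERS_RESULT_DISCONNECT;
 *		default:
 *			return PCI_ERS_RESULT_NEED_RESET;
 *		}
 *	}
 */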
/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-3 interface spec. This is called after the PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem performs proper error
 * recovery and then calls this routine before calling the .resume method
 * to recover the device. This function initializes the HBA device and
 * enables the interrupt, but it just puts the HBA into offline state
 * without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
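/*
 * Editor's note: the restore-then-save pair above is deliberate.
 * pci_restore_state() consumes the config-space image captured earlier and
 * clears the device's state_saved flag, so a later slot reset would find
 * nothing to restore unless the state is captured again. The idiom in
 * isolation:
 *
 *	pci_restore_state(pdev);	// write saved config space to device
 *	pci_save_state(pdev);		// re-arm: capture it again for next time
 */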
/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-3 interface spec. It is called when kernel error recovery
 * tells the lpfc driver that it is ok to resume normal PCI operation after
 * PCI bus error recovery. After this call, traffic can start to flow from
 * this device again.
 **/
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);
}
/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT IOCBs to reserve.
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else if (max_xri <= 1536)
			return 150;
		else if (max_xri <= 2048)
			return 200;
		else
			return 250;
	} else
		return 0;
}

/**
 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT + NVMET IOCBs to reserve.
 **/
int
lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);

	if (phba->nvmet_support)
		max_xri += LPFC_NVMET_BUF_POST;
	return max_xri;
}
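/*
 * Editor's note: worked example of the two helpers above. The values follow
 * the max_xri tiers in lpfc_sli4_get_els_iocb_cnt() as reconstructed above;
 * the NVMET add-on assumes nvmet_support is set:
 *
 *	max_xri = 512  -> lpfc_sli4_get_els_iocb_cnt() == 50
 *	max_xri = 2048 -> lpfc_sli4_get_els_iocb_cnt() == 200
 *	with nvmet_support:
 *	lpfc_sli4_get_iocb_cnt() == lpfc_sli4_get_els_iocb_cnt()
 *				    + LPFC_NVMET_BUF_POST
 */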
static int
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
	uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
	const struct firmware *fw)
{
	int rc;
	u8 sli_family;

	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
	/* Three cases: (1) FW was not supported on the detected adapter.
	 * (2) FW update has been locked out administratively.
	 * (3) Some other error during FW update.
	 * In each case, an unmaskable message is written to the console
	 * for admin diagnosis.
	 */
	if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
	    (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
	     magic_number != MAGIC_NUMBER_G6) ||
	    (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
	     magic_number != MAGIC_NUMBER_G7) ||
	    (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
	     magic_number != MAGIC_NUMBER_G7P)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3030 This firmware version is not supported on"
				" this HBA model. Device:%x Magic:%x Type:%x "
				"ID:%x Size %d %zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EINVAL;
	} else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3021 Firmware downloads have been prohibited "
				"by a system configuration setting on "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				phba->pcidev->device, magic_number, ftype, fid,
				fsize, fw->size);
		rc = -EACCES;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3022 FW Download failed. Add Status x%x "
				"Device:%x Magic:%x Type:%x ID:%x Size %d "
				"%zd\n",
				offset, phba->pcidev->device, magic_number,
				ftype, fid, fsize, fw->size);
		rc = -EIO;
	}
	return rc;
}
/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to the lpfc hba data structure (callback context).
 **/
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;
	uint32_t magic_number, ftype, fid, fsize;

	/* It can be null in no-wait mode, sanity check */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	magic_number = be32_to_cpu(image->magic_number);
	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
	fid = bf_get_be32(lpfc_grp_hdr_id, image);
	fsize = be32_to_cpu(image->size);

	INIT_LIST_HEAD(&dma_buffer_list);
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3023 Updating Firmware, Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
					    (fw->size - offset), &offset);
			if (rc) {
				rc = lpfc_log_write_firmware_error(phba, offset,
								   magic_number,
								   ftype, fid,
								   fsize, fw);
				goto release_out;
			}
		}
		rc = offset;
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3029 Skipped Firmware update, Current "
				"Version:%s New Version:%s\n",
				fwrev, image->revision);

release_out:
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3062 Firmware update error, status %d.\n", rc);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3024 Firmware update success: size %d.\n", rc);
}
/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: which firmware to update.
 *
 * This routine is called to perform Linux generic firmware upgrade on device
 * that supports such feature.
 **/
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
	char file_name[ELX_MODEL_NAME_SIZE];
	int ret;
	const struct firmware *fw;

	/* Only supported on SLI4 interface type 2 for now */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -EPERM;

	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);

	if (fw_upgrade == INT_FW_UPGRADE) {
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
					      file_name, &phba->pcidev->dev,
					      GFP_KERNEL, (void *)phba,
					      lpfc_write_firmware);
	} else if (fw_upgrade == RUN_FW_UPGRADE) {
		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
		if (!ret)
			lpfc_write_firmware(fw, (void *)phba);
	} else {
		ret = -EINVAL;
	}

	return ret;
}
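/*
 * Editor's note: the two branches above are the standard firmware-loader
 * pairings: request_firmware_nowait() is asynchronous and invokes the
 * callback (here lpfc_write_firmware) with a NULL fw pointer when the file
 * cannot be loaded, while request_firmware() is synchronous and returns an
 * errno. Generic shape of the async call (example_cb and example_ctx are
 * placeholders):
 *
 *	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
 *				      "example.bin", &pdev->dev, GFP_KERNEL,
 *				      example_ctx, example_cb);
 */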
/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem for a device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at PCI
 * device-specific information of the device to see if the driver can support
 * this kind of device. If the match is successful, the driver core invokes
 * this routine. If this routine determines it can claim the HBA, it does all
 * the initialization that it needs to do to handle the HBA properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver cannot claim the device
 **/
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	INIT_LIST_HEAD(&phba->poll_list);

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-4 Specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	INIT_LIST_HEAD(&phba->active_rrq_list);
	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_unset_driver_resource_s4;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;

	/* Put device to a known state before enabling interrupt */
	phba->pport = NULL;
	lpfc_stop_port(phba);

	/* Init cpu_map array */
	lpfc_cpu_map_array_init(phba);

	/* Init hba_eq_hdl array */
	lpfc_hba_eq_hdl_array_init(phba);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0426 Failed to enable interrupt.\n");
		error = -ENODEV;
		goto out_unset_driver_resource;
	}
	/* Default to single EQ for non-MSI-X */
	if (phba->intr_type != MSIX) {
		phba->cfg_irq_chann = 1;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			if (phba->nvmet_support)
				phba->cfg_nvmet_mrq = 1;
		}
	}
	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_disable_intr;
	}
	vport = phba->pport;
	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */

	/* Configure sysfs attributes */
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	/* Set up SLI-4 HBA */
	if (lpfc_sli4_hba_setup(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1421 Failed to set up hba\n");
		error = -ENODEV;
		goto out_free_sysfs_attr;
	}

	/* Log the current active interrupt mode */
	phba->intr_mode = intr_mode;
	lpfc_log_intr_mode(phba, intr_mode);

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* NVME support in FW earlier in the driver load corrects the
	 * FC4 type making a check for nvme_support unnecessary.
	 */
	if (phba->nvmet_support == 0) {
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* Create NVME binding with nvme_fc_transport. This
			 * ensures the vport is initialized. If the localport
			 * create fails, it should not unload the driver to
			 * support field issues.
			 */
			error = lpfc_nvme_create_localport(vport);
			if (error) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6004 NVME registration "
						"failed, error x%x\n",
						error);
			}
		}
	}

	/* check for firmware upgrade or downgrade */
	if (phba->cfg_request_firmware_upgrade)
		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);

	return 0;

out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem for a device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from the PCI bus, it performs all the necessary cleanup for the
 * HBA device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);
	if (phba->cgn_i)
		lpfc_unreg_congestion_buf(phba);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform ndlp cleanup on the physical port. The nvme and nvmet
	 * localports are destroyed after to cleanup all transport memory.
	 */
	lpfc_cleanup(vport);
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(vport);

	/* De-allocate multi-XRI pools */
	if (phba->cfg_xri_rebalancing)
		lpfc_destroy_multixri_pools(phba);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->port_list_lock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->port_list_lock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_io_free(phba);
	lpfc_free_iocb_list(phba);
	lpfc_sli4_hba_unset(phba);

	lpfc_unset_driver_resource_phase2(phba);
	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);
}
/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) for a device with SLI-4 interface spec. When PM
 * invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that the driver implements only the
 * minimum PM requirements of a power-aware driver: all possible PM messages
 * (SUSPEND, HIBERNATE, FREEZE) to the suspend() method call are treated as
 * SUSPEND, and the driver fully reinitializes its device during the resume()
 * method call. The driver therefore sets the device to PCI_D3hot state in
 * PCI config space instead of setting it according to the @msg provided by
 * the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	return 0;
}
/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) for a device with SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that the driver
 * implements only the minimum PM requirements of a power-aware driver: all
 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() method
 * call are treated as SUSPEND, and the driver fully reinitializes its device
 * during the resume() method call. The device is therefore set to PCI_D0
 * directly in PCI config space before restoring the state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one_s4(struct device *dev_d)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev_d);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	int offline = pci_channel_offline(phba->pcidev);

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset offline"
			" %d\n", offline);

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* HBA_PCI_ERR was set in io_error_detect */
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	/* Flush all driver's outstanding I/Os as we are to reset */
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	lpfc_sli4_queue_destroy(phba);
	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}
/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding I/Os */
	lpfc_sli_flush_io_rings(phba);
}
/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-4 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it stops all the I/Os and interrupt(s)
 * to the device. Once that is done, it returns PCI_ERS_RESULT_NEED_RESET
 * for the PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	bool hba_pci_err;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
		/* Fatal error, prepare for slot reset */
		if (!hba_pci_err)
			lpfc_sli4_prep_dev_for_reset(phba);
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2832 Already handling PCI error "
					"state: x%x\n", state);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		set_bit(HBA_PCI_ERR, &phba->bit_flags);
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2825 Unknown PCI error state: x%x\n", state);
		hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
		if (!hba_pci_err)
			lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
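/*
 * Editor's note: the HBA_PCI_ERR handling above relies on test_and_set_bit()
 * being atomic -- only the first caller sees 0 returned and performs the
 * reset preparation; concurrent or repeated error notifications just log.
 * The once-only idiom in isolation (flags and bit 0 are placeholders):
 *
 *	unsigned long flags = 0;
 *
 *	if (!test_and_set_bit(0, &flags)) {
 *		// first time through: do the one-shot work
 *	} else {
 *		// already in progress: skip or log
 *	}
 */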
/**
 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-4 interface spec. It is called after the PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot. During
 * the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem performs proper error
 * recovery and then calls this routine before calling the .resume method to
 * recover the device. This function initializes the HBA device and enables
 * the interrupt, but it just puts the HBA into offline state without
 * passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;
	bool hba_pci_err;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
	if (!hba_pci_err)
		dev_info(&pdev->dev,
			 "hba_pci_err was not set, recovering slot reset.\n");
	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Init cpu_map array */
	lpfc_cpu_map_array_init(phba);
	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;
	lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-4 interface spec. It is called when kernel error recovery
 * tells the lpfc driver that it is ok to resume normal PCI operation after
 * PCI bus error recovery. After this call, traffic can start to flow from
 * this device again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}
}
/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
 * looks at PCI device-specific information of the device to see if the
 * driver can support this kind of device. If the match is successful, the
 * driver core invokes this routine. This routine dispatches the action to
 * the proper SLI-3 or SLI-4 device probing routine, which does all the
 * initialization that it needs to do to handle the HBA device properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver cannot claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}
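/*
 * Editor's note: the dispatch above keys off a single 32-bit SLI_INTF
 * register read from PCI config space; bf_get() extracts bitfields from it.
 * A generic equivalent of that decode (the offset and the mask/shift values
 * below are illustrative only, not the real SLI_INTF layout):
 *
 *	u32 word0;
 *	unsigned int valid, slirev;
 *
 *	// 0x58 is an illustrative config-space offset, not LPFC_SLI_INTF
 *	if (pci_read_config_dword(pdev, 0x58, &word0))
 *		return -ENODEV;
 *	valid  = (word0 >> 29) & 0x7;	// illustrative field position
 *	slirev = (word0 >> 4) & 0xf;	// illustrative field position
 */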
/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from the PCI bus, the driver core invokes this
 * routine. This routine dispatches the action to the proper SLI-3 or SLI-4
 * device remove routine, which performs all the necessary cleanup for the
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}
/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @dev: pointer to device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_suspend_one(struct device *dev)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(dev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(dev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @dev: pointer to device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int __maybe_unused
lpfc_pci_resume_one(struct device *dev)
{
	struct Scsi_Host *shost = dev_get_drvdata(dev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(dev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(dev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	if (phba->link_state == LPFC_HBA_ERROR &&
	    phba->hba_flag & HBA_IOQ_FLUSH)
		return PCI_ERS_RESULT_NEED_RESET;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}
/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if OAS is supported for this adapter. If
 * supported, the configure Flash Optimized Fabric flag is set. Otherwise,
 * the enable oas flag is cleared and the pool created for OAS device data
 * is destroyed.
 **/
static void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{
	if (!phba->cfg_EnableXLane)
		return;

	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
		phba->cfg_fof = 1;
	} else {
		phba->cfg_fof = 0;
		mempool_destroy(phba->device_data_mem_pool);
		phba->device_data_mem_pool = NULL;
	}
}
/**
 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if RAS is supported by the adapter. Check the
 * function through which RAS support enablement is to be done.
 **/
void
lpfc_sli4_ras_init(struct lpfc_hba *phba)
{
	/* if ASIC_GEN_NUM >= 0xC */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_6) ||
	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_FAMILY_G6)) {
		phba->ras_fwlog.ras_hwsupport = true;
		if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
		    phba->cfg_ras_fwlog_buffsize)
			phba->ras_fwlog.ras_enabled = true;
		else
			phba->ras_fwlog.ras_enabled = false;
	} else {
		phba->ras_fwlog.ras_hwsupport = false;
	}
}
MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
			 lpfc_pci_suspend_one,
			 lpfc_pci_resume_one);
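/*
 * Editor's note: SIMPLE_DEV_PM_OPS() expands to a struct dev_pm_ops that
 * routes all system-sleep callbacks to the two functions given, which is
 * why the driver can treat every PM message as SUSPEND. A hand-written
 * equivalent (sketch, example_pm_ops is a placeholder name):
 *
 *	static const struct dev_pm_ops example_pm_ops = {
 *		.suspend  = lpfc_pci_suspend_one,
 *		.resume   = lpfc_pci_resume_one,
 *		.freeze   = lpfc_pci_suspend_one,
 *		.thaw     = lpfc_pci_resume_one,
 *		.poweroff = lpfc_pci_suspend_one,
 *		.restore  = lpfc_pci_resume_one,
 *	};
 */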
static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.shutdown	= lpfc_pci_remove_one,
	.driver.pm	= &lpfc_pci_pm_ops_one,
	.err_handler	= &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - FC attach transport failed
 *	all others - failed
 */
static int __init
lpfc_init(void)
{
	int error = 0;

	pr_info(LPFC_MODULE_DESC "\n");
	pr_info(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d", error);

	error = -ENOMEM;
	lpfc_transport_functions.vport_create = lpfc_vport_create;
	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		goto unregister;
	lpfc_vport_transport_template =
		fc_attach_transport(&lpfc_vport_transport_functions);
	if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		goto unregister;
	}
	lpfc_wqe_cmd_template();
	lpfc_nvmet_cmd_template();

	/* Initialize in case vector mapping is needed */
	lpfc_present_cpu = num_present_cpus();

	lpfc_pldv_detect = false;

	error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					"lpfc/sli4:online",
					lpfc_cpu_online, lpfc_cpu_offline);
	if (error < 0)
		goto cpuhp_failure;
	lpfc_cpuhp_state = error;

	error = pci_register_driver(&lpfc_driver);
	if (error)
		goto unwind;

	return error;

unwind:
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
cpuhp_failure:
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
unregister:
	misc_deregister(&lpfc_mgmt_dev);

	return error;
}
void lpfc_dmp_dbg(struct lpfc_hba *phba)
{
	unsigned int start_idx;
	unsigned int dbg_cnt;
	unsigned int temp_idx;
	int i;
	int j = 0;
	unsigned long rem_nsec;

	if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
		return;

	start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
	dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
	if (!dbg_cnt)
		goto out;
	temp_idx = start_idx;
	if (dbg_cnt >= DBG_LOG_SZ) {
		dbg_cnt = DBG_LOG_SZ;
		temp_idx -= 1;
	} else {
		if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
			temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
		} else {
			if (start_idx < dbg_cnt)
				start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
			else
				start_idx -= dbg_cnt;
		}
	}
	dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
		 start_idx, temp_idx, dbg_cnt);

	for (i = 0; i < dbg_cnt; i++) {
		if ((start_idx + i) < DBG_LOG_SZ)
			temp_idx = (start_idx + i) % DBG_LOG_SZ;
		else
			temp_idx = j++;
		rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
		dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
			 temp_idx,
			 (unsigned long)phba->dbg_log[temp_idx].t_ns,
			 rem_nsec / 1000,
			 phba->dbg_log[temp_idx].log);
	}
out:
	atomic_set(&phba->dbg_log_cnt, 0);
	atomic_set(&phba->dbg_log_dmping, 0);
}
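/*
 * Editor's note: worked example of the wrap-around math above, assuming
 * DBG_LOG_SZ == 256. If dbg_log_idx stopped at 10 with dbg_cnt == 20
 * messages logged, then start_idx < dbg_cnt, so the dump starts at
 * 256 - (20 - 10) = 246 and replays entries 246..255 followed by 0..9,
 * i.e. the 20 buffered entries in oldest-to-newest order.
 */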
void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
{
	unsigned int idx;
	va_list args;
	int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
	struct va_format vaf;

	va_start(args, fmt);
	if (unlikely(dbg_dmping)) {
		vaf.fmt = fmt;
		vaf.va = &args;
		dev_info(&phba->pcidev->dev, "%pV", &vaf);
		va_end(args);
		return;
	}
	idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
		DBG_LOG_SZ;

	atomic_inc(&phba->dbg_log_cnt);

	vscnprintf(phba->dbg_log[idx].log,
		   sizeof(phba->dbg_log[idx].log), fmt, args);
	va_end(args);

	phba->dbg_log[idx].t_ns = local_clock();
}
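/*
 * Editor's note: lpfc_dbg_print() is printf-like, so it serves as a cheap
 * always-on trace that only reaches the console when lpfc_dmp_dbg() drains
 * the ring. Hypothetical usage (did and cmd are placeholder locals):
 *
 *	lpfc_dbg_print(phba, "els: did x%x cmd x%x\n", did, cmd);
 *	...
 *	lpfc_dmp_dbg(phba);	// e.g. from an error path, dumps the ring
 */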
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	cpuhp_remove_multi_state(lpfc_cpuhp_state);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	idr_destroy(&lpfc_hba_index);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);