1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2015 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
22 #include <linux/blkdev.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/idr.h>
26 #include <linux/interrupt.h>
27 #include <linux/module.h>
28 #include <linux/kthread.h>
29 #include <linux/pci.h>
30 #include <linux/spinlock.h>
31 #include <linux/ctype.h>
32 #include <linux/aer.h>
33 #include <linux/slab.h>
34 #include <linux/firmware.h>
35 #include <linux/miscdevice.h>
36 #include <linux/percpu.h>
38 #include <scsi/scsi.h>
39 #include <scsi/scsi_device.h>
40 #include <scsi/scsi_host.h>
41 #include <scsi/scsi_transport_fc.h>
46 #include "lpfc_sli4.h"
48 #include "lpfc_disc.h"
49 #include "lpfc_scsi.h"
51 #include "lpfc_logmsg.h"
52 #include "lpfc_crtn.h"
53 #include "lpfc_vport.h"
54 #include "lpfc_version.h"
57 unsigned long _dump_buf_data_order;
59 unsigned long _dump_buf_dif_order;
60 spinlock_t _dump_buf_lock;
62 /* Used when mapping IRQ vectors in a driver centric manner */
63 uint16_t *lpfc_used_cpu;
64 uint32_t lpfc_present_cpu;
66 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
67 static int lpfc_post_rcv_buf(struct lpfc_hba *);
68 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
69 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
70 static int lpfc_setup_endian_order(struct lpfc_hba *);
71 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
72 static void lpfc_free_els_sgl_list(struct lpfc_hba *);
73 static void lpfc_init_sgl_list(struct lpfc_hba *);
74 static int lpfc_init_active_sgl_array(struct lpfc_hba *);
75 static void lpfc_free_active_sgl(struct lpfc_hba *);
76 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
77 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
78 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
79 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
80 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
81 static void lpfc_sli4_disable_intr(struct lpfc_hba *);
82 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
83 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
85 static struct scsi_transport_template *lpfc_transport_template = NULL;
86 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
87 static DEFINE_IDR(lpfc_hba_index);
90 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
91 * @phba: pointer to lpfc hba data structure.
93 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
94 * mailbox command. It retrieves the revision information from the HBA and
95 * collects the Vital Product Data (VPD) about the HBA for preparing the
96 * configuration of the HBA.
100 * -ERESTART - requests the SLI layer to reset the HBA and try again.
101 * Any other value - indicates an error.
104 lpfc_config_port_prep(struct lpfc_hba *phba)
106 lpfc_vpd_t *vp = &phba->vpd;
110 char *lpfc_vpd_data = NULL;
112 static char licensed[56] =
113 "key unlock for use with gnu public licensed code only\0";
114 static int init_key = 1;
116 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
118 phba->link_state = LPFC_HBA_ERROR;
123 phba->link_state = LPFC_INIT_MBX_CMDS;
125 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
127 uint32_t *ptext = (uint32_t *) licensed;
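128 /* Convert the license key words to big-endian before sending them to the HBA */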
129 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
130 *ptext = cpu_to_be32(*ptext);
134 lpfc_read_nv(phba, pmb);
135 memset((char*)mb->un.varRDnvp.rsvd3, 0,
136 sizeof (mb->un.varRDnvp.rsvd3));
137 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
140 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
142 if (rc != MBX_SUCCESS) {
143 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
144 "0324 Config Port initialization "
145 "error, mbxCmd x%x READ_NVPARM, "
147 mb->mbxCommand, mb->mbxStatus);
148 mempool_free(pmb, phba->mbox_mem_pool);
151 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
153 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
157 phba->sli3_options = 0x0;
159 /* Setup and issue mailbox READ REV command */
160 lpfc_read_rev(phba, pmb);
161 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
162 if (rc != MBX_SUCCESS) {
163 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
164 "0439 Adapter failed to init, mbxCmd x%x "
165 "READ_REV, mbxStatus x%x\n",
166 mb->mbxCommand, mb->mbxStatus);
167 mempool_free( pmb, phba->mbox_mem_pool);
173 * The value of rr must be 1 since the driver sets the cv field to 1.
174 * This setting requires the FW to set all revision fields.
176 if (mb->un.varRdRev.rr == 0) {
178 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
179 "0440 Adapter failed to init, READ_REV has "
180 "missing revision information.\n");
181 mempool_free(pmb, phba->mbox_mem_pool);
185 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
186 mempool_free(pmb, phba->mbox_mem_pool);
190 /* Save information as VPD data */
192 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
193 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
194 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
195 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
196 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
197 vp->rev.biuRev = mb->un.varRdRev.biuRev;
198 vp->rev.smRev = mb->un.varRdRev.smRev;
199 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
200 vp->rev.endecRev = mb->un.varRdRev.endecRev;
201 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
202 vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
203 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
204 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
205 vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
206 vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
208 /* If the sli feature level is less than 9, we must
209 * tear down all RPIs and VPIs on link down if NPIV
210 * is enabled.
211 */
212 if (vp->rev.feaLevelHigh < 9)
213 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
215 if (lpfc_is_LC_HBA(phba->pcidev->device))
216 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
217 sizeof (phba->RandomData));
219 /* Get adapter VPD information */
220 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
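222 /* Dump the VPD region from adapter memory in chunks until no more data is returned */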
224 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
225 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
227 if (rc != MBX_SUCCESS) {
228 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
229 "0441 VPD not present on adapter, "
230 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
231 mb->mbxCommand, mb->mbxStatus);
232 mb->un.varDmp.word_cnt = 0;
234 /* dump mem may return a zero when finished or we got a
235 * mailbox error, either way we are done.
237 if (mb->un.varDmp.word_cnt == 0)
239 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
240 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
241 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
242 lpfc_vpd_data + offset,
243 mb->un.varDmp.word_cnt);
244 offset += mb->un.varDmp.word_cnt;
245 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
246 lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
248 kfree(lpfc_vpd_data);
250 mempool_free(pmb, phba->mbox_mem_pool);
255 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
256 * @phba: pointer to lpfc hba data structure.
257 * @pmboxq: pointer to the driver internal queue element for mailbox command.
259 * This is the completion handler for the driver's configure asynchronous event
260 * mailbox command to the device. If the mailbox command returns successfully,
261 * it will set the internal async event support flag to 1; otherwise, it will
262 * set the internal async event support flag to 0.
265 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
267 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
268 phba->temp_sensor_support = 1;
270 phba->temp_sensor_support = 0;
271 mempool_free(pmboxq, phba->mbox_mem_pool);
276 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
277 * @phba: pointer to lpfc hba data structure.
278 * @pmboxq: pointer to the driver internal queue element for mailbox command.
280 * This is the completion handler for the dump mailbox command for getting
281 * wake up parameters. When this command completes, the response contains the
282 * Option ROM version of the HBA. This function translates the version number
283 * into a human readable string and stores it in OptionROMVersion.
286 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
289 uint32_t prog_id_word;
291 /* character array used for decoding dist type. */
292 char dist_char[] = "nabx";
294 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
295 mempool_free(pmboxq, phba->mbox_mem_pool);
299 prg = (struct prog_id *) &prog_id_word;
301 /* word 7 contains the option rom version */
302 prog_id_word = pmboxq->u.mb.un.varWords[7];
304 /* Decode the Option rom version word to a readable string */
306 dist = dist_char[prg->dist];
308 if ((prg->dist == 3) && (prg->num == 0))
309 snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
310 prg->ver, prg->rev, prg->lev);
312 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
313 prg->ver, prg->rev, prg->lev,
315 mempool_free(pmboxq, phba->mbox_mem_pool);
320 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
321 * cfg_soft_wwnn, cfg_soft_wwpn
322 * @vport: pointer to lpfc vport data structure.
329 lpfc_update_vport_wwn(struct lpfc_vport *vport)
331 /* If the soft name exists then update it using the service params */
332 if (vport->phba->cfg_soft_wwnn)
333 u64_to_wwn(vport->phba->cfg_soft_wwnn,
334 vport->fc_sparam.nodeName.u.wwn);
335 if (vport->phba->cfg_soft_wwpn)
336 u64_to_wwn(vport->phba->cfg_soft_wwpn,
337 vport->fc_sparam.portName.u.wwn);
340 * If the name is empty or there exists a soft name
341 * then copy the service params name, otherwise use the fc name
343 if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
344 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
345 sizeof(struct lpfc_name));
347 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
348 sizeof(struct lpfc_name));
350 if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
351 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
352 sizeof(struct lpfc_name));
354 memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
355 sizeof(struct lpfc_name));
359 * lpfc_config_port_post - Perform lpfc initialization after config port
360 * @phba: pointer to lpfc hba data structure.
362 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
363 * command call. It performs all internal resource and state setups on the
364 * port: post IOCB buffers, enable appropriate host interrupt attentions,
365 * ELS ring timers, etc.
369 * Any other value - error.
372 lpfc_config_port_post(struct lpfc_hba *phba)
374 struct lpfc_vport *vport = phba->pport;
375 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
378 struct lpfc_dmabuf *mp;
379 struct lpfc_sli *psli = &phba->sli;
380 uint32_t status, timeout;
384 spin_lock_irq(&phba->hbalock);
386 * If the Config port completed correctly the HBA is not
387 * overheated anymore.
389 if (phba->over_temp_state == HBA_OVER_TEMP)
390 phba->over_temp_state = HBA_NORMAL_TEMP;
391 spin_unlock_irq(&phba->hbalock);
393 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
395 phba->link_state = LPFC_HBA_ERROR;
400 /* Get login parameters for NID. */
401 rc = lpfc_read_sparam(phba, pmb, 0);
403 mempool_free(pmb, phba->mbox_mem_pool);
408 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
409 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
410 "0448 Adapter failed init, mbxCmd x%x "
411 "READ_SPARM mbxStatus x%x\n",
412 mb->mbxCommand, mb->mbxStatus);
413 phba->link_state = LPFC_HBA_ERROR;
414 mp = (struct lpfc_dmabuf *) pmb->context1;
415 mempool_free(pmb, phba->mbox_mem_pool);
416 lpfc_mbuf_free(phba, mp->virt, mp->phys);
421 mp = (struct lpfc_dmabuf *) pmb->context1;
423 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
424 lpfc_mbuf_free(phba, mp->virt, mp->phys);
426 pmb->context1 = NULL;
427 lpfc_update_vport_wwn(vport);
429 /* Update the fc_host data structures with new wwn. */
430 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
431 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
432 fc_host_max_npiv_vports(shost) = phba->max_vpi;
434 /* If no serial number in VPD data, use low 6 bytes of WWNN */
435 /* This should be consolidated into parse_vpd ? - mr */
436 if (phba->SerialNumber[0] == 0) {
439 outptr = &vport->fc_nodename.u.s.IEEE[0];
440 for (i = 0; i < 12; i++) {
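441 /* Convert each nibble of the WWNN bytes to an ASCII hex character */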
442 j = ((status & 0xf0) >> 4);
444 phba->SerialNumber[i] =
445 (char)((uint8_t) 0x30 + (uint8_t) j);
447 phba->SerialNumber[i] =
448 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
452 phba->SerialNumber[i] =
453 (char)((uint8_t) 0x30 + (uint8_t) j);
455 phba->SerialNumber[i] =
456 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
460 lpfc_read_config(phba, pmb);
462 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
463 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
464 "0453 Adapter failed to init, mbxCmd x%x "
465 "READ_CONFIG, mbxStatus x%x\n",
466 mb->mbxCommand, mb->mbxStatus);
467 phba->link_state = LPFC_HBA_ERROR;
468 mempool_free( pmb, phba->mbox_mem_pool);
472 /* Check if the port is disabled */
473 lpfc_sli_read_link_ste(phba);
475 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
476 i = (mb->un.varRdConfig.max_xri + 1);
477 if (phba->cfg_hba_queue_depth > i) {
478 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
479 "3359 HBA queue depth changed from %d to %d\n",
480 phba->cfg_hba_queue_depth, i);
481 phba->cfg_hba_queue_depth = i;
484 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
485 i = (mb->un.varRdConfig.max_xri >> 3);
486 if (phba->pport->cfg_lun_queue_depth > i) {
487 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
488 "3360 LUN queue depth changed from %d to %d\n",
489 phba->pport->cfg_lun_queue_depth, i);
490 phba->pport->cfg_lun_queue_depth = i;
493 phba->lmt = mb->un.varRdConfig.lmt;
495 /* Get the default values for Model Name and Description */
496 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
498 phba->link_state = LPFC_LINK_DOWN;
500 /* Only process IOCBs on ELS ring till hba_state is READY */
501 if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
502 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
503 if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
504 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
505 if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
506 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
508 /* Post receive buffers for desired rings */
509 if (phba->sli_rev != 3)
510 lpfc_post_rcv_buf(phba);
513 * Configure HBA MSI-X attention conditions to messages if in MSI-X mode
515 if (phba->intr_type == MSIX) {
516 rc = lpfc_config_msi(phba, pmb);
518 mempool_free(pmb, phba->mbox_mem_pool);
521 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
522 if (rc != MBX_SUCCESS) {
523 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
524 "0352 Config MSI mailbox command "
525 "failed, mbxCmd x%x, mbxStatus x%x\n",
526 pmb->u.mb.mbxCommand,
527 pmb->u.mb.mbxStatus);
528 mempool_free(pmb, phba->mbox_mem_pool);
533 spin_lock_irq(&phba->hbalock);
534 /* Initialize ERATT handling flag */
535 phba->hba_flag &= ~HBA_ERATT_HANDLED;
537 /* Enable appropriate host interrupts */
538 if (lpfc_readl(phba->HCregaddr, &status)) {
539 spin_unlock_irq(&phba->hbalock);
542 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
543 if (psli->num_rings > 0)
544 status |= HC_R0INT_ENA;
545 if (psli->num_rings > 1)
546 status |= HC_R1INT_ENA;
547 if (psli->num_rings > 2)
548 status |= HC_R2INT_ENA;
549 if (psli->num_rings > 3)
550 status |= HC_R3INT_ENA;
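551 /* Keep the FCP ring interrupt disabled when the driver polls the FCP ring */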
552 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
553 (phba->cfg_poll & DISABLE_FCP_RING_INT))
554 status &= ~(HC_R0INT_ENA);
556 writel(status, phba->HCregaddr);
557 readl(phba->HCregaddr); /* flush */
558 spin_unlock_irq(&phba->hbalock);
560 /* Set up ring-0 (ELS) timer */
561 timeout = phba->fc_ratov * 2;
562 mod_timer(&vport->els_tmofunc,
563 jiffies + msecs_to_jiffies(1000 * timeout));
564 /* Set up heart beat (HB) timer */
565 mod_timer(&phba->hb_tmofunc,
566 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
567 phba->hb_outstanding = 0;
568 phba->last_completion_time = jiffies;
569 /* Set up error attention (ERATT) polling timer */
570 mod_timer(&phba->eratt_poll,
571 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
573 if (phba->hba_flag & LINK_DISABLED) {
574 lpfc_printf_log(phba,
576 "2598 Adapter Link is disabled.\n");
577 lpfc_down_link(phba, pmb);
578 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
579 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
580 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
581 lpfc_printf_log(phba,
583 "2599 Adapter failed to issue DOWN_LINK"
584 " mbox command rc 0x%x\n", rc);
586 mempool_free(pmb, phba->mbox_mem_pool);
589 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
590 mempool_free(pmb, phba->mbox_mem_pool);
591 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
595 /* MBOX buffer will be freed in mbox compl */
596 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
598 phba->link_state = LPFC_HBA_ERROR;
602 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
603 pmb->mbox_cmpl = lpfc_config_async_cmpl;
604 pmb->vport = phba->pport;
605 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
607 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
608 lpfc_printf_log(phba,
611 "0456 Adapter failed to issue "
612 "ASYNCEVT_ENABLE mbox status x%x\n",
614 mempool_free(pmb, phba->mbox_mem_pool);
617 /* Get Option rom version */
618 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
620 phba->link_state = LPFC_HBA_ERROR;
624 lpfc_dump_wakeup_param(phba, pmb);
625 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
626 pmb->vport = phba->pport;
627 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
629 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
630 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
631 "to get Option ROM version status x%x\n", rc);
632 mempool_free(pmb, phba->mbox_mem_pool);
639 * lpfc_hba_init_link - Initialize the FC link
640 * @phba: pointer to lpfc hba data structure.
641 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
643 * This routine will issue the INIT_LINK mailbox command call.
644 * It is available to other drivers through the lpfc_hba data
645 * structure for use as a delayed link up mechanism with the
646 * module parameter lpfc_suppress_link_up.
650 * Any other value - error
653 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
655 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
659 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
660 * @phba: pointer to lpfc hba data structure.
661 * @fc_topology: desired fc topology.
662 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
664 * This routine will issue the INIT_LINK mailbox command call.
665 * It is available to other drivers through the lpfc_hba data
666 * structure for use as a delayed link up mechanism with the
667 * module parameter lpfc_suppress_link_up.
671 * Any other value - error
674 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
677 struct lpfc_vport *vport = phba->pport;
682 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
684 phba->link_state = LPFC_HBA_ERROR;
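688 /* If the configured link speed is not supported by the adapter's link
689 * module type (lmt), log an error and fall back to auto speed. */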
690 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
691 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
692 !(phba->lmt & LMT_1Gb)) ||
693 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
694 !(phba->lmt & LMT_2Gb)) ||
695 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
696 !(phba->lmt & LMT_4Gb)) ||
697 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
698 !(phba->lmt & LMT_8Gb)) ||
699 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
700 !(phba->lmt & LMT_10Gb)) ||
701 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
702 !(phba->lmt & LMT_16Gb)) ||
703 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
704 !(phba->lmt & LMT_32Gb))) {
705 /* Reset link speed to auto */
706 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
707 "1302 Invalid speed for this board:%d "
708 "Reset link speed to auto.\n",
709 phba->cfg_link_speed);
710 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
712 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
713 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
714 if (phba->sli_rev < LPFC_SLI_REV4)
715 lpfc_set_loopback_flag(phba);
716 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
717 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
718 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
719 "0498 Adapter failed to init, mbxCmd x%x "
720 "INIT_LINK, mbxStatus x%x\n",
721 mb->mbxCommand, mb->mbxStatus);
722 if (phba->sli_rev <= LPFC_SLI_REV3) {
723 /* Clear all interrupt enable conditions */
724 writel(0, phba->HCregaddr);
725 readl(phba->HCregaddr); /* flush */
726 /* Clear all pending interrupts */
727 writel(0xffffffff, phba->HAregaddr);
728 readl(phba->HAregaddr); /* flush */
730 phba->link_state = LPFC_HBA_ERROR;
731 if (rc != MBX_BUSY || flag == MBX_POLL)
732 mempool_free(pmb, phba->mbox_mem_pool);
735 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
736 if (flag == MBX_POLL)
737 mempool_free(pmb, phba->mbox_mem_pool);
743 * lpfc_hba_down_link - this routine downs the FC link
744 * @phba: pointer to lpfc hba data structure.
745 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
747 * This routine will issue the DOWN_LINK mailbox command call.
748 * It is available to other drivers through the lpfc_hba data
749 * structure for use in stopping the link.
753 * Any other value - error
756 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
761 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
763 phba->link_state = LPFC_HBA_ERROR;
767 lpfc_printf_log(phba,
769 "0491 Adapter Link is disabled.\n");
770 lpfc_down_link(phba, pmb);
771 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
772 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
773 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
774 lpfc_printf_log(phba,
776 "2522 Adapter failed to issue DOWN_LINK"
777 " mbox command rc 0x%x\n", rc);
779 mempool_free(pmb, phba->mbox_mem_pool);
782 if (flag == MBX_POLL)
783 mempool_free(pmb, phba->mbox_mem_pool);
789 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
790 * @phba: pointer to lpfc HBA data structure.
792 * This routine will do LPFC uninitialization before the HBA is reset when
793 * bringing down the SLI Layer.
797 * Any other value - error.
800 lpfc_hba_down_prep(struct lpfc_hba *phba)
802 struct lpfc_vport **vports;
805 if (phba->sli_rev <= LPFC_SLI_REV3) {
806 /* Disable interrupts */
807 writel(0, phba->HCregaddr);
808 readl(phba->HCregaddr); /* flush */
811 if (phba->pport->load_flag & FC_UNLOADING)
812 lpfc_cleanup_discovery_resources(phba->pport);
814 vports = lpfc_create_vport_work_array(phba);
816 for (i = 0; i <= phba->max_vports &&
817 vports[i] != NULL; i++)
818 lpfc_cleanup_discovery_resources(vports[i]);
819 lpfc_destroy_vport_work_array(phba, vports);
825 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
826 * rspiocb which got deferred
828 * @phba: pointer to lpfc HBA data structure.
830 * This routine will cleanup completed slow path events after HBA is reset
831 * when bringing down the SLI Layer.
838 lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
840 struct lpfc_iocbq *rspiocbq;
841 struct hbq_dmabuf *dmabuf;
842 struct lpfc_cq_event *cq_event;
844 spin_lock_irq(&phba->hbalock);
845 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
846 spin_unlock_irq(&phba->hbalock);
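847 /* Drain the slow-path event queue, releasing each deferred completion */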
848 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
849 /* Get the response iocb from the head of work queue */
850 spin_lock_irq(&phba->hbalock);
851 list_remove_head(&phba->sli4_hba.sp_queue_event,
852 cq_event, struct lpfc_cq_event, list);
853 spin_unlock_irq(&phba->hbalock);
855 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
856 case CQE_CODE_COMPL_WQE:
857 rspiocbq = container_of(cq_event, struct lpfc_iocbq,
859 lpfc_sli_release_iocbq(phba, rspiocbq);
861 case CQE_CODE_RECEIVE:
862 case CQE_CODE_RECEIVE_V1:
863 dmabuf = container_of(cq_event, struct hbq_dmabuf,
865 lpfc_in_buf_free(phba, &dmabuf->dbuf);
871 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
872 * @phba: pointer to lpfc HBA data structure.
874 * This routine will cleanup posted ELS buffers after the HBA is reset
875 * when bringing down the SLI Layer.
882 lpfc_hba_free_post_buf(struct lpfc_hba *phba)
884 struct lpfc_sli *psli = &phba->sli;
885 struct lpfc_sli_ring *pring;
886 struct lpfc_dmabuf *mp, *next_mp;
890 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
891 lpfc_sli_hbqbuf_free_all(phba);
893 /* Cleanup preposted buffers on the ELS ring */
894 pring = &psli->ring[LPFC_ELS_RING];
895 spin_lock_irq(&phba->hbalock);
896 list_splice_init(&pring->postbufq, &buflist);
897 spin_unlock_irq(&phba->hbalock);
900 list_for_each_entry_safe(mp, next_mp, &buflist, list) {
903 lpfc_mbuf_free(phba, mp->virt, mp->phys);
907 spin_lock_irq(&phba->hbalock);
908 pring->postbufq_cnt -= count;
909 spin_unlock_irq(&phba->hbalock);
914 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
915 * @phba: pointer to lpfc HBA data structure.
917 * This routine will cleanup the txcmplq after the HBA is reset when bringing
918 * down the SLI Layer.
924 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
926 struct lpfc_sli *psli = &phba->sli;
927 struct lpfc_sli_ring *pring;
928 LIST_HEAD(completions);
931 for (i = 0; i < psli->num_rings; i++) {
932 pring = &psli->ring[i];
933 if (phba->sli_rev >= LPFC_SLI_REV4)
934 spin_lock_irq(&pring->ring_lock);
936 spin_lock_irq(&phba->hbalock);
937 /* At this point in time the HBA is either reset or DOA. Either
938 * way, nothing should be on txcmplq as it will NEVER complete.
940 list_splice_init(&pring->txcmplq, &completions);
941 pring->txcmplq_cnt = 0;
943 if (phba->sli_rev >= LPFC_SLI_REV4)
944 spin_unlock_irq(&pring->ring_lock);
946 spin_unlock_irq(&phba->hbalock);
948 /* Cancel all the IOCBs from the completions list */
949 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
951 lpfc_sli_abort_iocb_ring(phba, pring);
956 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
958 * @phba: pointer to lpfc HBA data structure.
960 * This routine will do uninitialization after the HBA is reset when bringing
961 * down the SLI Layer.
965 * Any other value - error.
968 lpfc_hba_down_post_s3(struct lpfc_hba *phba)
970 lpfc_hba_free_post_buf(phba);
971 lpfc_hba_clean_txcmplq(phba);
976 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
977 * @phba: pointer to lpfc HBA data structure.
979 * This routine will do uninitialization after the HBA is reset when bringing
980 * down the SLI Layer.
984 * Any other value - error.
987 lpfc_hba_down_post_s4(struct lpfc_hba *phba)
989 struct lpfc_scsi_buf *psb, *psb_next;
991 unsigned long iflag = 0;
992 struct lpfc_sglq *sglq_entry = NULL;
993 struct lpfc_sli *psli = &phba->sli;
994 struct lpfc_sli_ring *pring;
996 lpfc_hba_free_post_buf(phba);
997 lpfc_hba_clean_txcmplq(phba);
998 pring = &psli->ring[LPFC_ELS_RING];
1000 /* At this point in time the HBA is either reset or DOA. Either
1001 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
1002 * on the lpfc_sgl_list so that it can either be freed if the
1003 * driver is unloading or reposted if the driver is restarting
1004 * the port.
1005 */
1006 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
1008 /* abts_sgl_list_lock required because worker thread uses this
1009 * list.
1010 */
1011 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
1012 list_for_each_entry(sglq_entry,
1013 &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
1014 sglq_entry->state = SGL_FREED;
1016 spin_lock(&pring->ring_lock);
1017 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
1018 &phba->sli4_hba.lpfc_sgl_list);
1019 spin_unlock(&pring->ring_lock);
1020 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
1021 /* abts_scsi_buf_list_lock required because worker thread uses this
1022 * list.
1023 */
1024 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
1025 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
1027 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
1028 spin_unlock_irq(&phba->hbalock);
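1029 /* Mark the aborted SCSI buffers complete and return them to the put list */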
1030 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
1032 psb->status = IOSTAT_SUCCESS;
1034 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
1035 list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
1036 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1038 lpfc_sli4_free_sp_events(phba);
1043 * lpfc_hba_down_post - Wrapper func for hba down post routine
1044 * @phba: pointer to lpfc HBA data structure.
1046 * This routine wraps the actual SLI3 or SLI4 routine for performing
1047 * uninitialization after the HBA is reset when bringing down the SLI Layer.
1051 * Any other value - error.
1054 lpfc_hba_down_post(struct lpfc_hba *phba)
1056 return (*phba->lpfc_hba_down_post)(phba);
1060 * lpfc_hb_timeout - The HBA-timer timeout handler
1061 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
1063 * This is the HBA-timer timeout handler registered to the lpfc driver. When
1064 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
1065 * work-port-events bitmap and the worker thread is notified. This timeout
1066 * event will be used by the worker thread to invoke the actual timeout
1067 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
1068 * be performed in the timeout handler and the HBA timeout event bit shall
1069 * be cleared by the worker thread after it has taken the event bitmap out.
1072 lpfc_hb_timeout(unsigned long ptr)
1074 struct lpfc_hba *phba;
1075 uint32_t tmo_posted;
1076 unsigned long iflag;
1078 phba = (struct lpfc_hba *)ptr;
1080 /* Check for heart beat timeout conditions */
1081 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1082 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
1084 phba->pport->work_port_events |= WORKER_HB_TMO;
1085 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1087 /* Tell the worker thread there is work to do */
1089 lpfc_worker_wake_up(phba);
1094 * lpfc_rrq_timeout - The RRQ-timer timeout handler
1095 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
1097 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
1098 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
1099 * work-port-events bitmap and the worker thread is notified. This timeout
1100 * event will be used by the worker thread to invoke the actual timeout
1101 * handler routine, lpfc_rrq_handler. Any periodical operations will
1102 * be performed in the timeout handler and the RRQ timeout event bit shall
1103 * be cleared by the worker thread after it has taken the event bitmap out.
1106 lpfc_rrq_timeout(unsigned long ptr)
1108 struct lpfc_hba *phba;
1109 unsigned long iflag;
1111 phba = (struct lpfc_hba *)ptr;
1112 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1113 if (!(phba->pport->load_flag & FC_UNLOADING))
1114 phba->hba_flag |= HBA_RRQ_ACTIVE;
1116 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1117 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1119 if (!(phba->pport->load_flag & FC_UNLOADING))
1120 lpfc_worker_wake_up(phba);
1124 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
1125 * @phba: pointer to lpfc hba data structure.
1126 * @pmboxq: pointer to the driver internal queue element for mailbox command.
1128 * This is the callback function to the lpfc heart-beat mailbox command.
1129 * If configured, the lpfc driver issues the heart-beat mailbox command to
1130 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
1131 * heart-beat mailbox command is issued, the driver shall set up heart-beat
1132 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
1133 * heart-beat outstanding state. Once the mailbox command comes back and
1134 * no error conditions detected, the heart-beat mailbox command timer is
1135 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
1136 * state is cleared for the next heart-beat. If the timer expired with the
1137 * heart-beat outstanding state set, the driver will put the HBA offline.
1140 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
1142 unsigned long drvr_flag;
1144 spin_lock_irqsave(&phba->hbalock, drvr_flag);
1145 phba->hb_outstanding = 0;
1146 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1148 /* Check and reset heart-beat timer if necessary */
1149 mempool_free(pmboxq, phba->mbox_mem_pool);
1150 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
1151 !(phba->link_state == LPFC_HBA_ERROR) &&
1152 !(phba->pport->load_flag & FC_UNLOADING))
1153 mod_timer(&phba->hb_tmofunc,
1155 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1160 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
1161 * @phba: pointer to lpfc hba data structure.
1163 * This is the actual HBA-timer timeout handler to be invoked by the worker
1164 * thread whenever the HBA timer fired and HBA-timeout event posted. This
1165 * handler performs any periodic operations needed for the device. If such
1166 * periodic event has already been attended to either in the interrupt handler
1167 * or by processing slow-ring or fast-ring events within the HBA-timer
1168 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
1169 * the timer for the next timeout period. If lpfc heart-beat mailbox command
1170 * is configured and there is no heart-beat mailbox command outstanding, a
1171 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
1172 * has been a heart-beat mailbox command outstanding, the HBA shall be put
1173 * offline.
1174 */
1176 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1178 struct lpfc_vport **vports;
1179 LPFC_MBOXQ_t *pmboxq;
1180 struct lpfc_dmabuf *buf_ptr;
1182 struct lpfc_sli *psli = &phba->sli;
1183 LIST_HEAD(completions);
1185 vports = lpfc_create_vport_work_array(phba);
1187 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
1188 lpfc_rcv_seq_check_edtov(vports[i]);
1189 lpfc_destroy_vport_work_array(phba, vports);
1191 if ((phba->link_state == LPFC_HBA_ERROR) ||
1192 (phba->pport->load_flag & FC_UNLOADING) ||
1193 (phba->pport->fc_flag & FC_OFFLINE_MODE))
1196 spin_lock_irq(&phba->pport->work_port_lock);
1198 if (time_after(phba->last_completion_time +
1199 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1201 spin_unlock_irq(&phba->pport->work_port_lock);
1202 if (!phba->hb_outstanding)
1203 mod_timer(&phba->hb_tmofunc,
1205 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1207 mod_timer(&phba->hb_tmofunc,
1209 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1212 spin_unlock_irq(&phba->pport->work_port_lock);
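1213 /* If the saved ELS buffer count is unchanged since the last interval, free the buffers */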
1214 if (phba->elsbuf_cnt &&
1215 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1216 spin_lock_irq(&phba->hbalock);
1217 list_splice_init(&phba->elsbuf, &completions);
1218 phba->elsbuf_cnt = 0;
1219 phba->elsbuf_prev_cnt = 0;
1220 spin_unlock_irq(&phba->hbalock);
1222 while (!list_empty(&completions)) {
1223 list_remove_head(&completions, buf_ptr,
1224 struct lpfc_dmabuf, list);
1225 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1229 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1231 /* If there is no heart beat outstanding, issue a heartbeat command */
1232 if (phba->cfg_enable_hba_heartbeat) {
1233 if (!phba->hb_outstanding) {
1234 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1235 (list_empty(&psli->mboxq))) {
1236 pmboxq = mempool_alloc(phba->mbox_mem_pool,
1239 mod_timer(&phba->hb_tmofunc,
1241 msecs_to_jiffies(1000 *
1242 LPFC_HB_MBOX_INTERVAL));
1246 lpfc_heart_beat(phba, pmboxq);
1247 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1248 pmboxq->vport = phba->pport;
1249 retval = lpfc_sli_issue_mbox(phba, pmboxq,
1252 if (retval != MBX_BUSY &&
1253 retval != MBX_SUCCESS) {
1254 mempool_free(pmboxq,
1255 phba->mbox_mem_pool);
1256 mod_timer(&phba->hb_tmofunc,
1258 msecs_to_jiffies(1000 *
1259 LPFC_HB_MBOX_INTERVAL));
1262 phba->skipped_hb = 0;
1263 phba->hb_outstanding = 1;
1264 } else if (time_before_eq(phba->last_completion_time,
1265 phba->skipped_hb)) {
1266 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1267 "2857 Last completion time not "
1268 " updated in %d ms\n",
1269 jiffies_to_msecs(jiffies
1270 - phba->last_completion_time));
1272 phba->skipped_hb = jiffies;
1274 mod_timer(&phba->hb_tmofunc,
1276 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1280 * If the heart beat timeout was called with hb_outstanding set,
1281 * we need to give the hb mailbox cmd a chance to
1282 * complete or time out.
1283 */
1284 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1285 "0459 Adapter heartbeat still out"
1286 "standing:last compl time was %d ms.\n",
1287 jiffies_to_msecs(jiffies
1288 - phba->last_completion_time));
1289 mod_timer(&phba->hb_tmofunc,
1291 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1297 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1298 * @phba: pointer to lpfc hba data structure.
1300 * This routine is called to bring the HBA offline when HBA hardware error
1301 * other than Port Error 6 has been detected.
1304 lpfc_offline_eratt(struct lpfc_hba *phba)
1306 struct lpfc_sli *psli = &phba->sli;
1308 spin_lock_irq(&phba->hbalock);
1309 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1310 spin_unlock_irq(&phba->hbalock);
1311 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
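1313 /* Reset the chip and wait for board readiness before unblocking mgmt I/O */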
1314 lpfc_reset_barrier(phba);
1315 spin_lock_irq(&phba->hbalock);
1316 lpfc_sli_brdreset(phba);
1317 spin_unlock_irq(&phba->hbalock);
1318 lpfc_hba_down_post(phba);
1319 lpfc_sli_brdready(phba, HS_MBRDY);
1320 lpfc_unblock_mgmt_io(phba);
1321 phba->link_state = LPFC_HBA_ERROR;
1326 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1327 * @phba: pointer to lpfc hba data structure.
1329 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1330 * other than Port Error 6 has been detected.
1333 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1335 spin_lock_irq(&phba->hbalock);
1336 phba->link_state = LPFC_HBA_ERROR;
1337 spin_unlock_irq(&phba->hbalock);
1339 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1341 lpfc_hba_down_post(phba);
1342 lpfc_unblock_mgmt_io(phba);
1346 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1347 * @phba: pointer to lpfc hba data structure.
1349 * This routine is invoked to handle the deferred HBA hardware error
1350 * conditions. This type of error is indicated by the HBA setting ER1
1351 * and another ER bit in the host status register. The driver will
1352 * wait until the ER1 bit clears before handling the error condition.
1355 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1357 uint32_t old_host_status = phba->work_hs;
1358 struct lpfc_sli *psli = &phba->sli;
1360 /* If the pci channel is offline, ignore possible errors,
1361 * since we cannot communicate with the pci card anyway.
1363 if (pci_channel_offline(phba->pcidev)) {
1364 spin_lock_irq(&phba->hbalock);
1365 phba->hba_flag &= ~DEFER_ERATT;
1366 spin_unlock_irq(&phba->hbalock);
1370 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1371 "0479 Deferred Adapter Hardware Error "
1372 "Data: x%x x%x x%x\n",
1374 phba->work_status[0], phba->work_status[1]);
1376 spin_lock_irq(&phba->hbalock);
1377 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1378 spin_unlock_irq(&phba->hbalock);
1382 * Firmware stops when it triggered erratt. That could cause the I/Os
1383 * to be dropped by the firmware. Error out the iocbs (I/O) on txcmplq and
1384 * let the SCSI layer retry them after re-establishing link.
1386 lpfc_sli_abort_fcp_rings(phba);
1389 * There was a firmware error. Take the hba offline and then
1390 * attempt to restart it.
1392 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1395 /* Wait for the ER1 bit to clear.*/
1396 while (phba->work_hs & HS_FFER1) {
1398 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1399 phba->work_hs = UNPLUG_ERR ;
1402 /* If driver is unloading let the worker thread continue */
1403 if (phba->pport->load_flag & FC_UNLOADING) {
1410 * This is to protect against a race condition in which the
1411 * first write to the host attention register clears the
1412 * host status register.
1414 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1415 phba->work_hs = old_host_status & ~HS_FFER1;
1417 spin_lock_irq(&phba->hbalock);
1418 phba->hba_flag &= ~DEFER_ERATT;
1419 spin_unlock_irq(&phba->hbalock);
1420 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1421 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1425 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1427 struct lpfc_board_event_header board_event;
1428 struct Scsi_Host *shost;
1430 board_event.event_type = FC_REG_BOARD_EVENT;
1431 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1432 shost = lpfc_shost_from_vport(phba->pport);
1433 fc_host_post_vendor_event(shost, fc_get_event_number(),
1434 sizeof(board_event),
1435 (char *) &board_event,
1440 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1441 * @phba: pointer to lpfc hba data structure.
1443 * This routine is invoked to handle the following HBA hardware error
1444 * conditions:
1445 * 1 - HBA error attention interrupt
1446 * 2 - DMA ring index out of range
1447 * 3 - Mailbox command came back as unknown
1450 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1452 struct lpfc_vport *vport = phba->pport;
1453 struct lpfc_sli *psli = &phba->sli;
1454 uint32_t event_data;
1455 unsigned long temperature;
1456 struct temp_event temp_event_data;
1457 struct Scsi_Host *shost;
1459 /* If the pci channel is offline, ignore possible errors,
1460 * since we cannot communicate with the pci card anyway.
1462 if (pci_channel_offline(phba->pcidev)) {
1463 spin_lock_irq(&phba->hbalock);
1464 phba->hba_flag &= ~DEFER_ERATT;
1465 spin_unlock_irq(&phba->hbalock);
1469 /* If resets are disabled then leave the HBA alone and return */
1470 if (!phba->cfg_enable_hba_reset)
1473 /* Send an internal error event to mgmt application */
1474 lpfc_board_errevt_to_mgmt(phba);
1476 if (phba->hba_flag & DEFER_ERATT)
1477 lpfc_handle_deferred_eratt(phba);
1479 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1480 if (phba->work_hs & HS_FFER6)
1481 /* Re-establishing Link */
1482 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1483 "1301 Re-establishing Link "
1484 "Data: x%x x%x x%x\n",
1485 phba->work_hs, phba->work_status[0],
1486 phba->work_status[1]);
1487 if (phba->work_hs & HS_FFER8)
1488 /* Device Zeroization */
1489 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1490 "2861 Host Authentication device "
1491 "zeroization Data:x%x x%x x%x\n",
1492 phba->work_hs, phba->work_status[0],
1493 phba->work_status[1]);
1495 spin_lock_irq(&phba->hbalock);
1496 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1497 spin_unlock_irq(&phba->hbalock);
1500 * Firmware stops when it triggered erratt with HS_FFER6.
1501 * That could cause the I/Os to be dropped by the firmware.
1502 * Error out the iocbs (I/O) on txcmplq and let the SCSI layer
1503 * retry them after re-establishing link.
1505 lpfc_sli_abort_fcp_rings(phba);
1508 * There was a firmware error. Take the hba offline and then
1509 * attempt to restart it.
1511 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1513 lpfc_sli_brdrestart(phba);
1514 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
1515 lpfc_unblock_mgmt_io(phba);
1518 lpfc_unblock_mgmt_io(phba);
1519 } else if (phba->work_hs & HS_CRIT_TEMP) {
1520 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1521 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1522 temp_event_data.event_code = LPFC_CRIT_TEMP;
1523 temp_event_data.data = (uint32_t)temperature;
1525 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1526 "0406 Adapter maximum temperature exceeded "
1527 "(%ld), taking this port offline "
1528 "Data: x%x x%x x%x\n",
1529 temperature, phba->work_hs,
1530 phba->work_status[0], phba->work_status[1]);
1532 shost = lpfc_shost_from_vport(phba->pport);
1533 fc_host_post_vendor_event(shost, fc_get_event_number(),
1534 sizeof(temp_event_data),
1535 (char *) &temp_event_data,
1536 SCSI_NL_VID_TYPE_PCI
1537 | PCI_VENDOR_ID_EMULEX);
1539 spin_lock_irq(&phba->hbalock);
1540 phba->over_temp_state = HBA_OVER_TEMP;
1541 spin_unlock_irq(&phba->hbalock);
1542 lpfc_offline_eratt(phba);
1545 /* The if clause above forces this code path when the status
1546 * failure is a value other than FFER6. Do not call the offline
1547 * twice. This is the adapter hardware error path.
1549 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1550 "0457 Adapter Hardware Error "
1551 "Data: x%x x%x x%x\n",
1553 phba->work_status[0], phba->work_status[1]);
1555 event_data = FC_REG_DUMP_EVENT;
1556 shost = lpfc_shost_from_vport(vport);
1557 fc_host_post_vendor_event(shost, fc_get_event_number(),
1558 sizeof(event_data), (char *) &event_data,
1559 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1561 lpfc_offline_eratt(phba);
1567 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1568 * @phba: pointer to lpfc hba data structure.
1569 * @mbx_action: flag for mailbox shutdown action.
1571 * This routine is invoked to perform an SLI4 port PCI function reset in
1572 * response to port status register polling attention. It waits for port
1573 * status register (ERR, RDY, RN) bits before proceeding with function reset.
1574 * During this process, interrupt vectors are freed and later requested
1575 * for handling possible port resource change.
1578 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1585 * On error status condition, the driver needs to wait for the port
1586 * to become ready before performing the reset.
1588 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1590 /* need reset: attempt for port recovery */
1592 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1593 "2887 Reset Needed: Attempting Port "
1595 lpfc_offline_prep(phba, mbx_action);
1597 /* release interrupt for possible resource change */
1598 lpfc_sli4_disable_intr(phba);
1599 lpfc_sli_brdrestart(phba);
1600 /* request and enable interrupt */
1601 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1602 if (intr_mode == LPFC_INTR_ERROR) {
1603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1604 "3175 Failed to enable interrupt\n");
1607 phba->intr_mode = intr_mode;
1609 rc = lpfc_online(phba);
1611 lpfc_unblock_mgmt_io(phba);
1617 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1618 * @phba: pointer to lpfc hba data structure.
1620 * This routine is invoked to handle the SLI4 HBA hardware error attention
1621 * conditions.
1622 */
1624 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1626 struct lpfc_vport *vport = phba->pport;
1627 uint32_t event_data;
1628 struct Scsi_Host *shost;
1630 struct lpfc_register portstat_reg = {0};
1631 uint32_t reg_err1, reg_err2;
1632 uint32_t uerrlo_reg, uemasklo_reg;
1633 uint32_t pci_rd_rc1, pci_rd_rc2;
1634 bool en_rn_msg = true;
1635 struct temp_event temp_event_data;
1638 /* If the pci channel is offline, ignore possible errors, since
1639 * we cannot communicate with the pci card anyway.
1641 if (pci_channel_offline(phba->pcidev))
1644 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1646 case LPFC_SLI_INTF_IF_TYPE_0:
1647 pci_rd_rc1 = lpfc_readl(
1648 phba->sli4_hba.u.if_type0.UERRLOregaddr,
1650 pci_rd_rc2 = lpfc_readl(
1651 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1653 /* consider PCI bus read error as pci_channel_offline */
1654 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1656 lpfc_sli4_offline_eratt(phba);
1659 case LPFC_SLI_INTF_IF_TYPE_2:
1660 pci_rd_rc1 = lpfc_readl(
1661 phba->sli4_hba.u.if_type2.STATUSregaddr,
1662 &portstat_reg.word0);
1663 /* consider PCI bus read error as pci_channel_offline */
1664 if (pci_rd_rc1 == -EIO) {
1665 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1666 "3151 PCI bus read access failure: x%x\n",
1667 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
1670 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
1671 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
1672 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
1673 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1674 "2889 Port Overtemperature event, "
1675 "taking port offline Data: x%x x%x\n",
1676 reg_err1, reg_err2);
1678 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1679 temp_event_data.event_code = LPFC_CRIT_TEMP;
1680 temp_event_data.data = 0xFFFFFFFF;
1682 shost = lpfc_shost_from_vport(phba->pport);
1683 fc_host_post_vendor_event(shost, fc_get_event_number(),
1684 sizeof(temp_event_data),
1685 (char *)&temp_event_data,
1686 SCSI_NL_VID_TYPE_PCI
1687 | PCI_VENDOR_ID_EMULEX);
1689 spin_lock_irq(&phba->hbalock);
1690 phba->over_temp_state = HBA_OVER_TEMP;
1691 spin_unlock_irq(&phba->hbalock);
1692 lpfc_sli4_offline_eratt(phba);
1695 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1696 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
1697 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1698 "3143 Port Down: Firmware Update "
1701 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1702 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1703 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1704 "3144 Port Down: Debug Dump\n");
1705 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1706 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
1707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1708 "3145 Port Down: Provisioning\n");
1710 /* If resets are disabled then leave the HBA alone and return */
1711 if (!phba->cfg_enable_hba_reset)
1714 /* Check port status register for function reset */
1715 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
1718 /* don't report event on forced debug dump */
1719 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1720 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1725 /* fall through for not able to recover */
1726 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1727 "3152 Unrecoverable error, bring the port "
1729 lpfc_sli4_offline_eratt(phba);
1731 case LPFC_SLI_INTF_IF_TYPE_1:
1735 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1736 "3123 Report dump event to upper layer\n");
1737 /* Send an internal error event to mgmt application */
1738 lpfc_board_errevt_to_mgmt(phba);
1740 event_data = FC_REG_DUMP_EVENT;
1741 shost = lpfc_shost_from_vport(vport);
1742 fc_host_post_vendor_event(shost, fc_get_event_number(),
1743 sizeof(event_data), (char *) &event_data,
1744 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1748 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1749 * @phba: pointer to lpfc HBA data structure.
1751 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1752 * routine from the API jump table function pointer from the lpfc_hba struct.
1756 * Any other value - error.
1759 lpfc_handle_eratt(struct lpfc_hba *phba)
1761 (*phba->lpfc_handle_eratt)(phba);
1765 * lpfc_handle_latt - The HBA link event handler
1766 * @phba: pointer to lpfc hba data structure.
1768 * This routine is invoked from the worker thread to handle a HBA host
1769 * attention link event.
1772 lpfc_handle_latt(struct lpfc_hba *phba)
1774 struct lpfc_vport *vport = phba->pport;
1775 struct lpfc_sli *psli = &phba->sli;
1777 volatile uint32_t control;
1778 struct lpfc_dmabuf *mp;
1781 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1784 goto lpfc_handle_latt_err_exit;
1787 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1790 goto lpfc_handle_latt_free_pmb;
1793 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1796 goto lpfc_handle_latt_free_mp;
1799 /* Cleanup any outstanding ELS commands */
1800 lpfc_els_flush_all_cmd(phba);
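1801 /* Issue READ_TOPOLOGY (non-blocking) to discover the new link state */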
1802 psli->slistat.link_event++;
1803 lpfc_read_topology(phba, pmb, mp);
1804 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
1806 /* Block ELS IOCBs until we have processed this mbox command */
1807 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
1808 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
1809 if (rc == MBX_NOT_FINISHED) {
1811 goto lpfc_handle_latt_free_mbuf;
1814 /* Clear Link Attention in HA REG */
1815 spin_lock_irq(&phba->hbalock);
1816 writel(HA_LATT, phba->HAregaddr);
1817 readl(phba->HAregaddr); /* flush */
1818 spin_unlock_irq(&phba->hbalock);
1822 lpfc_handle_latt_free_mbuf:
1823 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1824 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1825 lpfc_handle_latt_free_mp:
1827 lpfc_handle_latt_free_pmb:
1828 mempool_free(pmb, phba->mbox_mem_pool);
1829 lpfc_handle_latt_err_exit:
1830 /* Enable Link attention interrupts */
1831 spin_lock_irq(&phba->hbalock);
1832 psli->sli_flag |= LPFC_PROCESS_LA;
1833 control = readl(phba->HCregaddr);
1834 control |= HC_LAINT_ENA;
1835 writel(control, phba->HCregaddr);
1836 readl(phba->HCregaddr); /* flush */
1838 /* Clear Link Attention in HA REG */
1839 writel(HA_LATT, phba->HAregaddr);
1840 readl(phba->HAregaddr); /* flush */
1841 spin_unlock_irq(&phba->hbalock);
1842 lpfc_linkdown(phba);
1843 phba->link_state = LPFC_HBA_ERROR;
1845 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1846 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
1852 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1853 * @phba: pointer to lpfc hba data structure.
1854 * @vpd: pointer to the vital product data.
1855 * @len: length of the vital product data in bytes.
1857 * This routine parses the Vital Product Data (VPD). The VPD is treated as
1858 * an array of characters. In this routine, the ModelName, ProgramType, and
1859 * ModelDesc, etc. fields of the phba data structure will be populated.
1862 * 0 - pointer to the VPD passed in is NULL
1866 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1868 uint8_t lenlo, lenhi;
1878 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1879 "0455 Vital Product Data: x%x x%x x%x x%x\n",
1880 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1882 while (!finished && (index < (len - 4))) {
1883 switch (vpd[index]) {
1891 i = ((((unsigned short)lenhi) << 8) + lenlo);
1900 Length = ((((unsigned short)lenhi) << 8) + lenlo);
1901 if (Length > len - index)
1902 Length = len - index;
1903 while (Length > 0) {
1904 /* Look for Serial Number */
1905 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1912 phba->SerialNumber[j++] = vpd[index++];
1916 phba->SerialNumber[j] = 0;
1919 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1920 phba->vpd_flag |= VPD_MODEL_DESC;
1927 phba->ModelDesc[j++] = vpd[index++];
1931 phba->ModelDesc[j] = 0;
1934 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1935 phba->vpd_flag |= VPD_MODEL_NAME;
1942 phba->ModelName[j++] = vpd[index++];
1946 phba->ModelName[j] = 0;
1949 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1950 phba->vpd_flag |= VPD_PROGRAM_TYPE;
1957 phba->ProgramType[j++] = vpd[index++];
1961 phba->ProgramType[j] = 0;
1964 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1965 phba->vpd_flag |= VPD_PORT;
1972 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1973 (phba->sli4_hba.pport_name_sta ==
1974 LPFC_SLI4_PPNAME_GET)) {
1978 phba->Port[j++] = vpd[index++];
1982 if ((phba->sli_rev != LPFC_SLI_REV4) ||
1983 (phba->sli4_hba.pport_name_sta ==
1984 LPFC_SLI4_PPNAME_NON))
2011 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2012 * @phba: pointer to lpfc hba data structure.
2013 * @mdp: pointer to the data structure to hold the derived model name.
2014 * @descp: pointer to the data structure to hold the derived description.
2016 * This routine retrieves HBA's description based on its registered PCI device
2017 * ID. The @descp passed into this function points to an array of 256 chars. It
2018 * shall be returned with the model name, maximum speed, and the host bus type.
2019 * The @mdp passed into this function points to an array of 80 chars. When the
2020 * function returns, the @mdp will be filled with the model name.
2023 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2026 uint16_t dev_id = phba->pcidev->device;
2029 int oneConnect = 0; /* default is not a oneConnect */
2034 } m = {"<Unknown>", "", ""};
2036 if (mdp && mdp[0] != '\0'
2037 && descp && descp[0] != '\0')
2040 if (phba->lmt & LMT_32Gb)
2042 else if (phba->lmt & LMT_16Gb)
2044 else if (phba->lmt & LMT_10Gb)
2046 else if (phba->lmt & LMT_8Gb)
2048 else if (phba->lmt & LMT_4Gb)
2050 else if (phba->lmt & LMT_2Gb)
2052 else if (phba->lmt & LMT_1Gb)
2060 case PCI_DEVICE_ID_FIREFLY:
2061 m = (typeof(m)){"LP6000", "PCI",
2062 "Obsolete, Unsupported Fibre Channel Adapter"};
2064 case PCI_DEVICE_ID_SUPERFLY:
2065 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2066 m = (typeof(m)){"LP7000", "PCI", ""};
2068 m = (typeof(m)){"LP7000E", "PCI", ""};
2069 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2071 case PCI_DEVICE_ID_DRAGONFLY:
2072 m = (typeof(m)){"LP8000", "PCI",
2073 "Obsolete, Unsupported Fibre Channel Adapter"};
2075 case PCI_DEVICE_ID_CENTAUR:
2076 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2077 m = (typeof(m)){"LP9002", "PCI", ""};
2079 m = (typeof(m)){"LP9000", "PCI", ""};
2080 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2082 case PCI_DEVICE_ID_RFLY:
2083 m = (typeof(m)){"LP952", "PCI",
2084 "Obsolete, Unsupported Fibre Channel Adapter"};
2086 case PCI_DEVICE_ID_PEGASUS:
2087 m = (typeof(m)){"LP9802", "PCI-X",
2088 "Obsolete, Unsupported Fibre Channel Adapter"};
2090 case PCI_DEVICE_ID_THOR:
2091 m = (typeof(m)){"LP10000", "PCI-X",
2092 "Obsolete, Unsupported Fibre Channel Adapter"};
2094 case PCI_DEVICE_ID_VIPER:
2095 m = (typeof(m)){"LPX1000", "PCI-X",
2096 "Obsolete, Unsupported Fibre Channel Adapter"};
2098 case PCI_DEVICE_ID_PFLY:
2099 m = (typeof(m)){"LP982", "PCI-X",
2100 "Obsolete, Unsupported Fibre Channel Adapter"};
2102 case PCI_DEVICE_ID_TFLY:
2103 m = (typeof(m)){"LP1050", "PCI-X",
2104 "Obsolete, Unsupported Fibre Channel Adapter"};
2106 case PCI_DEVICE_ID_HELIOS:
2107 m = (typeof(m)){"LP11000", "PCI-X2",
2108 "Obsolete, Unsupported Fibre Channel Adapter"};
2110 case PCI_DEVICE_ID_HELIOS_SCSP:
2111 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2112 "Obsolete, Unsupported Fibre Channel Adapter"};
2114 case PCI_DEVICE_ID_HELIOS_DCSP:
2115 m = (typeof(m)){"LP11002-SP", "PCI-X2",
2116 "Obsolete, Unsupported Fibre Channel Adapter"};
2118 case PCI_DEVICE_ID_NEPTUNE:
2119 m = (typeof(m)){"LPe1000", "PCIe",
2120 "Obsolete, Unsupported Fibre Channel Adapter"};
2122 case PCI_DEVICE_ID_NEPTUNE_SCSP:
2123 m = (typeof(m)){"LPe1000-SP", "PCIe",
2124 "Obsolete, Unsupported Fibre Channel Adapter"};
2126 case PCI_DEVICE_ID_NEPTUNE_DCSP:
2127 m = (typeof(m)){"LPe1002-SP", "PCIe",
2128 "Obsolete, Unsupported Fibre Channel Adapter"};
2130 case PCI_DEVICE_ID_BMID:
2131 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2133 case PCI_DEVICE_ID_BSMB:
2134 m = (typeof(m)){"LP111", "PCI-X2",
2135 "Obsolete, Unsupported Fibre Channel Adapter"};
2137 case PCI_DEVICE_ID_ZEPHYR:
2138 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2140 case PCI_DEVICE_ID_ZEPHYR_SCSP:
2141 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2143 case PCI_DEVICE_ID_ZEPHYR_DCSP:
2144 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2147 case PCI_DEVICE_ID_ZMID:
2148 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2150 case PCI_DEVICE_ID_ZSMB:
2151 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2153 case PCI_DEVICE_ID_LP101:
2154 m = (typeof(m)){"LP101", "PCI-X",
2155 "Obsolete, Unsupported Fibre Channel Adapter"};
2157 case PCI_DEVICE_ID_LP10000S:
2158 m = (typeof(m)){"LP10000-S", "PCI",
2159 "Obsolete, Unsupported Fibre Channel Adapter"};
2161 case PCI_DEVICE_ID_LP11000S:
2162 m = (typeof(m)){"LP11000-S", "PCI-X2",
2163 "Obsolete, Unsupported Fibre Channel Adapter"};
2165 case PCI_DEVICE_ID_LPE11000S:
2166 m = (typeof(m)){"LPe11000-S", "PCIe",
2167 "Obsolete, Unsupported Fibre Channel Adapter"};
2169 case PCI_DEVICE_ID_SAT:
2170 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2172 case PCI_DEVICE_ID_SAT_MID:
2173 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2175 case PCI_DEVICE_ID_SAT_SMB:
2176 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2178 case PCI_DEVICE_ID_SAT_DCSP:
2179 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2181 case PCI_DEVICE_ID_SAT_SCSP:
2182 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2184 case PCI_DEVICE_ID_SAT_S:
2185 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2187 case PCI_DEVICE_ID_HORNET:
2188 m = (typeof(m)){"LP21000", "PCIe",
2189 "Obsolete, Unsupported FCoE Adapter"};
2192 case PCI_DEVICE_ID_PROTEUS_VF:
2193 m = (typeof(m)){"LPev12000", "PCIe IOV",
2194 "Obsolete, Unsupported Fibre Channel Adapter"};
2196 case PCI_DEVICE_ID_PROTEUS_PF:
2197 m = (typeof(m)){"LPev12000", "PCIe IOV",
2198 "Obsolete, Unsupported Fibre Channel Adapter"};
2200 case PCI_DEVICE_ID_PROTEUS_S:
2201 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2202 "Obsolete, Unsupported Fibre Channel Adapter"};
2204 case PCI_DEVICE_ID_TIGERSHARK:
2206 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2208 case PCI_DEVICE_ID_TOMCAT:
2210 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2212 case PCI_DEVICE_ID_FALCON:
2213 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2214 "EmulexSecure Fibre"};
2216 case PCI_DEVICE_ID_BALIUS:
2217 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2218 "Obsolete, Unsupported Fibre Channel Adapter"};
2220 case PCI_DEVICE_ID_LANCER_FC:
2221 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2223 case PCI_DEVICE_ID_LANCER_FC_VF:
2224 m = (typeof(m)){"LPe16000", "PCIe",
2225 "Obsolete, Unsupported Fibre Channel Adapter"};
2227 case PCI_DEVICE_ID_LANCER_FCOE:
2229 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2231 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2233 m = (typeof(m)){"OCe15100", "PCIe",
2234 "Obsolete, Unsupported FCoE"};
2236 case PCI_DEVICE_ID_LANCER_G6_FC:
2237 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2239 case PCI_DEVICE_ID_SKYHAWK:
2240 case PCI_DEVICE_ID_SKYHAWK_VF:
2242 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2245 m = (typeof(m)){"Unknown", "", ""};
2249 if (mdp && mdp[0] == '\0')
2250 snprintf(mdp, 79,"%s", m.name);
2252 * OneConnect HBAs require special processing; they are all initiators
2253 * and we put the port number on the end
2255 if (descp && descp[0] == '\0') {
2257 snprintf(descp, 255,
2258 "Emulex OneConnect %s, %s Initiator %s",
2261 else if (max_speed == 0)
2262 snprintf(descp, 255,
2264 m.name, m.bus, m.function);
2266 snprintf(descp, 255,
2267 "Emulex %s %d%s %s %s",
2268 m.name, max_speed, (GE) ? "GE" : "Gb",
2274 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2275 * @phba: pointer to lpfc hba data structure.
2276 * @pring: pointer to an IOCB ring.
2277 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2279 * This routine posts a given number of IOCBs with the associated DMA buffer
2280 * descriptors specified by the cnt argument to the given IOCB ring.
2283 * The number of IOCBs NOT able to be posted to the IOCB ring.
2286 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2289 struct lpfc_iocbq *iocb;
2290 struct lpfc_dmabuf *mp1, *mp2;
2292 cnt += pring->missbufcnt;
2294 /* While there are buffers to post */
2296 /* Allocate buffer for command iocb */
2297 iocb = lpfc_sli_get_iocbq(phba);
2299 pring->missbufcnt = cnt;
2304 /* 2 buffers can be posted per command */
2305 /* Allocate buffer to post */
2306 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2308 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2309 if (!mp1 || !mp1->virt) {
2311 lpfc_sli_release_iocbq(phba, iocb);
2312 pring->missbufcnt = cnt;
2316 INIT_LIST_HEAD(&mp1->list);
2317 /* Allocate buffer to post */
2319 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2321 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2323 if (!mp2 || !mp2->virt) {
2325 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2327 lpfc_sli_release_iocbq(phba, iocb);
2328 pring->missbufcnt = cnt;
2332 INIT_LIST_HEAD(&mp2->list);
2337 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2338 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2339 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2340 icmd->ulpBdeCount = 1;
2343 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2344 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2345 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2347 icmd->ulpBdeCount = 2;
2350 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2353 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2355 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2359 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2363 lpfc_sli_release_iocbq(phba, iocb);
2364 pring->missbufcnt = cnt;
2367 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2369 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2371 pring->missbufcnt = 0;
2376 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2377 * @phba: pointer to lpfc hba data structure.
2379 * This routine posts initial receive IOCB buffers to the ELS ring. The
2380 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2384 * 0 - success (currently always success)
2387 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2389 struct lpfc_sli *psli = &phba->sli;
2391 /* Ring 0, ELS / CT buffers */
2392 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2393 /* Ring 2 - FCP no buffers needed */
2398 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
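/* S(N, V) rotates the 32-bit value V left by N bits; it is used by the SHA-1 style hash routines below */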
2401 * lpfc_sha_init - Set up initial array of hash table entries
2402 * @HashResultPointer: pointer to an array as hash table.
2404 * This routine sets up the initial values to the array of hash table entries
2408 lpfc_sha_init(uint32_t * HashResultPointer)
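/* The five constants below are the standard SHA-1 initial hash values (H0..H4) */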
2410 HashResultPointer[0] = 0x67452301;
2411 HashResultPointer[1] = 0xEFCDAB89;
2412 HashResultPointer[2] = 0x98BADCFE;
2413 HashResultPointer[3] = 0x10325476;
2414 HashResultPointer[4] = 0xC3D2E1F0;
2418 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2419 * @HashResultPointer: pointer to an initial/result hash table.
2420 * @HashWorkingPointer: pointer to a working hash table.
2422 * This routine iterates the initial hash table pointed to by @HashResultPointer
2423 * with the values from the working hash table pointed to by @HashWorkingPointer.
2424 * The results are put back into the initial hash table and returned through
2425 * @HashResultPointer as the result hash table.
2428 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
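/*
 * This follows the SHA-1 compression function: the 16-word working block is
 * first expanded into an 80-word message schedule, 80 rounds are run, and
 * the result is added back into the hash state.
 */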
2432 uint32_t A, B, C, D, E;
2435 HashWorkingPointer[t] =
2437 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2439 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2440 } while (++t <= 79);
2442 A = HashResultPointer[0];
2443 B = HashResultPointer[1];
2444 C = HashResultPointer[2];
2445 D = HashResultPointer[3];
2446 E = HashResultPointer[4];
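/* 80 rounds; the boolean function and additive constant change every 20 rounds */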
2450 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2451 } else if (t < 40) {
2452 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2453 } else if (t < 60) {
2454 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2456 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2458 TEMP += S(5, A) + E + HashWorkingPointer[t];
2464 } while (++t <= 79);
2466 HashResultPointer[0] += A;
2467 HashResultPointer[1] += B;
2468 HashResultPointer[2] += C;
2469 HashResultPointer[3] += D;
2470 HashResultPointer[4] += E;
2475 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2476 * @RandomChallenge: pointer to the entry of host challenge random number array.
2477 * @HashWorking: pointer to the entry of the working hash array.
2479 * This routine calculates the working hash array referred to by @HashWorking
2480 * from the challenge random numbers associated with the host, referred to by
2481 * @RandomChallenge. The result is put into the entry of the working hash
2482 * array and returned by reference through @HashWorking.
2485 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2487 *HashWorking = (*RandomChallenge ^ *HashWorking);
2491 * lpfc_hba_init - Perform special handling for LC HBA initialization
2492 * @phba: pointer to lpfc hba data structure.
2493 * @hbainit: pointer to an array of unsigned 32-bit integers.
2495 * This routine performs the special handling for LC HBA initialization.
2498 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2501 uint32_t *HashWorking;
2502 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2504 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2508 HashWorking[0] = HashWorking[78] = *pwwnn++;
2509 HashWorking[1] = HashWorking[79] = *pwwnn;
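/*
 * The two WWNN words seed both ends of the 80-word working array; the loop
 * below XORs the host's random challenge data into the low-order entries
 * before the hash is computed.
 */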
2511 for (t = 0; t < 7; t++)
2512 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2514 lpfc_sha_init(hbainit);
2515 lpfc_sha_iterate(hbainit, HashWorking);
2520 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2521 * @vport: pointer to a virtual N_Port data structure.
2523 * This routine performs the necessary cleanups before deleting the @vport.
2524 * It invokes the discovery state machine to perform necessary state
2525 * transitions and to release the ndlps associated with the @vport. Note,
2526 * the physical port is treated as @vport 0.
2529 lpfc_cleanup(struct lpfc_vport *vport)
2531 struct lpfc_hba *phba = vport->phba;
2532 struct lpfc_nodelist *ndlp, *next_ndlp;
2535 if (phba->link_state > LPFC_LINK_DOWN)
2536 lpfc_port_link_failure(vport);
2538 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2539 if (!NLP_CHK_NODE_ACT(ndlp)) {
2540 ndlp = lpfc_enable_node(vport, ndlp,
2541 NLP_STE_UNUSED_NODE);
2544 spin_lock_irq(&phba->ndlp_lock);
2545 NLP_SET_FREE_REQ(ndlp);
2546 spin_unlock_irq(&phba->ndlp_lock);
2547 /* Trigger the release of the ndlp memory */
2551 spin_lock_irq(&phba->ndlp_lock);
2552 if (NLP_CHK_FREE_REQ(ndlp)) {
2553 /* The ndlp should not be in memory free mode already */
2554 spin_unlock_irq(&phba->ndlp_lock);
2557 /* Indicate request for freeing ndlp memory */
2558 NLP_SET_FREE_REQ(ndlp);
2559 spin_unlock_irq(&phba->ndlp_lock);
2561 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2562 ndlp->nlp_DID == Fabric_DID) {
2563 /* Just free up ndlp with Fabric_DID for vports */
2568 /* take care of nodes in unused state before the state
2569 * machine takes action.
2571 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2576 if (ndlp->nlp_type & NLP_FABRIC)
2577 lpfc_disc_state_machine(vport, ndlp, NULL,
2578 NLP_EVT_DEVICE_RECOVERY);
2580 lpfc_disc_state_machine(vport, ndlp, NULL,
2584 /* At this point, ALL ndlp's should be gone
2585 * because of the previous NLP_EVT_DEVICE_RM.
2586 * Let's wait for this to happen, if needed.
2588 while (!list_empty(&vport->fc_nodes)) {
2590 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2591 "0233 Nodelist not empty\n");
2592 list_for_each_entry_safe(ndlp, next_ndlp,
2593 &vport->fc_nodes, nlp_listp) {
2594 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2596 "0282 did:x%x ndlp:x%p "
2597 "usgmap:x%x refcnt:%d\n",
2598 ndlp->nlp_DID, (void *)ndlp,
2601 &ndlp->kref.refcount));
2606 /* Wait for any activity on ndlps to settle */
2609 lpfc_cleanup_vports_rrqs(vport, NULL);
2613 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2614 * @vport: pointer to a virtual N_Port data structure.
2616 * This routine stops all the timers associated with a @vport. This function
2617 * is invoked before disabling or deleting a @vport. Note that the physical
2618 * port is treated as @vport 0.
2621 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2623 del_timer_sync(&vport->els_tmofunc);
2624 del_timer_sync(&vport->fc_fdmitmo);
2625 del_timer_sync(&vport->delayed_disc_tmo);
2626 lpfc_can_disctmo(vport);
2631 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2632 * @phba: pointer to lpfc hba data structure.
2634 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2635 * caller of this routine should already hold the host lock.
2638 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2640 /* Clear pending FCF rediscovery wait flag */
2641 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2643 /* Now, try to stop the timer */
2644 del_timer(&phba->fcf.redisc_wait);
2648 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2649 * @phba: pointer to lpfc hba data structure.
2651 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2652 * checks whether the FCF rediscovery wait timer is pending with the host
2653 * lock held before proceeding with disabling the timer and clearing the
2654 * wait timer pending flag.
2657 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2659 spin_lock_irq(&phba->hbalock);
2660 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2661 /* FCF rediscovery timer already fired or stopped */
2662 spin_unlock_irq(&phba->hbalock);
2665 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2666 /* Clear failover in progress flags */
2667 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2668 spin_unlock_irq(&phba->hbalock);
2672 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2673 * @phba: pointer to lpfc hba data structure.
2675 * This routine stops all the timers associated with an HBA. This function is
2676 * invoked before either putting an HBA offline or unloading the driver.
2679 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2681 lpfc_stop_vport_timers(phba->pport);
2682 del_timer_sync(&phba->sli.mbox_tmo);
2683 del_timer_sync(&phba->fabric_block_timer);
2684 del_timer_sync(&phba->eratt_poll);
2685 del_timer_sync(&phba->hb_tmofunc);
2686 if (phba->sli_rev == LPFC_SLI_REV4) {
2687 del_timer_sync(&phba->rrq_tmr);
2688 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2690 phba->hb_outstanding = 0;
2692 switch (phba->pci_dev_grp) {
2693 case LPFC_PCI_DEV_LP:
2694 /* Stop any LightPulse device specific driver timers */
2695 del_timer_sync(&phba->fcp_poll_timer);
2697 case LPFC_PCI_DEV_OC:
2698 /* Stop any OneConnect device specific driver timers */
2699 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2702 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2703 "0297 Invalid device group (x%x)\n",
2711 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
2712 * @phba: pointer to lpfc hba data structure.
2714 * This routine marks an HBA's management interface as blocked. Once the HBA's
2715 * management interface is marked as blocked, all user space access to the HBA,
2716 * whether through the sysfs interface or the libdfc interface, is blocked.
2717 * The HBA is set to block the management interface when the driver prepares
2718 * the HBA interface for online or offline operation.
2721 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
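/*
 * With LPFC_MBX_NO_WAIT this returns right after setting the block flag;
 * otherwise it waits, up to the mailbox command timeout, for any active
 * mailbox command to complete.
 */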
2723 unsigned long iflag;
2724 uint8_t actcmd = MBX_HEARTBEAT;
2725 unsigned long timeout;
2727 spin_lock_irqsave(&phba->hbalock, iflag);
2728 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2729 spin_unlock_irqrestore(&phba->hbalock, iflag);
2730 if (mbx_action == LPFC_MBX_NO_WAIT)
2732 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2733 spin_lock_irqsave(&phba->hbalock, iflag);
2734 if (phba->sli.mbox_active) {
2735 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2736 /* Determine how long we might wait for the active mailbox
2737 * command to be gracefully completed by firmware.
2739 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2740 phba->sli.mbox_active) * 1000) + jiffies;
2742 spin_unlock_irqrestore(&phba->hbalock, iflag);
2744 /* Wait for the outstanding mailbox command to complete */
2745 while (phba->sli.mbox_active) {
2746 /* Check active mailbox complete status every 2ms */
2748 if (time_after(jiffies, timeout)) {
2749 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2750 "2813 Mgmt IO is Blocked %x "
2751 "- mbox cmd %x still active\n",
2752 phba->sli.sli_flag, actcmd);
2759 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
2760 * @phba: pointer to lpfc hba data structure.
2762 * Allocate RPIs for all active remote nodes. This is needed whenever
2763 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
2764 * is to fix up the temporary rpi assignments.
2767 lpfc_sli4_node_prep(struct lpfc_hba *phba)
2769 struct lpfc_nodelist *ndlp, *next_ndlp;
2770 struct lpfc_vport **vports;
2773 if (phba->sli_rev != LPFC_SLI_REV4)
2776 vports = lpfc_create_vport_work_array(phba);
2777 if (vports != NULL) {
2778 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2779 if (vports[i]->load_flag & FC_UNLOADING)
2782 list_for_each_entry_safe(ndlp, next_ndlp,
2783 &vports[i]->fc_nodes,
2785 if (NLP_CHK_NODE_ACT(ndlp)) {
2787 lpfc_sli4_alloc_rpi(phba);
2788 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
2790 "0009 rpi:%x DID:%x "
2791 "flg:%x map:%x %p\n",
2801 lpfc_destroy_vport_work_array(phba, vports);
2805 * lpfc_online - Initialize and bring an HBA online
2806 * @phba: pointer to lpfc hba data structure.
2808 * This routine initializes the HBA and brings an HBA online. During this
2809 * process, the management interface is blocked to prevent user space access
2810 * to the HBA from interfering with the driver initialization.
2817 lpfc_online(struct lpfc_hba *phba)
2819 struct lpfc_vport *vport;
2820 struct lpfc_vport **vports;
2822 bool vpis_cleared = false;
2826 vport = phba->pport;
2828 if (!(vport->fc_flag & FC_OFFLINE_MODE))
2831 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2832 "0458 Bring Adapter online\n");
2834 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
2836 if (!lpfc_sli_queue_setup(phba)) {
2837 lpfc_unblock_mgmt_io(phba);
2841 if (phba->sli_rev == LPFC_SLI_REV4) {
2842 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2843 lpfc_unblock_mgmt_io(phba);
2846 spin_lock_irq(&phba->hbalock);
2847 if (!phba->sli4_hba.max_cfg_param.vpi_used)
2848 vpis_cleared = true;
2849 spin_unlock_irq(&phba->hbalock);
2851 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2852 lpfc_unblock_mgmt_io(phba);
2857 vports = lpfc_create_vport_work_array(phba);
2858 if (vports != NULL) {
2859 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2860 struct Scsi_Host *shost;
2861 shost = lpfc_shost_from_vport(vports[i]);
2862 spin_lock_irq(shost->host_lock);
2863 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2864 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2865 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2866 if (phba->sli_rev == LPFC_SLI_REV4) {
2867 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2868 if ((vpis_cleared) &&
2869 (vports[i]->port_type !=
2870 LPFC_PHYSICAL_PORT))
2873 spin_unlock_irq(shost->host_lock);
2876 lpfc_destroy_vport_work_array(phba, vports);
2878 lpfc_unblock_mgmt_io(phba);
2883 * lpfc_unblock_mgmt_io - Mark an HBA's management interface as not blocked
2884 * @phba: pointer to lpfc hba data structure.
2886 * This routine marks an HBA's management interface as not blocked. Once the
2887 * HBA's management interface is marked as not blocked, all user space access
2888 * to the HBA, whether through the sysfs interface or the libdfc interface,
2889 * is allowed. The HBA is set to block the management interface when the
2890 * driver prepares the HBA interface for online or offline and is then set
2891 * to unblock the management interface afterwards.
2894 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2896 unsigned long iflag;
2898 spin_lock_irqsave(&phba->hbalock, iflag);
2899 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2900 spin_unlock_irqrestore(&phba->hbalock, iflag);
2904 * lpfc_offline_prep - Prepare an HBA to be brought offline
2905 * @phba: pointer to lpfc hba data structure.
2907 * This routine is invoked to prepare an HBA to be brought offline. It performs
2908 * an unregistration login to all the nodes on all vports and flushes the
2909 * mailbox queue to make the HBA ready to be brought offline.
2912 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
2914 struct lpfc_vport *vport = phba->pport;
2915 struct lpfc_nodelist *ndlp, *next_ndlp;
2916 struct lpfc_vport **vports;
2917 struct Scsi_Host *shost;
2920 if (vport->fc_flag & FC_OFFLINE_MODE)
2923 lpfc_block_mgmt_io(phba, mbx_action);
2925 lpfc_linkdown(phba);
2927 /* Issue an unreg_login to all nodes on all vports */
2928 vports = lpfc_create_vport_work_array(phba);
2929 if (vports != NULL) {
2930 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2931 if (vports[i]->load_flag & FC_UNLOADING)
2933 shost = lpfc_shost_from_vport(vports[i]);
2934 spin_lock_irq(shost->host_lock);
2935 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2936 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2937 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2938 spin_unlock_irq(shost->host_lock);
2940 shost = lpfc_shost_from_vport(vports[i]);
2941 list_for_each_entry_safe(ndlp, next_ndlp,
2942 &vports[i]->fc_nodes,
2944 if (!NLP_CHK_NODE_ACT(ndlp))
2946 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2948 if (ndlp->nlp_type & NLP_FABRIC) {
2949 lpfc_disc_state_machine(vports[i], ndlp,
2950 NULL, NLP_EVT_DEVICE_RECOVERY);
2951 lpfc_disc_state_machine(vports[i], ndlp,
2952 NULL, NLP_EVT_DEVICE_RM);
2954 spin_lock_irq(shost->host_lock);
2955 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2956 spin_unlock_irq(shost->host_lock);
2958 * Whenever an SLI4 port goes offline, free the
2959 * RPI. Get a new RPI when the adapter port
2960 * comes back online.
2962 if (phba->sli_rev == LPFC_SLI_REV4) {
2963 lpfc_printf_vlog(ndlp->vport,
2964 KERN_INFO, LOG_NODE,
2965 "0011 lpfc_offline: "
2967 "usgmap:x%x rpi:%x\n",
2968 ndlp, ndlp->nlp_DID,
2972 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
2974 lpfc_unreg_rpi(vports[i], ndlp);
2978 lpfc_destroy_vport_work_array(phba, vports);
2980 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
2984 * lpfc_offline - Bring an HBA offline
2985 * @phba: pointer to lpfc hba data structure.
2987 * This routine actually brings an HBA offline. It stops all the timers
2988 * associated with the HBA, brings down the SLI layer, and eventually
2989 * marks the HBA as in offline state for the upper layer protocol.
2992 lpfc_offline(struct lpfc_hba *phba)
2994 struct Scsi_Host *shost;
2995 struct lpfc_vport **vports;
2998 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3001 /* stop port and all timers associated with this hba */
3002 lpfc_stop_port(phba);
3003 vports = lpfc_create_vport_work_array(phba);
3005 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3006 lpfc_stop_vport_timers(vports[i]);
3007 lpfc_destroy_vport_work_array(phba, vports);
3008 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3009 "0460 Bring Adapter offline\n");
3010 /* Bring down the SLI Layer and cleanup. The HBA is offline
3012 lpfc_sli_hba_down(phba);
3013 spin_lock_irq(&phba->hbalock);
3015 spin_unlock_irq(&phba->hbalock);
3016 vports = lpfc_create_vport_work_array(phba);
3018 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3019 shost = lpfc_shost_from_vport(vports[i]);
3020 spin_lock_irq(shost->host_lock);
3021 vports[i]->work_port_events = 0;
3022 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3023 spin_unlock_irq(shost->host_lock);
3025 lpfc_destroy_vport_work_array(phba, vports);
3029 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3030 * @phba: pointer to lpfc hba data structure.
3032 * This routine is to free all the SCSI buffers and IOCBs from the driver
3033 * list back to the kernel. It is called from lpfc_pci_remove_one to free
3034 * the internal resources before the device is removed from the system.
3037 lpfc_scsi_free(struct lpfc_hba *phba)
3039 struct lpfc_scsi_buf *sb, *sb_next;
3040 struct lpfc_iocbq *io, *io_next;
3042 spin_lock_irq(&phba->hbalock);
3044 /* Release all the lpfc_scsi_bufs maintained by this host. */
3046 spin_lock(&phba->scsi_buf_list_put_lock);
3047 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3049 list_del(&sb->list);
3050 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
3053 phba->total_scsi_bufs--;
3055 spin_unlock(&phba->scsi_buf_list_put_lock);
3057 spin_lock(&phba->scsi_buf_list_get_lock);
3058 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3060 list_del(&sb->list);
3061 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
3064 phba->total_scsi_bufs--;
3066 spin_unlock(&phba->scsi_buf_list_get_lock);
3068 /* Release all the lpfc_iocbq entries maintained by this host. */
3069 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
3070 list_del(&io->list);
3072 phba->total_iocbq_bufs--;
3075 spin_unlock_irq(&phba->hbalock);
3079 * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
3080 * @phba: pointer to lpfc hba data structure.
3082 * This routine first calculates the sizes of the current els and allocated
3083 * scsi sgl lists, and then goes through all sgls to update the physical
3084 * XRIs assigned due to port function reset. During port initialization, the
3085 * current els and allocated scsi sgl list sizes are 0.
3088 * 0 - successful (for now, it always returns 0)
3091 lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
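/*
 * The ELS xri-sgl list is grown or shrunk to match the required ELS IOCB
 * count and every remaining sgl is assigned a freshly allocated XRI; the
 * same XRI reassignment is then done for the allocated SCSI buffers,
 * bounded by scsi_xri_max.
 */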
3093 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3094 struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
3095 uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
3096 LIST_HEAD(els_sgl_list);
3097 LIST_HEAD(scsi_sgl_list);
3099 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3102 * update on pci function's els xri-sgl list
3104 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3105 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3106 /* els xri-sgl expanded */
3107 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3108 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3109 "3157 ELS xri-sgl count increased from "
3110 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3112 /* allocate the additional els sgls */
3113 for (i = 0; i < xri_cnt; i++) {
3114 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3116 if (sglq_entry == NULL) {
3117 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3118 "2562 Failure to allocate an "
3119 "ELS sgl entry:%d\n", i);
3123 sglq_entry->buff_type = GEN_BUFF_TYPE;
3124 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3126 if (sglq_entry->virt == NULL) {
3128 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3129 "2563 Failure to allocate an "
3130 "ELS mbuf:%d\n", i);
3134 sglq_entry->sgl = sglq_entry->virt;
3135 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3136 sglq_entry->state = SGL_FREED;
3137 list_add_tail(&sglq_entry->list, &els_sgl_list);
3139 spin_lock_irq(&phba->hbalock);
3140 spin_lock(&pring->ring_lock);
3141 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
3142 spin_unlock(&pring->ring_lock);
3143 spin_unlock_irq(&phba->hbalock);
3144 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3145 /* els xri-sgl shrunk */
3146 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3147 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3148 "3158 ELS xri-sgl count decreased from "
3149 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3151 spin_lock_irq(&phba->hbalock);
3152 spin_lock(&pring->ring_lock);
3153 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
3154 spin_unlock(&pring->ring_lock);
3155 spin_unlock_irq(&phba->hbalock);
3156 /* release extra els sgls from list */
3157 for (i = 0; i < xri_cnt; i++) {
3158 list_remove_head(&els_sgl_list,
3159 sglq_entry, struct lpfc_sglq, list);
3161 lpfc_mbuf_free(phba, sglq_entry->virt,
3166 spin_lock_irq(&phba->hbalock);
3167 spin_lock(&pring->ring_lock);
3168 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
3169 spin_unlock(&pring->ring_lock);
3170 spin_unlock_irq(&phba->hbalock);
3172 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3173 "3163 ELS xri-sgl count unchanged: %d\n",
3175 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3177 /* update xris to els sgls on the list */
3179 sglq_entry_next = NULL;
3180 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3181 &phba->sli4_hba.lpfc_sgl_list, list) {
3182 lxri = lpfc_sli4_next_xritag(phba);
3183 if (lxri == NO_XRI) {
3184 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3185 "2400 Failed to allocate xri for "
3190 sglq_entry->sli4_lxritag = lxri;
3191 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3195 * update on pci function's allocated scsi xri-sgl list
3197 phba->total_scsi_bufs = 0;
3199 /* maximum number of xris available for scsi buffers */
3200 phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
3203 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3204 "2401 Current allocated SCSI xri-sgl count:%d, "
3205 "maximum SCSI xri count:%d\n",
3206 phba->sli4_hba.scsi_xri_cnt,
3207 phba->sli4_hba.scsi_xri_max);
3209 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3210 spin_lock(&phba->scsi_buf_list_put_lock);
3211 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
3212 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
3213 spin_unlock(&phba->scsi_buf_list_put_lock);
3214 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3216 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
3217 /* max scsi xri shrunk below the allocated scsi buffers */
3218 scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
3219 phba->sli4_hba.scsi_xri_max;
3220 /* release the extra allocated scsi buffers */
3221 for (i = 0; i < scsi_xri_cnt; i++) {
3222 list_remove_head(&scsi_sgl_list, psb,
3223 struct lpfc_scsi_buf, list);
3225 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
3226 psb->data, psb->dma_handle);
3230 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3231 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
3232 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3235 /* update xris associated to remaining allocated scsi buffers */
3238 list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
3239 lxri = lpfc_sli4_next_xritag(phba);
3240 if (lxri == NO_XRI) {
3241 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3242 "2560 Failed to allocate xri for "
3247 psb->cur_iocbq.sli4_lxritag = lxri;
3248 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3250 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3251 spin_lock(&phba->scsi_buf_list_put_lock);
3252 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
3253 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
3254 spin_unlock(&phba->scsi_buf_list_put_lock);
3255 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3260 lpfc_free_els_sgl_list(phba);
3261 lpfc_scsi_free(phba);
3266 * lpfc_create_port - Create an FC port
3267 * @phba: pointer to lpfc hba data structure.
3268 * @instance: a unique integer ID to this FC port.
3269 * @dev: pointer to the device data structure.
3271 * This routine creates an FC port for the upper layer protocol. The FC port
3272 * can be created on top of either a physical port or a virtual port provided
3273 * by the HBA. This routine also allocates a SCSI host data structure (shost)
3274 * and associates the FC port created before adding the shost into the SCSI
3278 * @vport - pointer to the virtual N_Port data structure.
3279 * NULL - port create failed.
3282 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
3284 struct lpfc_vport *vport;
3285 struct Scsi_Host *shost;
3288 if (dev != &phba->pcidev->dev) {
3289 shost = scsi_host_alloc(&lpfc_vport_template,
3290 sizeof(struct lpfc_vport));
3292 if (phba->sli_rev == LPFC_SLI_REV4)
3293 shost = scsi_host_alloc(&lpfc_template,
3294 sizeof(struct lpfc_vport));
3296 shost = scsi_host_alloc(&lpfc_template_s3,
3297 sizeof(struct lpfc_vport));
3302 vport = (struct lpfc_vport *) shost->hostdata;
3304 vport->load_flag |= FC_LOADING;
3305 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3306 vport->fc_rscn_flush = 0;
3308 lpfc_get_vport_cfgparam(vport);
3309 shost->unique_id = instance;
3310 shost->max_id = LPFC_MAX_TARGET;
3311 shost->max_lun = vport->cfg_max_luns;
3312 shost->this_id = -1;
3313 shost->max_cmd_len = 16;
3314 shost->nr_hw_queues = phba->cfg_fcp_io_channel;
3315 if (phba->sli_rev == LPFC_SLI_REV4) {
3316 shost->dma_boundary =
3317 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
3318 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
3322 * Set initial can_queue value since 0 is no longer supported and
3323 * scsi_add_host will fail. This will be adjusted later based on the
3324 * max xri value determined in hba setup.
3326 shost->can_queue = phba->cfg_hba_queue_depth - 10;
3327 if (dev != &phba->pcidev->dev) {
3328 shost->transportt = lpfc_vport_transport_template;
3329 vport->port_type = LPFC_NPIV_PORT;
3331 shost->transportt = lpfc_transport_template;
3332 vport->port_type = LPFC_PHYSICAL_PORT;
3335 /* Initialize all internally managed lists. */
3336 INIT_LIST_HEAD(&vport->fc_nodes);
3337 INIT_LIST_HEAD(&vport->rcv_buffer_list);
3338 spin_lock_init(&vport->work_port_lock);
3340 init_timer(&vport->fc_disctmo);
3341 vport->fc_disctmo.function = lpfc_disc_timeout;
3342 vport->fc_disctmo.data = (unsigned long)vport;
3344 init_timer(&vport->fc_fdmitmo);
3345 vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
3346 vport->fc_fdmitmo.data = (unsigned long)vport;
3348 init_timer(&vport->els_tmofunc);
3349 vport->els_tmofunc.function = lpfc_els_timeout;
3350 vport->els_tmofunc.data = (unsigned long)vport;
3352 init_timer(&vport->delayed_disc_tmo);
3353 vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
3354 vport->delayed_disc_tmo.data = (unsigned long)vport;
3356 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
3360 spin_lock_irq(&phba->hbalock);
3361 list_add_tail(&vport->listentry, &phba->port_list);
3362 spin_unlock_irq(&phba->hbalock);
3366 scsi_host_put(shost);
3372 * destroy_port - destroy an FC port
3373 * @vport: pointer to an lpfc virtual N_Port data structure.
3375 * This routine destroys an FC port from the upper layer protocol. All the
3376 * resources associated with the port are released.
3379 destroy_port(struct lpfc_vport *vport)
3381 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3382 struct lpfc_hba *phba = vport->phba;
3384 lpfc_debugfs_terminate(vport);
3385 fc_remove_host(shost);
3386 scsi_remove_host(shost);
3388 spin_lock_irq(&phba->hbalock);
3389 list_del_init(&vport->listentry);
3390 spin_unlock_irq(&phba->hbalock);
3392 lpfc_cleanup(vport);
3397 * lpfc_get_instance - Get a unique integer ID
3399 * This routine allocates a unique integer ID from the lpfc_hba_index pool. It
3400 * uses the kernel idr facility to perform the task.
3403 * instance - a unique integer ID allocated as the new instance.
3404 * -1 - lpfc get instance failed.
3407 lpfc_get_instance(void)
3411 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
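/* idr_alloc() returns the newly allocated ID or a negative errno; failure is mapped to -1 */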
3412 return ret < 0 ? -1 : ret;
3416 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
3417 * @shost: pointer to SCSI host data structure.
3418 * @time: elapsed time of the scan in jiffies.
3420 * This routine is called by the SCSI layer with a SCSI host to determine
3421 * whether the host scan is finished.
3423 * Note: there is no scan_start function as adapter initialization will have
3424 * asynchronously kicked off the link initialization.
3427 * 0 - SCSI host scan is not over yet.
3428 * 1 - SCSI host scan is over.
3430 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
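/*
 * The checks below force scan completion after 30 seconds overall, or after
 * 15 seconds if the link is still down; otherwise completion is held off
 * until the vport is ready and discovery and mailbox activity have settled.
 */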
3432 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3433 struct lpfc_hba *phba = vport->phba;
3436 spin_lock_irq(shost->host_lock);
3438 if (vport->load_flag & FC_UNLOADING) {
3442 if (time >= msecs_to_jiffies(30 * 1000)) {
3443 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3444 "0461 Scanning longer than 30 "
3445 "seconds. Continuing initialization\n");
3449 if (time >= msecs_to_jiffies(15 * 1000) &&
3450 phba->link_state <= LPFC_LINK_DOWN) {
3451 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3452 "0465 Link down longer than 15 "
3453 "seconds. Continuing initialization\n");
3458 if (vport->port_state != LPFC_VPORT_READY)
3460 if (vport->num_disc_nodes || vport->fc_prli_sent)
3462 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
3464 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
3470 spin_unlock_irq(shost->host_lock);
3475 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
3476 * @shost: pointer to SCSI host data structure.
3478 * This routine initializes a given SCSI host's attributes on an FC port. The
3479 * SCSI host can be either on top of a physical port or a virtual port.
3481 void lpfc_host_attrib_init(struct Scsi_Host *shost)
3483 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3484 struct lpfc_hba *phba = vport->phba;
3486 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
3489 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
3490 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3491 fc_host_supported_classes(shost) = FC_COS_CLASS3;
3493 memset(fc_host_supported_fc4s(shost), 0,
3494 sizeof(fc_host_supported_fc4s(shost)));
3495 fc_host_supported_fc4s(shost)[2] = 1;
3496 fc_host_supported_fc4s(shost)[7] = 1;
3498 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
3499 sizeof fc_host_symbolic_name(shost));
3501 fc_host_supported_speeds(shost) = 0;
3502 if (phba->lmt & LMT_32Gb)
3503 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
3504 if (phba->lmt & LMT_16Gb)
3505 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
3506 if (phba->lmt & LMT_10Gb)
3507 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
3508 if (phba->lmt & LMT_8Gb)
3509 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
3510 if (phba->lmt & LMT_4Gb)
3511 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
3512 if (phba->lmt & LMT_2Gb)
3513 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
3514 if (phba->lmt & LMT_1Gb)
3515 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
3517 fc_host_maxframe_size(shost) =
3518 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
3519 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
3521 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
3523 /* This value is also unchanging */
3524 memset(fc_host_active_fc4s(shost), 0,
3525 sizeof(fc_host_active_fc4s(shost)));
3526 fc_host_active_fc4s(shost)[2] = 1;
3527 fc_host_active_fc4s(shost)[7] = 1;
3529 fc_host_max_npiv_vports(shost) = phba->max_vpi;
3530 spin_lock_irq(shost->host_lock);
3531 vport->load_flag &= ~FC_LOADING;
3532 spin_unlock_irq(shost->host_lock);
3536 * lpfc_stop_port_s3 - Stop SLI3 device port
3537 * @phba: pointer to lpfc hba data structure.
3539 * This routine is invoked to stop an SLI3 device port; it stops the device
3540 * from generating interrupts and stops the device driver's timers for the
3544 lpfc_stop_port_s3(struct lpfc_hba *phba)
3546 /* Clear all interrupt enable conditions */
3547 writel(0, phba->HCregaddr);
3548 readl(phba->HCregaddr); /* flush */
3549 /* Clear all pending interrupts */
3550 writel(0xffffffff, phba->HAregaddr);
3551 readl(phba->HAregaddr); /* flush */
3553 /* Reset some HBA SLI setup states */
3554 lpfc_stop_hba_timers(phba);
3555 phba->pport->work_port_events = 0;
3559 * lpfc_stop_port_s4 - Stop SLI4 device port
3560 * @phba: pointer to lpfc hba data structure.
3562 * This routine is invoked to stop an SLI4 device port; it stops the device
3563 * from generating interrupts and stops the device driver's timers for the
3567 lpfc_stop_port_s4(struct lpfc_hba *phba)
3569 /* Reset some HBA SLI4 setup states */
3570 lpfc_stop_hba_timers(phba);
3571 phba->pport->work_port_events = 0;
3572 phba->sli4_hba.intr_enable = 0;
3576 * lpfc_stop_port - Wrapper function for stopping hba port
3577 * @phba: Pointer to HBA context object.
3579 * This routine wraps the actual SLI3 or SLI4 hba stop port routine through
3580 * the API jump table function pointer in the lpfc_hba struct.
3583 lpfc_stop_port(struct lpfc_hba *phba)
3585 phba->lpfc_stop_port(phba);
3589 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
3590 * @phba: Pointer to hba for which this call is being executed.
3592 * This routine starts the timer waiting for the FCF rediscovery to complete.
3595 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
3597 unsigned long fcf_redisc_wait_tmo =
3598 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
3599 /* Start fcf rediscovery wait period timer */
3600 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
3601 spin_lock_irq(&phba->hbalock);
3602 /* Allow action to new fcf asynchronous event */
3603 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
3604 /* Mark the FCF rediscovery pending state */
3605 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
3606 spin_unlock_irq(&phba->hbalock);
3610 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
3611 * @ptr: Map to lpfc_hba data structure pointer.
3613 * This routine is invoked when the wait for FCF table rediscovery has
3614 * timed out. If new FCF record(s) have been discovered during the wait
3615 * period, a new FCF event is added to the FCoE async event list and the
3616 * worker thread is woken up for processing from the worker thread
3617 * context.
3620 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
3622 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
3624 /* Don't send FCF rediscovery event if timer cancelled */
3625 spin_lock_irq(&phba->hbalock);
3626 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3627 spin_unlock_irq(&phba->hbalock);
3630 /* Clear FCF rediscovery timer pending flag */
3631 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3632 /* FCF rediscovery event to worker thread */
3633 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
3634 spin_unlock_irq(&phba->hbalock);
3635 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3636 "2776 FCF rediscover quiescent timer expired\n");
3637 /* wake up worker thread */
3638 lpfc_worker_wake_up(phba);
3642 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3643 * @phba: pointer to lpfc hba data structure.
3644 * @acqe_link: pointer to the async link completion queue entry.
3646 * This routine is to parse the SLI4 link-attention link fault code and
3647 * translate it into the base driver's read link attention mailbox command
3650 * Return: Link-attention status in terms of base driver's coding.
3653 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3654 struct lpfc_acqe_link *acqe_link)
3656 uint16_t latt_fault;
3658 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3659 case LPFC_ASYNC_LINK_FAULT_NONE:
3660 case LPFC_ASYNC_LINK_FAULT_LOCAL:
3661 case LPFC_ASYNC_LINK_FAULT_REMOTE:
3665 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3666 "0398 Invalid link fault code: x%x\n",
3667 bf_get(lpfc_acqe_link_fault, acqe_link));
3668 latt_fault = MBXERR_ERROR;
3675 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3676 * @phba: pointer to lpfc hba data structure.
3677 * @acqe_link: pointer to the async link completion queue entry.
3679 * This routine is to parse the SLI4 link attention type and translate it
3680 * into the base driver's link attention type coding.
3682 * Return: Link attention type in terms of base driver's coding.
3685 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3686 struct lpfc_acqe_link *acqe_link)
3690 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3691 case LPFC_ASYNC_LINK_STATUS_DOWN:
3692 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3693 att_type = LPFC_ATT_LINK_DOWN;
3695 case LPFC_ASYNC_LINK_STATUS_UP:
3696 /* Ignore physical link up events - wait for logical link up */
3697 att_type = LPFC_ATT_RESERVED;
3699 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3700 att_type = LPFC_ATT_LINK_UP;
3703 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3704 "0399 Invalid link attention type: x%x\n",
3705 bf_get(lpfc_acqe_link_status, acqe_link));
3706 att_type = LPFC_ATT_RESERVED;
3713 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3714 * @phba: pointer to lpfc hba data structure.
3715 * @acqe_link: pointer to the async link completion queue entry.
3717 * This routine is to parse the SLI4 link-attention link speed and translate
3718 * it into the base driver's link-attention link speed coding.
3720 * Return: Link-attention link speed in terms of base driver's coding.
3723 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3724 struct lpfc_acqe_link *acqe_link)
3728 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3729 case LPFC_ASYNC_LINK_SPEED_ZERO:
3730 case LPFC_ASYNC_LINK_SPEED_10MBPS:
3731 case LPFC_ASYNC_LINK_SPEED_100MBPS:
3732 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3734 case LPFC_ASYNC_LINK_SPEED_1GBPS:
3735 link_speed = LPFC_LINK_SPEED_1GHZ;
3737 case LPFC_ASYNC_LINK_SPEED_10GBPS:
3738 link_speed = LPFC_LINK_SPEED_10GHZ;
3740 case LPFC_ASYNC_LINK_SPEED_20GBPS:
3741 case LPFC_ASYNC_LINK_SPEED_25GBPS:
3742 case LPFC_ASYNC_LINK_SPEED_40GBPS:
3743 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3747 "0483 Invalid link-attention link speed: x%x\n",
3748 bf_get(lpfc_acqe_link_speed, acqe_link));
3749 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3756 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
3757 * @phba: pointer to lpfc hba data structure.
3759 * This routine is to get an SLI3 FC port's link speed in Mbps.
3761 * Return: link speed in terms of Mbps.
3764 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
3766 uint32_t link_speed;
3768 if (!lpfc_is_link_up(phba))
3771 switch (phba->fc_linkspeed) {
3772 case LPFC_LINK_SPEED_1GHZ:
3775 case LPFC_LINK_SPEED_2GHZ:
3778 case LPFC_LINK_SPEED_4GHZ:
3781 case LPFC_LINK_SPEED_8GHZ:
3784 case LPFC_LINK_SPEED_10GHZ:
3787 case LPFC_LINK_SPEED_16GHZ:
3797 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
3798 * @phba: pointer to lpfc hba data structure.
3799 * @evt_code: asynchronous event code.
3800 * @speed_code: asynchronous event link speed code.
3802 * This routine is to parse the given SLI4 async event link speed code into
3803 * a link speed value in Mbps.
3805 * Return: link speed in terms of Mbps.
3808 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
3811 uint32_t port_speed;
3814 case LPFC_TRAILER_CODE_LINK:
3815 switch (speed_code) {
3816 case LPFC_ASYNC_LINK_SPEED_ZERO:
3819 case LPFC_ASYNC_LINK_SPEED_10MBPS:
3822 case LPFC_ASYNC_LINK_SPEED_100MBPS:
3825 case LPFC_ASYNC_LINK_SPEED_1GBPS:
3828 case LPFC_ASYNC_LINK_SPEED_10GBPS:
3831 case LPFC_ASYNC_LINK_SPEED_20GBPS:
3834 case LPFC_ASYNC_LINK_SPEED_25GBPS:
3837 case LPFC_ASYNC_LINK_SPEED_40GBPS:
3844 case LPFC_TRAILER_CODE_FC:
3845 switch (speed_code) {
3846 case LPFC_FC_LA_SPEED_UNKNOWN:
3849 case LPFC_FC_LA_SPEED_1G:
3852 case LPFC_FC_LA_SPEED_2G:
3855 case LPFC_FC_LA_SPEED_4G:
3858 case LPFC_FC_LA_SPEED_8G:
3861 case LPFC_FC_LA_SPEED_10G:
3864 case LPFC_FC_LA_SPEED_16G:
3867 case LPFC_FC_LA_SPEED_32G:
3881 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
3882 * @phba: pointer to lpfc hba data structure.
3883 * @acqe_link: pointer to the async link completion queue entry.
3885 * This routine is to handle the SLI4 asynchronous FCoE link event.
3888 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3889 struct lpfc_acqe_link *acqe_link)
3891 struct lpfc_dmabuf *mp;
3894 struct lpfc_mbx_read_top *la;
3898 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3899 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
3901 phba->fcoe_eventtag = acqe_link->event_tag;
3902 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3904 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3905 "0395 The mboxq allocation failed\n");
3908 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3910 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3911 "0396 The lpfc_dmabuf allocation failed\n");
3914 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3916 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3917 "0397 The mbuf allocation failed\n");
3918 goto out_free_dmabuf;
3921 /* Cleanup any outstanding ELS commands */
3922 lpfc_els_flush_all_cmd(phba);
3924 /* Block ELS IOCBs until we are done processing the link event */
3925 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3927 /* Update link event statistics */
3928 phba->sli.slistat.link_event++;
3930 /* Create lpfc_handle_latt mailbox command from link ACQE */
3931 lpfc_read_topology(phba, pmb, mp);
3932 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3933 pmb->vport = phba->pport;
3935 /* Keep the link status for extra SLI4 state machine reference */
3936 phba->sli4_hba.link_state.speed =
3937 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
3938 bf_get(lpfc_acqe_link_speed, acqe_link));
3939 phba->sli4_hba.link_state.duplex =
3940 bf_get(lpfc_acqe_link_duplex, acqe_link);
3941 phba->sli4_hba.link_state.status =
3942 bf_get(lpfc_acqe_link_status, acqe_link);
3943 phba->sli4_hba.link_state.type =
3944 bf_get(lpfc_acqe_link_type, acqe_link);
3945 phba->sli4_hba.link_state.number =
3946 bf_get(lpfc_acqe_link_number, acqe_link);
3947 phba->sli4_hba.link_state.fault =
3948 bf_get(lpfc_acqe_link_fault, acqe_link);
3949 phba->sli4_hba.link_state.logical_speed =
3950 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
3952 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3953 "2900 Async FC/FCoE Link event - Speed:%dGBit "
3954 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
3955 "Logical speed:%dMbps Fault:%d\n",
3956 phba->sli4_hba.link_state.speed,
3957 phba->sli4_hba.link_state.topology,
3958 phba->sli4_hba.link_state.status,
3959 phba->sli4_hba.link_state.type,
3960 phba->sli4_hba.link_state.number,
3961 phba->sli4_hba.link_state.logical_speed,
3962 phba->sli4_hba.link_state.fault);
3964 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
3965 * topology info. Note: Optional for non FC-AL ports.
3967 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3968 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3969 if (rc == MBX_NOT_FINISHED)
3970 goto out_free_dmabuf;
3974 * For FCoE Mode: fill in all the topology information we need and call
3975 * the READ_TOPOLOGY completion routine to continue without actually
3976 * sending the READ_TOPOLOGY mailbox command to the port.
3978 /* Parse and translate status field */
3980 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3982 /* Parse and translate link attention fields */
3983 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3984 la->eventTag = acqe_link->event_tag;
3985 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
3986 bf_set(lpfc_mbx_read_top_link_spd, la,
3987 lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
3989 /* Fake the following irrelevant fields */
3990 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
3991 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
3992 bf_set(lpfc_mbx_read_top_il, la, 0);
3993 bf_set(lpfc_mbx_read_top_pb, la, 0);
3994 bf_set(lpfc_mbx_read_top_fa, la, 0);
3995 bf_set(lpfc_mbx_read_top_mm, la, 0);
3997 /* Invoke the lpfc_handle_latt mailbox command callback function */
3998 lpfc_mbx_cmpl_read_topology(phba, pmb);
4005 mempool_free(pmb, phba->mbox_mem_pool);
4009 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
4010 * @phba: pointer to lpfc hba data structure.
4011 * @acqe_fc: pointer to the async fc completion queue entry.
4013 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
4014 * that the event was received and then issue a read_topology mailbox command so
4015 * that the rest of the driver will treat it the same as SLI3.
4018 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
4020 struct lpfc_dmabuf *mp;
4024 if (bf_get(lpfc_trailer_type, acqe_fc) !=
4025 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
4026 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4027 "2895 Non FC link Event detected.(%d)\n",
4028 bf_get(lpfc_trailer_type, acqe_fc));
4031 /* Keep the link status for extra SLI4 state machine reference */
4032 phba->sli4_hba.link_state.speed =
4033 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
4034 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
4035 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
4036 phba->sli4_hba.link_state.topology =
4037 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
4038 phba->sli4_hba.link_state.status =
4039 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
4040 phba->sli4_hba.link_state.type =
4041 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
4042 phba->sli4_hba.link_state.number =
4043 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
4044 phba->sli4_hba.link_state.fault =
4045 bf_get(lpfc_acqe_link_fault, acqe_fc);
4046 phba->sli4_hba.link_state.logical_speed =
4047 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
4048 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4049 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
4050 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
4051 "%dMbps Fault:%d\n",
4052 phba->sli4_hba.link_state.speed,
4053 phba->sli4_hba.link_state.topology,
4054 phba->sli4_hba.link_state.status,
4055 phba->sli4_hba.link_state.type,
4056 phba->sli4_hba.link_state.number,
4057 phba->sli4_hba.link_state.logical_speed,
4058 phba->sli4_hba.link_state.fault);
4059 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4061 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4062 "2897 The mboxq allocation failed\n");
4065 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4067 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4068 "2898 The lpfc_dmabuf allocation failed\n");
4071 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
4073 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4074 "2899 The mbuf allocation failed\n");
4075 goto out_free_dmabuf;
4078 /* Cleanup any outstanding ELS commands */
4079 lpfc_els_flush_all_cmd(phba);
4081 /* Block ELS IOCBs until we have done process link event */
4082 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
4084 /* Update link event statistics */
4085 phba->sli.slistat.link_event++;
4087 /* Create lpfc_handle_latt mailbox command from link ACQE */
4088 lpfc_read_topology(phba, pmb, mp);
4089 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
4090 pmb->vport = phba->pport;
4092 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4093 if (rc == MBX_NOT_FINISHED)
4094 goto out_free_dmabuf;
4100 mempool_free(pmb, phba->mbox_mem_pool);
4104 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
4105 * @phba: pointer to lpfc hba data structure.
4106 * @acqe_sli: pointer to the async SLI completion queue entry.
4108 * This routine is to handle the SLI4 asynchronous SLI events.
4111 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
4117 struct temp_event temp_event_data;
4118 struct lpfc_acqe_misconfigured_event *misconfigured;
4119 struct Scsi_Host *shost;
4121 evt_type = bf_get(lpfc_trailer_type, acqe_sli);
4123 /* Special case Lancer */
4124 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
4125 LPFC_SLI_INTF_IF_TYPE_2) {
4126 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4127 "2901 Async SLI event - Event Data1:x%08x Event Data2:"
4128 "x%08x SLI Event Type:%d\n",
4129 acqe_sli->event_data1, acqe_sli->event_data2,
4134 port_name = phba->Port[0];
4135 if (port_name == 0x00)
4136 port_name = '?'; /* port name is empty */
4139 case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
4140 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
4141 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
4142 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
4144 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4145 "3190 Over Temperature:%d Celsius- Port Name %c\n",
4146 acqe_sli->event_data1, port_name);
4148 shost = lpfc_shost_from_vport(phba->pport);
4149 fc_host_post_vendor_event(shost, fc_get_event_number(),
4150 sizeof(temp_event_data),
4151 (char *)&temp_event_data,
4152 SCSI_NL_VID_TYPE_PCI
4153 | PCI_VENDOR_ID_EMULEX);
4155 case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
4156 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
4157 temp_event_data.event_code = LPFC_NORMAL_TEMP;
4158 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
4160 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4161 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
4162 acqe_sli->event_data1, port_name);
4164 shost = lpfc_shost_from_vport(phba->pport);
4165 fc_host_post_vendor_event(shost, fc_get_event_number(),
4166 sizeof(temp_event_data),
4167 (char *)&temp_event_data,
4168 SCSI_NL_VID_TYPE_PCI
4169 | PCI_VENDOR_ID_EMULEX);
4171 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
4172 misconfigured = (struct lpfc_acqe_misconfigured_event *)
4173 &acqe_sli->event_data1;
4175 /* fetch the status for this port */
4176 switch (phba->sli4_hba.lnk_info.lnk_no) {
4177 case LPFC_LINK_NUMBER_0:
4178 status = bf_get(lpfc_sli_misconfigured_port0,
4179 &misconfigured->theEvent);
4181 case LPFC_LINK_NUMBER_1:
4182 status = bf_get(lpfc_sli_misconfigured_port1,
4183 &misconfigured->theEvent);
4185 case LPFC_LINK_NUMBER_2:
4186 status = bf_get(lpfc_sli_misconfigured_port2,
4187 &misconfigured->theEvent);
4189 case LPFC_LINK_NUMBER_3:
4190 status = bf_get(lpfc_sli_misconfigured_port3,
4191 &misconfigured->theEvent);
4194 status = ~LPFC_SLI_EVENT_STATUS_VALID;
4199 case LPFC_SLI_EVENT_STATUS_VALID:
4200 return; /* no message if the sfp is okay */
4201 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
4202 sprintf(message, "Optics faulted/incorrectly "
4203 "installed/not installed - Reseat optics, "
4204 "if issue not resolved, replace.");
4206 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
4208 "Optics of two types installed - Remove one "
4209 "optic or install matching pair of optics.");
4211 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
4212 sprintf(message, "Incompatible optics - Replace with "
4213 "compatible optics for card to function.");
4216 /* firmware is reporting a status we don't know about */
4217 sprintf(message, "Unknown event status x%02x", status);
4221 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4222 "3176 Misconfigured Physical Port - "
4223 "Port Name %c %s\n", port_name, message);
4225 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
4226 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4227 "3192 Remote DPort Test Initiated - "
4228 "Event Data1:x%08x Event Data2: x%08x\n",
4229 acqe_sli->event_data1, acqe_sli->event_data2);
4232 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4233 "3193 Async SLI event - Event Data1:x%08x Event Data2:"
4234 "x%08x SLI Event Type:%d\n",
4235 acqe_sli->event_data1, acqe_sli->event_data2,
4242 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
4243 * @vport: pointer to vport data structure.
4245 * This routine is to perform Clear Virtual Link (CVL) on a vport in
4246 * response to a CVL event.
4248 * Return the pointer to the ndlp with the vport if successful, otherwise return NULL.
4251 static struct lpfc_nodelist *
4252 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
4254 struct lpfc_nodelist *ndlp;
4255 struct Scsi_Host *shost;
4256 struct lpfc_hba *phba;
4263 ndlp = lpfc_findnode_did(vport, Fabric_DID);
4265 /* Cannot find existing Fabric ndlp, so allocate a new one */
4266 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4269 lpfc_nlp_init(vport, ndlp, Fabric_DID);
4270 /* Set the node type */
4271 ndlp->nlp_type |= NLP_FABRIC;
4272 /* Put ndlp onto node list */
4273 lpfc_enqueue_node(vport, ndlp);
4274 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
4275 /* re-setup ndlp without removing from node list */
4276 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
4280 if ((phba->pport->port_state < LPFC_FLOGI) &&
4281 (phba->pport->port_state != LPFC_VPORT_FAILED))
4283 /* If the virtual link is not yet instantiated, ignore the CVL */
4284 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
4285 && (vport->port_state != LPFC_VPORT_FAILED))
4287 shost = lpfc_shost_from_vport(vport);
4290 lpfc_linkdown_port(vport);
4291 lpfc_cleanup_pending_mbox(vport);
4292 spin_lock_irq(shost->host_lock);
4293 vport->fc_flag |= FC_VPORT_CVL_RCVD;
4294 spin_unlock_irq(shost->host_lock);
4300 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
4301 * @phba: pointer to lpfc hba data structure.
4303 * This routine is to perform Clear Virtual Link (CVL) on all vports in
4304 * response to an FCF dead event.
4307 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
4309 struct lpfc_vport **vports;
4312 vports = lpfc_create_vport_work_array(phba);
4314 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
4315 lpfc_sli4_perform_vport_cvl(vports[i]);
4316 lpfc_destroy_vport_work_array(phba, vports);
4320 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
4321 * @phba: pointer to lpfc hba data structure.
4322 * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
4324 * This routine is to handle the SLI4 asynchronous FCoE FIP event.
4327 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
4328 struct lpfc_acqe_fip *acqe_fip)
4330 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
4332 struct lpfc_vport *vport;
4333 struct lpfc_nodelist *ndlp;
4334 struct Scsi_Host *shost;
4335 int active_vlink_present;
4336 struct lpfc_vport **vports;
4339 phba->fc_eventTag = acqe_fip->event_tag;
4340 phba->fcoe_eventtag = acqe_fip->event_tag;
4341 switch (event_type) {
4342 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
4343 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
4344 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
4345 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4347 "2546 New FCF event, evt_tag:x%x, "
4349 acqe_fip->event_tag,
4352 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
4354 "2788 FCF param modified event, "
4355 "evt_tag:x%x, index:x%x\n",
4356 acqe_fip->event_tag,
4358 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4360 * During period of FCF discovery, read the FCF
4361 * table record indexed by the event to update
4362 * FCF roundrobin failover eligible FCF bmask.
4364 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
4366 "2779 Read FCF (x%x) for updating "
4367 "roundrobin FCF failover bmask\n",
4369 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
4372 /* If the FCF discovery is in progress, do nothing. */
4373 spin_lock_irq(&phba->hbalock);
4374 if (phba->hba_flag & FCF_TS_INPROG) {
4375 spin_unlock_irq(&phba->hbalock);
4378 /* If fast FCF failover rescan event is pending, do nothing */
4379 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
4380 spin_unlock_irq(&phba->hbalock);
4384 /* If the FCF has been in discovered state, do nothing. */
4385 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
4386 spin_unlock_irq(&phba->hbalock);
4389 spin_unlock_irq(&phba->hbalock);
4391 /* Otherwise, scan the entire FCF table and re-discover SAN */
4392 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4393 "2770 Start FCF table scan per async FCF "
4394 "event, evt_tag:x%x, index:x%x\n",
4395 acqe_fip->event_tag, acqe_fip->index);
4396 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
4397 LPFC_FCOE_FCF_GET_FIRST);
4399 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4400 "2547 Issue FCF scan read FCF mailbox "
4401 "command failed (x%x)\n", rc);
4404 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
4405 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4406 "2548 FCF Table full count 0x%x tag 0x%x\n",
4407 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
4408 acqe_fip->event_tag);
4411 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
4412 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
4413 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4414 "2549 FCF (x%x) disconnected from network, "
4415 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
4417 * If we are in the middle of FCF failover process, clear
4418 * the corresponding FCF bit in the roundrobin bitmap.
4420 spin_lock_irq(&phba->hbalock);
4421 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4422 spin_unlock_irq(&phba->hbalock);
4423 /* Update FLOGI FCF failover eligible FCF bmask */
4424 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
4427 spin_unlock_irq(&phba->hbalock);
4429 /* If the event is not for currently used fcf do nothing */
4430 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
4434 * Otherwise, request the port to rediscover the entire FCF
4435 * table for a fast recovery from the case that the current FCF
4436 * is no longer valid, since we are not already in the middle
4437 * of the FCF failover process.
4439 spin_lock_irq(&phba->hbalock);
4440 /* Mark the fast failover process in progress */
4441 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
4442 spin_unlock_irq(&phba->hbalock);
4444 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4445 "2771 Start FCF fast failover process due to "
4446 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
4447 "\n", acqe_fip->event_tag, acqe_fip->index);
4448 rc = lpfc_sli4_redisc_fcf_table(phba);
4450 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4452 "2772 Issue FCF rediscover mabilbox "
4453 "command failed, fail through to FCF "
4455 spin_lock_irq(&phba->hbalock);
4456 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
4457 spin_unlock_irq(&phba->hbalock);
4459 * As a last resort, fail over by treating this
4460 * as a link down to FCF registration.
4462 lpfc_sli4_fcf_dead_failthrough(phba);
4464 /* Reset FCF roundrobin bmask for new discovery */
4465 lpfc_sli4_clear_fcf_rr_bmask(phba);
4467 * Handling fast FCF failover to a DEAD FCF event is
4468 * considered equivalent to receiving a CVL on all vports.
4470 lpfc_sli4_perform_all_vport_cvl(phba);
4473 case LPFC_FIP_EVENT_TYPE_CVL:
4474 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
4475 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4476 "2718 Clear Virtual Link Received for VPI 0x%x"
4477 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
4479 vport = lpfc_find_vport_by_vpid(phba,
4481 ndlp = lpfc_sli4_perform_vport_cvl(vport);
4484 active_vlink_present = 0;
4486 vports = lpfc_create_vport_work_array(phba);
4488 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
4490 if ((!(vports[i]->fc_flag &
4491 FC_VPORT_CVL_RCVD)) &&
4492 (vports[i]->port_state > LPFC_FDISC)) {
4493 active_vlink_present = 1;
4497 lpfc_destroy_vport_work_array(phba, vports);
4501 * Don't re-instantiate if vport is marked for deletion.
4502 * If we are here first then vport_delete is going to wait
4503 * for discovery to complete.
4505 if (!(vport->load_flag & FC_UNLOADING) &&
4506 active_vlink_present) {
4508 * If there are other active VLinks present,
4509 * re-instantiate the Vlink using FDISC.
4511 mod_timer(&ndlp->nlp_delayfunc,
4512 jiffies + msecs_to_jiffies(1000));
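/* Arm the delayed-ELS timer (~1 second) so FDISC is re-issued for this vport */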
4513 shost = lpfc_shost_from_vport(vport);
4514 spin_lock_irq(shost->host_lock);
4515 ndlp->nlp_flag |= NLP_DELAY_TMO;
4516 spin_unlock_irq(shost->host_lock);
4517 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
4518 vport->port_state = LPFC_FDISC;
4521 * Otherwise, request the port to rediscover
4522 * the entire FCF table for a fast recovery
4523 * from the possible case that the current FCF
4524 * is no longer valid, if we are not already
4525 * in the FCF failover process.
4527 spin_lock_irq(&phba->hbalock);
4528 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4529 spin_unlock_irq(&phba->hbalock);
4532 /* Mark the fast failover process in progress */
4533 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
4534 spin_unlock_irq(&phba->hbalock);
4535 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
4537 "2773 Start FCF failover per CVL, "
4538 "evt_tag:x%x\n", acqe_fip->event_tag);
4539 rc = lpfc_sli4_redisc_fcf_table(phba);
4541 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4543 "2774 Issue FCF rediscover "
4544 "mabilbox command failed, "
4545 "through to CVL event\n");
4546 spin_lock_irq(&phba->hbalock);
4547 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
4548 spin_unlock_irq(&phba->hbalock);
4550 * As a last resort, retry on the
4551 * currently registered FCF entry.
4553 lpfc_retry_pport_discovery(phba);
4556 * Reset FCF roundrobin bmask for new discovery.
4559 lpfc_sli4_clear_fcf_rr_bmask(phba);
4563 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4564 "0288 Unknown FCoE event type 0x%x event tag "
4565 "0x%x\n", event_type, acqe_fip->event_tag);
4571 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
4572 * @phba: pointer to lpfc hba data structure.
4573 * @acqe_dcbx: pointer to the async DCBX completion queue entry.
4575 * This routine is to handle the SLI4 asynchronous dcbx event.
4578 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
4579 struct lpfc_acqe_dcbx *acqe_dcbx)
4581 phba->fc_eventTag = acqe_dcbx->event_tag;
4582 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4583 "0290 The SLI4 DCBX asynchronous event is not "
4588 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
4589 * @phba: pointer to lpfc hba data structure.
4590 * @acqe_grp5: pointer to the async grp5 completion queue entry.
4592 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
4593 * is an asynchronous notification of a logical link speed change. The port
4594 * reports the logical link speed in units of 10Mbps.
4597 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
4598 struct lpfc_acqe_grp5 *acqe_grp5)
4600 uint16_t prev_ll_spd;
4602 phba->fc_eventTag = acqe_grp5->event_tag;
4603 phba->fcoe_eventtag = acqe_grp5->event_tag;
4604 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
4605 phba->sli4_hba.link_state.logical_speed =
4606 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
4607 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4608 "2789 GRP5 Async Event: Updating logical link speed "
4609 "from %dMbps to %dMbps\n", prev_ll_spd,
4610 phba->sli4_hba.link_state.logical_speed);
4614 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
4615 * @phba: pointer to lpfc hba data structure.
4617 * This routine is invoked by the worker thread to process all the pending
4618 * SLI4 asynchronous events.
4620 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
4622 struct lpfc_cq_event *cq_event;
4624 /* First, declare the async event has been handled */
4625 spin_lock_irq(&phba->hbalock);
4626 phba->hba_flag &= ~ASYNC_EVENT;
4627 spin_unlock_irq(&phba->hbalock);
4628 /* Now, handle all the async events */
4629 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
4630 /* Get the first event from the head of the event queue */
4631 spin_lock_irq(&phba->hbalock);
4632 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
4633 cq_event, struct lpfc_cq_event, list);
4634 spin_unlock_irq(&phba->hbalock);
4635 /* Process the asynchronous event */
4636 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
4637 case LPFC_TRAILER_CODE_LINK:
4638 lpfc_sli4_async_link_evt(phba,
4639 &cq_event->cqe.acqe_link);
4641 case LPFC_TRAILER_CODE_FCOE:
4642 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
4644 case LPFC_TRAILER_CODE_DCBX:
4645 lpfc_sli4_async_dcbx_evt(phba,
4646 &cq_event->cqe.acqe_dcbx);
4648 case LPFC_TRAILER_CODE_GRP5:
4649 lpfc_sli4_async_grp5_evt(phba,
4650 &cq_event->cqe.acqe_grp5);
4652 case LPFC_TRAILER_CODE_FC:
4653 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
4655 case LPFC_TRAILER_CODE_SLI:
4656 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
4659 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4660 "1804 Invalid asynchrous event code: "
4661 "x%x\n", bf_get(lpfc_trailer_code,
4662 &cq_event->cqe.mcqe_cmpl));
4665 /* Free the completion event processed to the free pool */
4666 lpfc_sli4_cq_event_release(phba, cq_event);
4671 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
4672 * @phba: pointer to lpfc hba data structure.
4674 * This routine is invoked by the worker thread to process FCF table
4675 * rediscovery pending completion event.
4677 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
4681 spin_lock_irq(&phba->hbalock);
4682 /* Clear FCF rediscovery timeout event */
4683 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
4684 /* Clear driver fast failover FCF record flag */
4685 phba->fcf.failover_rec.flag = 0;
4686 /* Set state for FCF fast failover */
4687 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
4688 spin_unlock_irq(&phba->hbalock);
4690 /* Scan FCF table from the first entry to re-discover SAN */
4691 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4692 "2777 Start post-quiescent FCF table scan\n");
4693 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
4695 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4696 "2747 Issue FCF scan read FCF mailbox "
4697 "command failed 0x%x\n", rc);
4701 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
4702 * @phba: pointer to lpfc hba data structure.
4703 * @dev_grp: The HBA PCI-Device group number.
4705 * This routine is invoked to set up the per HBA PCI-Device group function
4706 * API jump table entries.
4708 * Return: 0 if success, otherwise -ENODEV
4711 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4715 /* Set up lpfc PCI-device group */
4716 phba->pci_dev_grp = dev_grp;
4718 /* The LPFC_PCI_DEV_OC uses SLI4 */
4719 if (dev_grp == LPFC_PCI_DEV_OC)
4720 phba->sli_rev = LPFC_SLI_REV4;
4722 /* Set up device INIT API function jump table */
4723 rc = lpfc_init_api_table_setup(phba, dev_grp);
4726 /* Set up SCSI API function jump table */
4727 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
4730 /* Set up SLI API function jump table */
4731 rc = lpfc_sli_api_table_setup(phba, dev_grp);
4734 /* Set up MBOX API function jump table */
4735 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
4743 * lpfc_log_intr_mode - Log the active interrupt mode
4744 * @phba: pointer to lpfc hba data structure.
4745 * @intr_mode: active interrupt mode adopted.
4747 * This routine is invoked to log the currently used active interrupt mode
4750 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
4752 switch (intr_mode) {
4754 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4755 "0470 Enable INTx interrupt mode.\n");
4758 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4759 "0481 Enabled MSI interrupt mode.\n");
4762 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4763 "0480 Enabled MSI-X interrupt mode.\n");
4766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4767 "0482 Illegal interrupt mode.\n");
4774 * lpfc_enable_pci_dev - Enable a generic PCI device.
4775 * @phba: pointer to lpfc hba data structure.
4777 * This routine is invoked to enable the PCI device that is common to all
4782 * other values - error
4785 lpfc_enable_pci_dev(struct lpfc_hba *phba)
4787 struct pci_dev *pdev;
4790 /* Obtain PCI device reference */
4794 pdev = phba->pcidev;
4795 /* Select PCI BARs */
4796 bars = pci_select_bars(pdev, IORESOURCE_MEM);
4797 /* Enable PCI device */
4798 if (pci_enable_device_mem(pdev))
4800 /* Request PCI resource for the device */
4801 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
4802 goto out_disable_device;
4803 /* Set up device as PCI master and save state for EEH */
4804 pci_set_master(pdev);
4805 pci_try_set_mwi(pdev);
4806 pci_save_state(pdev);
4808 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
4809 if (pci_is_pcie(pdev))
4810 pdev->needs_freset = 1;
4815 pci_disable_device(pdev);
4817 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4818 "1401 Failed to enable pci device, bars:x%x\n", bars);
4823 * lpfc_disable_pci_dev - Disable a generic PCI device.
4824 * @phba: pointer to lpfc hba data structure.
4826 * This routine is invoked to disable the PCI device that is common to all
4830 lpfc_disable_pci_dev(struct lpfc_hba *phba)
4832 struct pci_dev *pdev;
4835 /* Obtain PCI device reference */
4839 pdev = phba->pcidev;
4840 /* Select PCI BARs */
4841 bars = pci_select_bars(pdev, IORESOURCE_MEM);
4842 /* Release PCI resource and disable PCI device */
4843 pci_release_selected_regions(pdev, bars);
4844 pci_disable_device(pdev);
4850 * lpfc_reset_hba - Reset a hba
4851 * @phba: pointer to lpfc hba data structure.
4853 * This routine is invoked to reset a hba device. It brings the HBA
4854 * offline, performs a board restart, and then brings the board back
4855 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
4856 * outstanding mailbox commands.
4859 lpfc_reset_hba(struct lpfc_hba *phba)
4861 /* If resets are disabled then set error state and return. */
4862 if (!phba->cfg_enable_hba_reset) {
4863 phba->link_state = LPFC_HBA_ERROR;
4866 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
4867 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
4869 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
4871 lpfc_sli_brdrestart(phba);
4873 lpfc_unblock_mgmt_io(phba);
4877 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
4878 * @phba: pointer to lpfc hba data structure.
4880 * This function reads the PCI SR-IOV extended capability of the physical
4881 * function to determine the total number of virtual functions (TotalVFs)
4882 * that it supports. The TotalVFs value from the capability is returned;
4883 * a device without SR-IOV support reports no virtual functions.
4887 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
4889 struct pci_dev *pdev = phba->pcidev;
4893 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
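/* TotalVFs lives in the SR-IOV extended capability; read it if the capability is present */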
4897 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
4902 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4903 * @phba: pointer to lpfc hba data structure.
4904 * @nr_vfn: number of virtual functions to be enabled.
4906 * This function enables the PCI SR-IOV virtual functions to a physical
4907 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
4908 * enable the number of virtual functions to the physical function. As
4909 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
4910 * API call is not considered an error condition for most devices.
4913 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4915 struct pci_dev *pdev = phba->pcidev;
4916 uint16_t max_nr_vfn;
4919 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
4920 if (nr_vfn > max_nr_vfn) {
4921 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4922 "3057 Requested vfs (%d) greater than "
4923 "supported vfs (%d)", nr_vfn, max_nr_vfn);
4927 rc = pci_enable_sriov(pdev, nr_vfn);
4929 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4930 "2806 Failed to enable sriov on this device "
4931 "with vfn number nr_vf:%d, rc:%d\n",
4934 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4935 "2807 Successful enable sriov on this device "
4936 "with vfn number nr_vf:%d\n", nr_vfn);
4941 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
4942 * @phba: pointer to lpfc hba data structure.
4944 * This routine is invoked to set up the driver internal resources specific to
4945 * support the SLI-3 HBA device it attached to.
4949 * other values - error
4952 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4954 struct lpfc_sli *psli;
4958 * Initialize timers used by driver
4961 /* Heartbeat timer */
4962 init_timer(&phba->hb_tmofunc);
4963 phba->hb_tmofunc.function = lpfc_hb_timeout;
4964 phba->hb_tmofunc.data = (unsigned long)phba;
4967 /* MBOX heartbeat timer */
4968 init_timer(&psli->mbox_tmo);
4969 psli->mbox_tmo.function = lpfc_mbox_timeout;
4970 psli->mbox_tmo.data = (unsigned long) phba;
4971 /* FCP polling mode timer */
4972 init_timer(&phba->fcp_poll_timer);
4973 phba->fcp_poll_timer.function = lpfc_poll_timeout;
4974 phba->fcp_poll_timer.data = (unsigned long) phba;
4975 /* Fabric block timer */
4976 init_timer(&phba->fabric_block_timer);
4977 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4978 phba->fabric_block_timer.data = (unsigned long) phba;
4979 /* EA polling mode timer */
4980 init_timer(&phba->eratt_poll);
4981 phba->eratt_poll.function = lpfc_poll_eratt;
4982 phba->eratt_poll.data = (unsigned long) phba;
4984 /* Host attention work mask setup */
4985 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4986 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
4988 /* Get all the module params for configuring this host */
4989 lpfc_get_cfgparam(phba);
4990 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4991 phba->menlo_flag |= HBA_MENLO_SUPPORT;
4992 /* check for menlo minimum sg count */
4993 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
4994 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4997 if (!phba->sli.ring)
4998 phba->sli.ring = kzalloc(LPFC_SLI3_MAX_RING *
4999 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
5000 if (!phba->sli.ring)
5004 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
5005 * used to create the sg_dma_buf_pool must be dynamically calculated.
5008 /* Initialize the host templates with the configured values. */
5009 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5010 lpfc_template_s3.sg_tablesize = phba->cfg_sg_seg_cnt;
5012 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
5013 if (phba->cfg_enable_bg) {
5015 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
5016 * the FCP rsp, and a BDE for each. Since we have no control
5017 * over how many protection data segments the SCSI Layer
5018 * will hand us (i.e. there could be one for every block
5019 * in the IO), we just allocate enough BDEs to accommodate
5020 * our max amount and we need to limit lpfc_sg_seg_cnt to
5021 * minimize the risk of running out.
5023 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5024 sizeof(struct fcp_rsp) +
5025 (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
5027 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
5028 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
5030 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
5031 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
5034 * The scsi_buf for a regular I/O will hold the FCP cmnd,
5035 * the FCP rsp, a BDE for each, and a BDE for up to
5036 * cfg_sg_seg_cnt data segments.
5038 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5039 sizeof(struct fcp_rsp) +
5040 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
5042 /* Total BDEs in BPL for scsi_sg_list */
5043 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
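/* For example (hypothetical cfg_sg_seg_cnt of 64): the BPL then holds
 * 64 data BDEs plus the 2 reserved BDEs, so cfg_total_seg_cnt is 66.
 */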
5046 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5047 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
5048 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5049 phba->cfg_total_seg_cnt);
5051 phba->max_vpi = LPFC_MAX_VPI;
5052 /* This will be set to correct value after config_port mbox */
5053 phba->max_vports = 0;
5056 * Initialize the SLI Layer to run with lpfc HBAs.
5058 lpfc_sli_setup(phba);
5059 lpfc_sli_queue_setup(phba);
5061 /* Allocate device driver memory */
5062 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
5066 * Enable sr-iov virtual functions if supported and configured
5067 * through the module parameter.
5069 if (phba->cfg_sriov_nr_virtfn > 0) {
5070 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
5071 phba->cfg_sriov_nr_virtfn);
5073 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5074 "2808 Requested number of SR-IOV "
5075 "virtual functions (%d) is not "
5077 phba->cfg_sriov_nr_virtfn);
5078 phba->cfg_sriov_nr_virtfn = 0;
5086 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
5087 * @phba: pointer to lpfc hba data structure.
5089 * This routine is invoked to unset the driver internal resources set up
5090 * specific for supporting the SLI-3 HBA device it attached to.
5093 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
5095 /* Free device driver memory allocated */
5096 lpfc_mem_free_all(phba);
5102 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
5103 * @phba: pointer to lpfc hba data structure.
5105 * This routine is invoked to set up the driver internal resources specific to
5106 * support the SLI-4 HBA device it attached to.
5110 * other values - error
5113 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5115 struct lpfc_vector_map_info *cpup;
5116 struct lpfc_sli *psli;
5117 LPFC_MBOXQ_t *mboxq;
5118 int rc, i, hbq_count, max_buf_size;
5119 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
5120 struct lpfc_mqe *mqe;
5122 int fof_vectors = 0;
5124 /* Get all the module params for configuring this host */
5125 lpfc_get_cfgparam(phba);
5127 /* Before proceeding, wait for POST done and device ready */
5128 rc = lpfc_sli4_post_status_check(phba);
5133 * Initialize timers used by driver
5136 /* Heartbeat timer */
5137 init_timer(&phba->hb_tmofunc);
5138 phba->hb_tmofunc.function = lpfc_hb_timeout;
5139 phba->hb_tmofunc.data = (unsigned long)phba;
5140 init_timer(&phba->rrq_tmr);
5141 phba->rrq_tmr.function = lpfc_rrq_timeout;
5142 phba->rrq_tmr.data = (unsigned long)phba;
5145 /* MBOX heartbeat timer */
5146 init_timer(&psli->mbox_tmo);
5147 psli->mbox_tmo.function = lpfc_mbox_timeout;
5148 psli->mbox_tmo.data = (unsigned long) phba;
5149 /* Fabric block timer */
5150 init_timer(&phba->fabric_block_timer);
5151 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
5152 phba->fabric_block_timer.data = (unsigned long) phba;
5153 /* EA polling mode timer */
5154 init_timer(&phba->eratt_poll);
5155 phba->eratt_poll.function = lpfc_poll_eratt;
5156 phba->eratt_poll.data = (unsigned long) phba;
5157 /* FCF rediscover timer */
5158 init_timer(&phba->fcf.redisc_wait);
5159 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
5160 phba->fcf.redisc_wait.data = (unsigned long)phba;
5163 * Control structure for handling external multi-buffer mailbox
5164 * command pass-through.
5166 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
5167 sizeof(struct lpfc_mbox_ext_buf_ctx));
5168 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
5170 phba->max_vpi = LPFC_MAX_VPI;
5172 /* This will be set to correct value after the read_config mbox */
5173 phba->max_vports = 0;
5175 /* Program the default value of vlan_id and fc_map */
5176 phba->valid_vlan = 0;
5177 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5178 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5179 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5182 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
5183 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
5185 if (!phba->sli.ring)
5186 phba->sli.ring = kzalloc(
5187 (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
5188 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
5189 if (!phba->sli.ring)
5193 * It doesn't matter what family our adapter is in, we are
5194 * limited to 2 pages (512 SGEs) for our SGL.
5195 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
5197 max_buf_size = (2 * SLI4_PAGE_SIZE);
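/* e.g. with a 4KB SLI4 page size (typical), this reserves 8KB of SGE space */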
5198 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
5199 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
5202 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
5203 * used to create the sg_dma_buf_pool must be dynamically calculated.
5206 if (phba->cfg_enable_bg) {
5208 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
5209 * the FCP rsp, and an SGE for each. Since we have no control
5210 * over how many protection data segments the SCSI Layer
5211 * will hand us (i.e. there could be one for every block
5212 * in the IO), we just allocate enough SGEs to accommodate
5213 * our max amount and we need to limit lpfc_sg_seg_cnt to
5214 * minimize the risk of running out.
5216 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5217 sizeof(struct fcp_rsp) + max_buf_size;
5219 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
5220 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
5222 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
5223 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
5226 * The scsi_buf for a regular I/O will hold the FCP cmnd,
5227 * the FCP rsp, a SGE for each, and a SGE for up to
5228 * cfg_sg_seg_cnt data segments.
5230 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5231 sizeof(struct fcp_rsp) +
5232 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
5234 /* Total SGEs for scsi_sg_list */
5235 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
5237 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
5238 * to post 1 page for the SGL.
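* For example (assuming 16-byte SGEs), 256 SGEs fit exactly in one 4KB page.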
5242 /* Initialize the host templates with the updated values. */
5243 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5244 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5246 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
5247 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
5249 phba->cfg_sg_dma_buf_size =
5250 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
5252 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5253 "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
5254 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5255 phba->cfg_total_seg_cnt);
5257 /* Initialize buffer queue management fields */
5258 hbq_count = lpfc_sli_hbq_count();
5259 for (i = 0; i < hbq_count; ++i)
5260 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5261 INIT_LIST_HEAD(&phba->rb_pend_list);
5262 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
5263 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
5266 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
5268 /* Initialize the Abort scsi buffer list used by driver */
5269 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
5270 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
5271 /* This abort list used by worker thread */
5272 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
5275 * Initialize driver internal slow-path work queues
5278 /* Driver internal slow-path CQ Event pool */
5279 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
5280 /* Response IOCB work queue list */
5281 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
5282 /* Asynchronous event CQ Event work queue list */
5283 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
5284 /* Fast-path XRI aborted CQ Event work queue list */
5285 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
5286 /* Slow-path XRI aborted CQ Event work queue list */
5287 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
5288 /* Receive queue CQ Event work queue list */
5289 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
5291 /* Initialize extent block lists. */
5292 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
5293 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
5294 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
5295 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
5297 /* Initialize the driver internal SLI layer lists. */
5298 lpfc_sli_setup(phba);
5299 lpfc_sli_queue_setup(phba);
5301 /* Allocate device driver memory */
5302 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
5306 /* IF Type 2 ports get initialized now. */
5307 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
5308 LPFC_SLI_INTF_IF_TYPE_2) {
5309 rc = lpfc_pci_function_reset(phba);
5312 phba->temp_sensor_support = 1;
5315 /* Create the bootstrap mailbox command */
5316 rc = lpfc_create_bootstrap_mbox(phba);
5320 /* Set up the host's endian order with the device. */
5321 rc = lpfc_setup_endian_order(phba);
5323 goto out_free_bsmbx;
5325 /* Set up the hba's configuration parameters. */
5326 rc = lpfc_sli4_read_config(phba);
5328 goto out_free_bsmbx;
5329 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
5331 goto out_free_bsmbx;
5333 /* IF Type 0 ports get initialized now. */
5334 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
5335 LPFC_SLI_INTF_IF_TYPE_0) {
5336 rc = lpfc_pci_function_reset(phba);
5338 goto out_free_bsmbx;
5341 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
5345 goto out_free_bsmbx;
5348 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
5349 lpfc_supported_pages(mboxq);
5350 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5352 mqe = &mboxq->u.mqe;
5353 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
5354 LPFC_MAX_SUPPORTED_PAGES);
5355 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
5356 switch (pn_page[i]) {
5357 case LPFC_SLI4_PARAMETERS:
5358 phba->sli4_hba.pc_sli4_params.supported = 1;
5364 /* Read the port's SLI4 Parameters capabilities if supported. */
5365 if (phba->sli4_hba.pc_sli4_params.supported)
5366 rc = lpfc_pc_sli4_params_get(phba, mboxq);
5368 mempool_free(mboxq, phba->mbox_mem_pool);
5370 goto out_free_bsmbx;
5374 * Get sli4 parameters that override parameters from Port capabilities.
5375 * If this call fails, it isn't critical unless the SLI4 parameters come back in conflict.
5378 rc = lpfc_get_sli4_parameters(phba, mboxq);
5380 if (phba->sli4_hba.extents_in_use &&
5381 phba->sli4_hba.rpi_hdrs_in_use) {
5382 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5383 "2999 Unsupported SLI4 Parameters "
5384 "Extents and RPI headers enabled.\n");
5385 goto out_free_bsmbx;
5388 mempool_free(mboxq, phba->mbox_mem_pool);
5390 /* Verify OAS is supported */
5391 lpfc_sli4_oas_verify(phba);
5395 /* Verify all the SLI4 queues */
5396 rc = lpfc_sli4_queue_verify(phba);
5398 goto out_free_bsmbx;
5400 /* Create driver internal CQE event pool */
5401 rc = lpfc_sli4_cq_event_pool_create(phba);
5403 goto out_free_bsmbx;
5405 /* Initialize sgl lists per host */
5406 lpfc_init_sgl_list(phba);
5408 /* Allocate and initialize active sgl array */
5409 rc = lpfc_init_active_sgl_array(phba);
5411 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5412 "1430 Failed to initialize sgl list.\n");
5413 goto out_destroy_cq_event_pool;
5415 rc = lpfc_sli4_init_rpi_hdrs(phba);
5417 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5418 "1432 Failed to initialize rpi headers.\n");
5419 goto out_free_active_sgl;
5422 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
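/* One bit per FCF table index, rounded up to a whole number of longs */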
5423 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
5424 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
5426 if (!phba->fcf.fcf_rr_bmask) {
5427 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5428 "2759 Failed allocate memory for FCF round "
5429 "robin failover bmask\n");
5431 goto out_remove_rpi_hdrs;
5434 phba->sli4_hba.fcp_eq_hdl =
5435 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
5436 (fof_vectors + phba->cfg_fcp_io_channel)),
5438 if (!phba->sli4_hba.fcp_eq_hdl) {
5439 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5440 "2572 Failed allocate memory for "
5441 "fast-path per-EQ handle array\n");
5443 goto out_free_fcf_rr_bmask;
5446 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
5448 phba->cfg_fcp_io_channel)), GFP_KERNEL);
5449 if (!phba->sli4_hba.msix_entries) {
5450 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5451 "2573 Failed allocate memory for msi-x "
5452 "interrupt vector entries\n");
5454 goto out_free_fcp_eq_hdl;
5457 phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
5458 phba->sli4_hba.num_present_cpu),
5460 if (!phba->sli4_hba.cpu_map) {
5461 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5462 "3327 Failed allocate memory for msi-x "
5463 "interrupt vector mapping\n");
5467 if (lpfc_used_cpu == NULL) {
5468 lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu),
5470 if (!lpfc_used_cpu) {
5471 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5472 "3335 Failed allocate memory for msi-x "
5473 "interrupt vector mapping\n");
5474 kfree(phba->sli4_hba.cpu_map);
5478 for (i = 0; i < lpfc_present_cpu; i++)
5479 lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
5482 /* Initialize io channels for round robin */
5483 cpup = phba->sli4_hba.cpu_map;
5485 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
5486 cpup->channel_id = rc;
5488 if (rc >= phba->cfg_fcp_io_channel)
5493 * Enable sr-iov virtual functions if supported and configured
5494 * through the module parameter.
5496 if (phba->cfg_sriov_nr_virtfn > 0) {
5497 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
5498 phba->cfg_sriov_nr_virtfn);
5500 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5501 "3020 Requested number of SR-IOV "
5502 "virtual functions (%d) is not "
5504 phba->cfg_sriov_nr_virtfn);
5505 phba->cfg_sriov_nr_virtfn = 0;
5512 kfree(phba->sli4_hba.msix_entries);
5513 out_free_fcp_eq_hdl:
5514 kfree(phba->sli4_hba.fcp_eq_hdl);
5515 out_free_fcf_rr_bmask:
5516 kfree(phba->fcf.fcf_rr_bmask);
5517 out_remove_rpi_hdrs:
5518 lpfc_sli4_remove_rpi_hdrs(phba);
5519 out_free_active_sgl:
5520 lpfc_free_active_sgl(phba);
5521 out_destroy_cq_event_pool:
5522 lpfc_sli4_cq_event_pool_destroy(phba);
5524 lpfc_destroy_bootstrap_mbox(phba);
5526 lpfc_mem_free(phba);
5531 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
5532 * @phba: pointer to lpfc hba data structure.
5534 * This routine is invoked to unset the driver internal resources set up
5535 * specific for supporting the SLI-4 HBA device it attached to.
5538 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
5540 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
5542 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
5543 kfree(phba->sli4_hba.cpu_map);
5544 phba->sli4_hba.num_present_cpu = 0;
5545 phba->sli4_hba.num_online_cpu = 0;
5546 phba->sli4_hba.curr_disp_cpu = 0;
5548 /* Free memory allocated for msi-x interrupt vector entries */
5549 kfree(phba->sli4_hba.msix_entries);
5551 /* Free memory allocated for fast-path work queue handles */
5552 kfree(phba->sli4_hba.fcp_eq_hdl);
5554 /* Free the allocated rpi headers. */
5555 lpfc_sli4_remove_rpi_hdrs(phba);
5556 lpfc_sli4_remove_rpis(phba);
5558 /* Free eligible FCF index bmask */
5559 kfree(phba->fcf.fcf_rr_bmask);
5561 /* Free the ELS sgl list */
5562 lpfc_free_active_sgl(phba);
5563 lpfc_free_els_sgl_list(phba);
5565 /* Free the completion queue EQ event pool */
5566 lpfc_sli4_cq_event_release_all(phba);
5567 lpfc_sli4_cq_event_pool_destroy(phba);
5569 /* Release resource identifiers. */
5570 lpfc_sli4_dealloc_resource_identifiers(phba);
5572 /* Free the bsmbx region. */
5573 lpfc_destroy_bootstrap_mbox(phba);
5575 /* Free the SLI Layer memory with SLI4 HBAs */
5576 lpfc_mem_free_all(phba);
5578 /* Free the current connect table */
5579 list_for_each_entry_safe(conn_entry, next_conn_entry,
5580 &phba->fcf_conn_rec_list, list) {
5581 list_del_init(&conn_entry->list);
5589 * lpfc_init_api_table_setup - Set up init api function jump table
5590 * @phba: The hba struct for which this call is being executed.
5591 * @dev_grp: The HBA PCI-Device group number.
5593 * This routine sets up the device INIT interface API function jump table
5596 * Returns: 0 - success, -ENODEV - failure.
5599 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5601 phba->lpfc_hba_init_link = lpfc_hba_init_link;
5602 phba->lpfc_hba_down_link = lpfc_hba_down_link;
5603 phba->lpfc_selective_reset = lpfc_selective_reset;
5605 case LPFC_PCI_DEV_LP:
5606 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
5607 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
5608 phba->lpfc_stop_port = lpfc_stop_port_s3;
5610 case LPFC_PCI_DEV_OC:
5611 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
5612 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
5613 phba->lpfc_stop_port = lpfc_stop_port_s4;
5616 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5617 "1431 Invalid HBA PCI-device group: 0x%x\n",
5626 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
5627 * @phba: pointer to lpfc hba data structure.
5629 * This routine is invoked to set up the driver internal resources before the
5630 * device specific resource setup to support the HBA device it attached to.
5634 * other values - error
5637 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5640 * Driver resources common to all SLI revisions
5642 atomic_set(&phba->fast_event_count, 0);
5643 spin_lock_init(&phba->hbalock);
5645 /* Initialize ndlp management spinlock */
5646 spin_lock_init(&phba->ndlp_lock);
5648 INIT_LIST_HEAD(&phba->port_list);
5649 INIT_LIST_HEAD(&phba->work_list);
5650 init_waitqueue_head(&phba->wait_4_mlo_m_q);
5652 /* Initialize the wait queue head for the kernel thread */
5653 init_waitqueue_head(&phba->work_waitq);
5655 /* Initialize the scsi buffer list used by driver for scsi IO */
5656 spin_lock_init(&phba->scsi_buf_list_get_lock);
5657 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
5658 spin_lock_init(&phba->scsi_buf_list_put_lock);
5659 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
5661 /* Initialize the fabric iocb list */
5662 INIT_LIST_HEAD(&phba->fabric_iocb_list);
5664 /* Initialize list to save ELS buffers */
5665 INIT_LIST_HEAD(&phba->elsbuf);
5667 /* Initialize FCF connection rec list */
5668 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
5670 /* Initialize OAS configuration list */
5671 spin_lock_init(&phba->devicelock);
5672 INIT_LIST_HEAD(&phba->luns);
5678 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
5679 * @phba: pointer to lpfc hba data structure.
5681 * This routine is invoked to set up the driver internal resources after the
5682 * device specific resource setup to support the HBA device it attached to.
5686 * other values - error
5689 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
5693 /* Startup the kernel thread for this host adapter. */
5694 phba->worker_thread = kthread_run(lpfc_do_work, phba,
5695 "lpfc_worker_%d", phba->brd_no);
5696 if (IS_ERR(phba->worker_thread)) {
5697 error = PTR_ERR(phba->worker_thread);
5705 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
5706 * @phba: pointer to lpfc hba data structure.
5708 * This routine is invoked to unset the driver internal resources set up after
5709 * the device specific resource setup for supporting the HBA device it
5713 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
5715 /* Stop kernel worker thread */
5716 kthread_stop(phba->worker_thread);
5720 * lpfc_free_iocb_list - Free iocb list.
5721 * @phba: pointer to lpfc hba data structure.
5723 * This routine is invoked to free the driver's IOCB list and memory.
5726 lpfc_free_iocb_list(struct lpfc_hba *phba)
5728 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
5730 spin_lock_irq(&phba->hbalock);
5731 list_for_each_entry_safe(iocbq_entry, iocbq_next,
5732 &phba->lpfc_iocb_list, list) {
5733 list_del(&iocbq_entry->list);
5735 phba->total_iocbq_bufs--;
5737 spin_unlock_irq(&phba->hbalock);
5743 * lpfc_init_iocb_list - Allocate and initialize iocb list.
5744 * @phba: pointer to lpfc hba data structure.
5746 * This routine is invoked to allocate and initialize the driver's IOCB
5747 * list and set up the IOCB tag array accordingly.
5751 * other values - error
5754 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
5756 struct lpfc_iocbq *iocbq_entry = NULL;
5760 /* Initialize and populate the iocb list per host. */
5761 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
5762 for (i = 0; i < iocb_count; i++) {
5763 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
5764 if (iocbq_entry == NULL) {
5765 printk(KERN_ERR "%s: only allocated %d iocbs of "
5766 "expected %d count. Unloading driver.\n",
5767 __func__, i, LPFC_IOCB_LIST_CNT);
5768 goto out_free_iocbq;
5771 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
5774 printk(KERN_ERR "%s: failed to allocate IOTAG. "
5775 "Unloading driver.\n", __func__);
5776 goto out_free_iocbq;
5778 iocbq_entry->sli4_lxritag = NO_XRI;
5779 iocbq_entry->sli4_xritag = NO_XRI;
5781 spin_lock_irq(&phba->hbalock);
5782 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
5783 phba->total_iocbq_bufs++;
5784 spin_unlock_irq(&phba->hbalock);
5790 lpfc_free_iocb_list(phba);
5796 * lpfc_free_sgl_list - Free a given sgl list.
5797 * @phba: pointer to lpfc hba data structure.
5798 * @sglq_list: pointer to the head of sgl list.
5800 * This routine is invoked to free a given sgl list and its memory.
5803 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
5805 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
5807 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
5808 list_del(&sglq_entry->list);
5809 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
5815 * lpfc_free_els_sgl_list - Free els sgl list.
5816 * @phba: pointer to lpfc hba data structure.
5818 * This routine is invoked to free the driver's els sgl list and memory.
5821 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
5823 LIST_HEAD(sglq_list);
5824 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5826 /* Retrieve all els sgls from driver list */
5827 spin_lock_irq(&phba->hbalock);
5828 spin_lock(&pring->ring_lock);
5829 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
5830 spin_unlock(&pring->ring_lock);
5831 spin_unlock_irq(&phba->hbalock);
5833 /* Now free the sgl list */
5834 lpfc_free_sgl_list(phba, &sglq_list);
5838 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
5839 * @phba: pointer to lpfc hba data structure.
5841 * This routine is invoked to allocate the driver's active sgl memory.
5842 * This array will hold the sglq_entry's for active IOs.
5845 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
5848 size = sizeof(struct lpfc_sglq *);
5849 size *= phba->sli4_hba.max_cfg_param.max_xri;
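/* One sglq pointer slot per possible XRI */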
5851 phba->sli4_hba.lpfc_sglq_active_list =
5852 kzalloc(size, GFP_KERNEL);
5853 if (!phba->sli4_hba.lpfc_sglq_active_list)
5859 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
5860 * @phba: pointer to lpfc hba data structure.
5862 * This routine is invoked to walk through the array of active sglq entries
5863 * and free all of the resources.
5864 * This is just a placeholder for now.
5867 lpfc_free_active_sgl(struct lpfc_hba *phba)
5869 kfree(phba->sli4_hba.lpfc_sglq_active_list);
5873 * lpfc_init_sgl_list - Allocate and initialize sgl list.
5874 * @phba: pointer to lpfc hba data structure.
5876 * This routine is invoked to allocate and initialize the driver's sgl
5877 * list and set up the sgl xritag tag array accordingly.
5881 lpfc_init_sgl_list(struct lpfc_hba *phba)
5883 /* Initialize and populate the sglq list per host/VF. */
5884 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
5885 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
5887 /* els xri-sgl book keeping */
5888 phba->sli4_hba.els_xri_cnt = 0;
5890 /* scsi xri-buffer book keeping */
5891 phba->sli4_hba.scsi_xri_cnt = 0;
5895 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
5896 * @phba: pointer to lpfc hba data structure.
5898 * This routine is invoked to post rpi header templates to the
5899 * port for those SLI4 ports that do not support extents. This routine
5900 * posts a PAGE_SIZE memory region to the port to hold up to
5901 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
5902 * and should be called only when interrupts are disabled.
5906 * -ERROR - otherwise.
5909 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
5912 struct lpfc_rpi_hdr *rpi_hdr;
5914 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
5915 if (!phba->sli4_hba.rpi_hdrs_in_use)
5917 if (phba->sli4_hba.extents_in_use)
5920 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
5922 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5923 "0391 Error during rpi post operation\n");
5924 lpfc_sli4_remove_rpis(phba);
5932 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
5933 * @phba: pointer to lpfc hba data structure.
5935 * This routine is invoked to allocate a single 4KB memory region to
5936 * support rpis and stores them in the phba. This single region
5937 * provides support for up to 64 rpis. The region is used globally
5941 * A valid rpi hdr on success.
5942 * A NULL pointer on any failure.
5944 struct lpfc_rpi_hdr *
5945 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5947 uint16_t rpi_limit, curr_rpi_range;
5948 struct lpfc_dmabuf *dmabuf;
5949 struct lpfc_rpi_hdr *rpi_hdr;
5953 * If the SLI4 port supports extents, posting the rpi header isn't
5954 * required. Set the expected maximum count and let the actual value
5955 * get set when extents are fully allocated.
5957 if (!phba->sli4_hba.rpi_hdrs_in_use)
5959 if (phba->sli4_hba.extents_in_use)
5962 /* The limit on the logical index is just the max_rpi count. */
5963 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
5964 phba->sli4_hba.max_cfg_param.max_rpi - 1;
5966 spin_lock_irq(&phba->hbalock);
5968 * Establish the starting RPI in this header block. The starting
5969 * rpi is normalized to a zero base because the physical rpi is
5972 curr_rpi_range = phba->sli4_hba.next_rpi;
5973 spin_unlock_irq(&phba->hbalock);
5976 * The port has a limited number of rpis. The increment here
5977 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
5978 * and to allow the full max_rpi range per port.
5980 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
5981 rpi_count = rpi_limit - curr_rpi_range;
5983 rpi_count = LPFC_RPI_HDR_COUNT;
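/* For example, if only 40 RPIs remain before rpi_limit (hypothetical), this
 * header covers just those 40 rather than a full LPFC_RPI_HDR_COUNT block.
 */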
5988 * First allocate the protocol header region for the port. The
5989 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
5991 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5995 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
5996 LPFC_HDR_TEMPLATE_SIZE,
5997 &dmabuf->phys, GFP_KERNEL);
5998 if (!dmabuf->virt) {
6000 goto err_free_dmabuf;
6003 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
6005 goto err_free_coherent;
6008 /* Save the rpi header data for cleanup later. */
6009 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
6011 goto err_free_coherent;
6013 rpi_hdr->dmabuf = dmabuf;
6014 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
6015 rpi_hdr->page_count = 1;
6016 spin_lock_irq(&phba->hbalock);
6018 /* The rpi_hdr stores the logical index only. */
6019 rpi_hdr->start_rpi = curr_rpi_range;
6020 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
6023 * The next_rpi stores the next logical module-64 rpi value used
6024 * to post physical rpis in subsequent rpi postings.
6026 phba->sli4_hba.next_rpi += rpi_count;
6027 spin_unlock_irq(&phba->hbalock);
6031 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
6032 dmabuf->virt, dmabuf->phys);
6039 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
6040 * @phba: pointer to lpfc hba data structure.
6042 * This routine is invoked to remove all memory resources allocated
6043 * to support rpis for SLI4 ports not supporting extents. This routine
6044 * presumes the caller has released all rpis consumed by fabric or port
6045 * logins and is prepared to have the header pages removed.
6048 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
6050 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
6052 if (!phba->sli4_hba.rpi_hdrs_in_use)
6055 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
6056 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
6057 list_del(&rpi_hdr->list);
6058 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
6059 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
6060 kfree(rpi_hdr->dmabuf);
6064 /* There are no rpis available to the port now. */
6065 phba->sli4_hba.next_rpi = 0;
6069 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
6070 * @pdev: pointer to pci device data structure.
6072 * This routine is invoked to allocate the driver hba data structure for an
6073 * HBA device. If the allocation is successful, the phba reference to the
6074 * PCI device data structure is set.
6077 * pointer to @phba - successful
6080 static struct lpfc_hba *
6081 lpfc_hba_alloc(struct pci_dev *pdev)
6083 struct lpfc_hba *phba;
6085 /* Allocate memory for HBA structure */
6086 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
6088 dev_err(&pdev->dev, "failed to allocate hba struct\n");
6092 /* Set reference to PCI device in HBA structure */
6093 phba->pcidev = pdev;
6095 /* Assign an unused board number */
6096 phba->brd_no = lpfc_get_instance();
6097 if (phba->brd_no < 0) {
6102 spin_lock_init(&phba->ct_ev_lock);
6103 INIT_LIST_HEAD(&phba->ct_ev_waiters);
6109 * lpfc_hba_free - Free driver hba data structure with a device.
6110 * @phba: pointer to lpfc hba data structure.
6112 * This routine is invoked to free the driver hba data structure with an
6116 lpfc_hba_free(struct lpfc_hba *phba)
6118 /* Release the driver assigned board number */
6119 idr_remove(&lpfc_hba_index, phba->brd_no);
6121 /* Free memory allocated for the SLI rings */
6122 kfree(phba->sli.ring);
6123 phba->sli.ring = NULL;
6130 * lpfc_create_shost - Create hba physical port with associated scsi host.
6131 * @phba: pointer to lpfc hba data structure.
6133 * This routine is invoked to create HBA physical port and associate a SCSI
6138 * other values - error
6141 lpfc_create_shost(struct lpfc_hba *phba)
6143 struct lpfc_vport *vport;
6144 struct Scsi_Host *shost;
6146 /* Initialize HBA FC structure */
6147 phba->fc_edtov = FF_DEF_EDTOV;
6148 phba->fc_ratov = FF_DEF_RATOV;
6149 phba->fc_altov = FF_DEF_ALTOV;
6150 phba->fc_arbtov = FF_DEF_ARBTOV;
6152 atomic_set(&phba->sdev_cnt, 0);
6153 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
6157 shost = lpfc_shost_from_vport(vport);
6158 phba->pport = vport;
6159 lpfc_debugfs_initialize(vport);
6160 /* Put reference to SCSI host to driver's device private data */
6161 pci_set_drvdata(phba->pcidev, shost);
6167 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
6168 * @phba: pointer to lpfc hba data structure.
6170 * This routine is invoked to destroy HBA physical port and the associated
6174 lpfc_destroy_shost(struct lpfc_hba *phba)
6176 struct lpfc_vport *vport = phba->pport;
6178 /* Destroy the physical port associated with the SCSI host */
6179 destroy_port(vport);
6185 * lpfc_setup_bg - Setup Block guard structures and debug areas.
6186 * @phba: pointer to lpfc hba data structure.
6187 * @shost: the shost to be used to detect Block guard settings.
6189 * This routine sets up the local Block guard protocol settings for @shost.
6190 * This routine also allocates memory for debugging bg buffers.
6193 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
6199 if (lpfc_prot_mask && lpfc_prot_guard) {
6200 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6201 "1478 Registering BlockGuard with the "
6204 old_mask = lpfc_prot_mask;
6205 old_guard = lpfc_prot_guard;
6207 /* Only allow supported values */
6208 lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
6209 SHOST_DIX_TYPE0_PROTECTION |
6210 SHOST_DIX_TYPE1_PROTECTION);
6211 lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);
6213 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
6214 if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
6215 lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
6217 if (lpfc_prot_mask && lpfc_prot_guard) {
6218 if ((old_mask != lpfc_prot_mask) ||
6219 (old_guard != lpfc_prot_guard))
6220 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6221 "1475 Registering BlockGuard with the "
6222 "SCSI layer: mask %d guard %d\n",
6223 lpfc_prot_mask, lpfc_prot_guard);
6225 scsi_host_set_prot(shost, lpfc_prot_mask);
6226 scsi_host_set_guard(shost, lpfc_prot_guard);
6228 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6229 "1479 Not Registering BlockGuard with the SCSI "
6230 "layer, Bad protection parameters: %d %d\n",
6231 old_mask, old_guard);
6234 if (!_dump_buf_data) {
6236 spin_lock_init(&_dump_buf_lock);
6238 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
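/*
 * pagecnt is a page-allocation order, so this grabs 2^pagecnt
 * contiguous pages ((1 << PAGE_SHIFT) << pagecnt bytes) for the
 * BlockGuard hexdump buffer.
 */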
6239 if (_dump_buf_data) {
6240 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6241 "9043 BLKGRD: allocated %d pages for "
6242 "_dump_buf_data at 0x%p\n",
6243 (1 << pagecnt), _dump_buf_data);
6244 _dump_buf_data_order = pagecnt;
6245 memset(_dump_buf_data, 0,
6246 ((1 << PAGE_SHIFT) << pagecnt));
6251 if (!_dump_buf_data_order)
6252 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6253 "9044 BLKGRD: ERROR unable to allocate "
6254 "memory for hexdump\n");
6256 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6257 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
6258 "\n", _dump_buf_data);
6259 if (!_dump_buf_dif) {
6262 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
6263 if (_dump_buf_dif) {
6264 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6265 "9046 BLKGRD: allocated %d pages for "
6266 "_dump_buf_dif at 0x%p\n",
6267 (1 << pagecnt), _dump_buf_dif);
6268 _dump_buf_dif_order = pagecnt;
6269 memset(_dump_buf_dif, 0,
6270 ((1 << PAGE_SHIFT) << pagecnt));
6275 if (!_dump_buf_dif_order)
6276 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6277 "9047 BLKGRD: ERROR unable to allocate "
6278 "memory for hexdump\n");
6280 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6281 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
6286 * lpfc_post_init_setup - Perform necessary device post initialization setup.
6287 * @phba: pointer to lpfc hba data structure.
6289 * This routine is invoked to perform all the necessary post initialization
6290 * setup for the device.
6293 lpfc_post_init_setup(struct lpfc_hba *phba)
6295 struct Scsi_Host *shost;
6296 struct lpfc_adapter_event_header adapter_event;
6298 /* Get the default values for Model Name and Description */
6299 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
6302 * hba setup may have changed the hba_queue_depth so we need to
6303 * adjust the value of can_queue.
6305 shost = pci_get_drvdata(phba->pcidev);
6306 shost->can_queue = phba->cfg_hba_queue_depth - 10;
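/*
 * Leave some headroom below the configured HBA queue depth,
 * presumably so driver-internal commands (ELS, mailbox, etc.) are
 * never starved by midlayer I/O.
 */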
6307 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
6308 lpfc_setup_bg(phba, shost);
6310 lpfc_host_attrib_init(shost);
6312 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
6313 spin_lock_irq(shost->host_lock);
6314 lpfc_poll_start_timer(phba);
6315 spin_unlock_irq(shost->host_lock);
6318 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6319 "0428 Perform SCSI scan\n");
6320 /* Send board arrival event to upper layer */
6321 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
6322 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
6323 fc_host_post_vendor_event(shost, fc_get_event_number(),
6324 sizeof(adapter_event),
6325 (char *) &adapter_event,
6331 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
6332 * @phba: pointer to lpfc hba data structure.
6334 * This routine is invoked to set up the PCI device memory space for device
6335 * with SLI-3 interface spec.
6339 * other values - error
6342 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
6344 struct pci_dev *pdev;
6345 unsigned long bar0map_len, bar2map_len;
6348 int error = -ENODEV;
6350 /* Obtain PCI device reference */
6354 pdev = phba->pcidev;
6356 /* Set the device DMA mask: prefer 64-bit DMA, fall back to 32-bit */
6357 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6358 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
6359 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6360 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
6365 /* Get the bus address of Bar0 and Bar2 and the number of bytes
6366 * required by each mapping.
6368 phba->pci_bar0_map = pci_resource_start(pdev, 0);
6369 bar0map_len = pci_resource_len(pdev, 0);
6371 phba->pci_bar2_map = pci_resource_start(pdev, 2);
6372 bar2map_len = pci_resource_len(pdev, 2);
6374 /* Map HBA SLIM to a kernel virtual address. */
6375 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
6376 if (!phba->slim_memmap_p) {
6377 dev_printk(KERN_ERR, &pdev->dev,
6378 "ioremap failed for SLIM memory.\n");
6382 /* Map HBA Control Registers to a kernel virtual address. */
6383 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
6384 if (!phba->ctrl_regs_memmap_p) {
6385 dev_printk(KERN_ERR, &pdev->dev,
6386 "ioremap failed for HBA control registers.\n");
6387 goto out_iounmap_slim;
6390 /* Allocate memory for SLI-2 structures */
6391 phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6392 &phba->slim2p.phys, GFP_KERNEL);
6393 if (!phba->slim2p.virt)
6396 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
6397 phba->mbox_ext = (phba->slim2p.virt +
6398 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
6399 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
6400 phba->IOCBs = (phba->slim2p.virt +
6401 offsetof(struct lpfc_sli2_slim, IOCBs));
6403 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
6404 lpfc_sli_hbq_size(),
6405 &phba->hbqslimp.phys,
6407 if (!phba->hbqslimp.virt)
6410 hbq_count = lpfc_sli_hbq_count();
6411 ptr = phba->hbqslimp.virt;
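/*
 * Carve each HBQ's entry array sequentially out of the single
 * hbqslimp DMA buffer allocated above.
 */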
6412 for (i = 0; i < hbq_count; ++i) {
6413 phba->hbqs[i].hbq_virt = ptr;
6414 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
6415 ptr += (lpfc_hbq_defs[i]->entry_count *
6416 sizeof(struct lpfc_hbq_entry));
6418 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
6419 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
6421 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
6423 INIT_LIST_HEAD(&phba->rb_pend_list);
6425 phba->MBslimaddr = phba->slim_memmap_p;
6426 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
6427 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
6428 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
6429 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
6434 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6435 phba->slim2p.virt, phba->slim2p.phys);
6437 iounmap(phba->ctrl_regs_memmap_p);
6439 iounmap(phba->slim_memmap_p);
6445 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
6446 * @phba: pointer to lpfc hba data structure.
6448 * This routine is invoked to unset the PCI device memory space for device
6449 * with SLI-3 interface spec.
6452 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
6454 struct pci_dev *pdev;
6456 /* Obtain PCI device reference */
6460 pdev = phba->pcidev;
6462 /* Free coherent DMA memory allocated */
6463 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
6464 phba->hbqslimp.virt, phba->hbqslimp.phys);
6465 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6466 phba->slim2p.virt, phba->slim2p.phys);
6468 /* I/O memory unmap */
6469 iounmap(phba->ctrl_regs_memmap_p);
6470 iounmap(phba->slim_memmap_p);
6476 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
6477 * @phba: pointer to lpfc hba data structure.
6479 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
6480 * done and check status.
6482 * Return 0 if successful, otherwise -ENODEV.
6485 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
6487 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
6488 struct lpfc_register reg_data;
6489 int i, port_error = 0;
6492 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
6493 memset(&reg_data, 0, sizeof(reg_data));
6494 if (!phba->sli4_hba.PSMPHRregaddr)
6497 /* Wait up to 30 seconds for the SLI Port POST done and ready */
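/*
 * 3000 polls against the 30 second budget implies roughly a 10 ms
 * pause per iteration between reads of the port semaphore register.
 */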
6498 for (i = 0; i < 3000; i++) {
6499 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
6500 &portsmphr_reg.word0) ||
6501 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
6502 /* Port has a fatal POST error, break out */
6503 port_error = -ENODEV;
6506 if (LPFC_POST_STAGE_PORT_READY ==
6507 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
6513 * If there was a port error during POST, then don't proceed with
6514 * other register reads as the data may not be valid. Just exit.
6517 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6518 "1408 Port Failed POST - portsmphr=0x%x, "
6519 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
6520 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
6521 portsmphr_reg.word0,
6522 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
6523 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
6524 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
6525 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
6526 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
6527 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
6528 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
6529 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
6531 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6532 "2534 Device Info: SLIFamily=0x%x, "
6533 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
6534 "SLIHint_2=0x%x, FT=0x%x\n",
6535 bf_get(lpfc_sli_intf_sli_family,
6536 &phba->sli4_hba.sli_intf),
6537 bf_get(lpfc_sli_intf_slirev,
6538 &phba->sli4_hba.sli_intf),
6539 bf_get(lpfc_sli_intf_if_type,
6540 &phba->sli4_hba.sli_intf),
6541 bf_get(lpfc_sli_intf_sli_hint1,
6542 &phba->sli4_hba.sli_intf),
6543 bf_get(lpfc_sli_intf_sli_hint2,
6544 &phba->sli4_hba.sli_intf),
6545 bf_get(lpfc_sli_intf_func_type,
6546 &phba->sli4_hba.sli_intf));
6548 * Check for other Port errors during the initialization
6549 * process. Fail the load if the port did not come up
6552 if_type = bf_get(lpfc_sli_intf_if_type,
6553 &phba->sli4_hba.sli_intf);
6555 case LPFC_SLI_INTF_IF_TYPE_0:
6556 phba->sli4_hba.ue_mask_lo =
6557 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
6558 phba->sli4_hba.ue_mask_hi =
6559 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
6561 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
6563 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
6564 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
6565 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
6566 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6567 "1422 Unrecoverable Error "
6568 "Detected during POST "
6569 "uerr_lo_reg=0x%x, "
6570 "uerr_hi_reg=0x%x, "
6571 "ue_mask_lo_reg=0x%x, "
6572 "ue_mask_hi_reg=0x%x\n",
6575 phba->sli4_hba.ue_mask_lo,
6576 phba->sli4_hba.ue_mask_hi);
6577 port_error = -ENODEV;
6580 case LPFC_SLI_INTF_IF_TYPE_2:
6581 /* Final checks. The port status should be clean. */
6582 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
6584 (bf_get(lpfc_sliport_status_err, &reg_data) &&
6585 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
6586 phba->work_status[0] =
6587 readl(phba->sli4_hba.u.if_type2.
6589 phba->work_status[1] =
6590 readl(phba->sli4_hba.u.if_type2.
6592 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6593 "2888 Unrecoverable port error "
6594 "following POST: port status reg "
6595 "0x%x, port_smphr reg 0x%x, "
6596 "error 1=0x%x, error 2=0x%x\n",
6598 portsmphr_reg.word0,
6599 phba->work_status[0],
6600 phba->work_status[1]);
6601 port_error = -ENODEV;
6604 case LPFC_SLI_INTF_IF_TYPE_1:
6613 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
6614 * @phba: pointer to lpfc hba data structure.
6615 * @if_type: The SLI4 interface type getting configured.
6617 * This routine is invoked to set up SLI4 BAR0 PCI config space register
6621 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
6624 case LPFC_SLI_INTF_IF_TYPE_0:
6625 phba->sli4_hba.u.if_type0.UERRLOregaddr =
6626 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
6627 phba->sli4_hba.u.if_type0.UERRHIregaddr =
6628 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
6629 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
6630 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
6631 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
6632 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
6633 phba->sli4_hba.SLIINTFregaddr =
6634 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6636 case LPFC_SLI_INTF_IF_TYPE_2:
6637 phba->sli4_hba.u.if_type2.ERR1regaddr =
6638 phba->sli4_hba.conf_regs_memmap_p +
6639 LPFC_CTL_PORT_ER1_OFFSET;
6640 phba->sli4_hba.u.if_type2.ERR2regaddr =
6641 phba->sli4_hba.conf_regs_memmap_p +
6642 LPFC_CTL_PORT_ER2_OFFSET;
6643 phba->sli4_hba.u.if_type2.CTRLregaddr =
6644 phba->sli4_hba.conf_regs_memmap_p +
6645 LPFC_CTL_PORT_CTL_OFFSET;
6646 phba->sli4_hba.u.if_type2.STATUSregaddr =
6647 phba->sli4_hba.conf_regs_memmap_p +
6648 LPFC_CTL_PORT_STA_OFFSET;
6649 phba->sli4_hba.SLIINTFregaddr =
6650 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6651 phba->sli4_hba.PSMPHRregaddr =
6652 phba->sli4_hba.conf_regs_memmap_p +
6653 LPFC_CTL_PORT_SEM_OFFSET;
6654 phba->sli4_hba.RQDBregaddr =
6655 phba->sli4_hba.conf_regs_memmap_p +
6656 LPFC_ULP0_RQ_DOORBELL;
6657 phba->sli4_hba.WQDBregaddr =
6658 phba->sli4_hba.conf_regs_memmap_p +
6659 LPFC_ULP0_WQ_DOORBELL;
6660 phba->sli4_hba.EQCQDBregaddr =
6661 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
6662 phba->sli4_hba.MQDBregaddr =
6663 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
6664 phba->sli4_hba.BMBXregaddr =
6665 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
6667 case LPFC_SLI_INTF_IF_TYPE_1:
6669 dev_printk(KERN_ERR, &phba->pcidev->dev,
6670 "FATAL - unsupported SLI4 interface type - %d\n",
6677 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
6678 * @phba: pointer to lpfc hba data structure.
6680 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
6684 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
6686 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6687 LPFC_SLIPORT_IF0_SMPHR;
6688 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6690 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6692 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6697 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
6698 * @phba: pointer to lpfc hba data structure.
6699 * @vf: virtual function number
6701 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
6702 * based on the given virtual function number, @vf.
6704 * Return 0 if successful, otherwise -ENODEV.
6707 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
6709 if (vf > LPFC_VIR_FUNC_MAX)
6712 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6713 vf * LPFC_VFR_PAGE_SIZE +
6714 LPFC_ULP0_RQ_DOORBELL);
6715 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6716 vf * LPFC_VFR_PAGE_SIZE +
6717 LPFC_ULP0_WQ_DOORBELL);
6718 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6719 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
6720 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6721 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
6722 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6723 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
6728 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
6729 * @phba: pointer to lpfc hba data structure.
6731 * This routine is invoked to create the bootstrap mailbox
6732 * region consistent with the SLI-4 interface spec. This
6733 * routine allocates all memory necessary to communicate
6734 * mailbox commands to the port and sets up all alignment
6735 * needs. No locks are expected to be held when calling
6740 * -ENOMEM - could not allocate memory.
6743 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
6746 struct lpfc_dmabuf *dmabuf;
6747 struct dma_address *dma_address;
6751 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6756 * The bootstrap mailbox region consists of two parts
6757 * plus an alignment restriction of 16 bytes.
6759 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
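/*
 * Over-allocate by (alignment - 1) bytes so that a 16-byte aligned
 * window always fits inside the buffer; PTR_ALIGN()/ALIGN() below
 * pick out that window without a second allocation.
 */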
6760 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
6761 &dmabuf->phys, GFP_KERNEL);
6762 if (!dmabuf->virt) {
6768 * Initialize the bootstrap mailbox pointers now so that the register
6769 * operations are simple later. The mailbox dma address is required
6770 * to be 16-byte aligned. Also align the virtual memory as each
6771 * mailbox is copied into the bmbx mailbox region before issuing the
6772 * command to the port.
6774 phba->sli4_hba.bmbx.dmabuf = dmabuf;
6775 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
6777 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
6778 LPFC_ALIGN_16_BYTE);
6779 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
6780 LPFC_ALIGN_16_BYTE);
6783 * Set the high and low physical addresses now. The SLI4 alignment
6784 * requirement is 16 bytes and the mailbox is posted to the port
6785 * as two 30-bit addresses. The other data is a bit marking whether
6786 * the 30-bit address is the high or low address.
6787 * Upcast bmbx aphys to 64bits so shift instruction compiles
6788 * clean on 32 bit machines.
6790 dma_address = &phba->sli4_hba.bmbx.dma_address;
6791 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
6792 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
6793 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
6794 LPFC_BMBX_BIT1_ADDR_HI);
6796 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
6797 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
6798 LPFC_BMBX_BIT1_ADDR_LO);
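/*
 * Sketch of the split (marker bits aside): addr_lo[31:2] holds
 * bits 33..4 of the aligned physical address and addr_hi[31:2]
 * holds bits 63..34, so the port can rebuild it as
 * ((addr_hi >> 2) << 34) | ((addr_lo >> 2) << 4).
 */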
6803 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
6804 * @phba: pointer to lpfc hba data structure.
6806 * This routine is invoked to tear down the bootstrap mailbox
6807 * region and release all host resources. This routine requires
6808 * the caller to ensure all mailbox commands have been recovered, that no
6809 * additional mailbox commands are sent, and that interrupts are disabled
6810 * before calling this routine.
6814 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
6816 dma_free_coherent(&phba->pcidev->dev,
6817 phba->sli4_hba.bmbx.bmbx_size,
6818 phba->sli4_hba.bmbx.dmabuf->virt,
6819 phba->sli4_hba.bmbx.dmabuf->phys);
6821 kfree(phba->sli4_hba.bmbx.dmabuf);
6822 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
6826 * lpfc_sli4_read_config - Get the config parameters.
6827 * @phba: pointer to lpfc hba data structure.
6829 * This routine is invoked to read the configuration parameters from the HBA.
6830 * The configuration parameters are used to set the base and maximum values
6831 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
6832 * allocation for the port.
6836 * -ENOMEM - No available memory
6837 * -EIO - The mailbox failed to complete successfully.
6840 lpfc_sli4_read_config(struct lpfc_hba *phba)
6843 struct lpfc_mbx_read_config *rd_config;
6844 union lpfc_sli4_cfg_shdr *shdr;
6845 uint32_t shdr_status, shdr_add_status;
6846 struct lpfc_mbx_get_func_cfg *get_func_cfg;
6847 struct lpfc_rsrc_desc_fcfcoe *desc;
6849 int length, i, rc = 0, rc2;
6851 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6853 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6854 "2011 Unable to allocate memory for issuing "
6855 "SLI_CONFIG_SPECIAL mailbox command\n");
6859 lpfc_read_config(phba, pmb);
6861 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6862 if (rc != MBX_SUCCESS) {
6863 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6864 "2012 Mailbox failed , mbxCmd x%x "
6865 "READ_CONFIG, mbxStatus x%x\n",
6866 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6867 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6870 rd_config = &pmb->u.mqe.un.rd_config;
6871 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
6872 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
6873 phba->sli4_hba.lnk_info.lnk_tp =
6874 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
6875 phba->sli4_hba.lnk_info.lnk_no =
6876 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
6877 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6878 "3081 lnk_type:%d, lnk_numb:%d\n",
6879 phba->sli4_hba.lnk_info.lnk_tp,
6880 phba->sli4_hba.lnk_info.lnk_no);
6882 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6883 "3082 Mailbox (x%x) returned ldv:x0\n",
6884 bf_get(lpfc_mqe_command, &pmb->u.mqe));
6885 phba->sli4_hba.extents_in_use =
6886 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
6887 phba->sli4_hba.max_cfg_param.max_xri =
6888 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
6889 phba->sli4_hba.max_cfg_param.xri_base =
6890 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
6891 phba->sli4_hba.max_cfg_param.max_vpi =
6892 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
6893 /* Limit the max we support */
6894 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
6895 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
6896 phba->sli4_hba.max_cfg_param.vpi_base =
6897 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
6898 phba->sli4_hba.max_cfg_param.max_rpi =
6899 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
6900 phba->sli4_hba.max_cfg_param.rpi_base =
6901 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
6902 phba->sli4_hba.max_cfg_param.max_vfi =
6903 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
6904 phba->sli4_hba.max_cfg_param.vfi_base =
6905 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
6906 phba->sli4_hba.max_cfg_param.max_fcfi =
6907 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
6908 phba->sli4_hba.max_cfg_param.max_eq =
6909 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
6910 phba->sli4_hba.max_cfg_param.max_rq =
6911 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
6912 phba->sli4_hba.max_cfg_param.max_wq =
6913 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
6914 phba->sli4_hba.max_cfg_param.max_cq =
6915 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
6916 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
6917 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
6918 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
6919 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
6920 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
6921 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
6922 phba->max_vports = phba->max_vpi;
6923 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6924 "2003 cfg params Extents? %d "
6930 phba->sli4_hba.extents_in_use,
6931 phba->sli4_hba.max_cfg_param.xri_base,
6932 phba->sli4_hba.max_cfg_param.max_xri,
6933 phba->sli4_hba.max_cfg_param.vpi_base,
6934 phba->sli4_hba.max_cfg_param.max_vpi,
6935 phba->sli4_hba.max_cfg_param.vfi_base,
6936 phba->sli4_hba.max_cfg_param.max_vfi,
6937 phba->sli4_hba.max_cfg_param.rpi_base,
6938 phba->sli4_hba.max_cfg_param.max_rpi,
6939 phba->sli4_hba.max_cfg_param.max_fcfi);
6945 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
6946 length = phba->sli4_hba.max_cfg_param.max_xri -
6947 lpfc_sli4_get_els_iocb_cnt(phba);
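/*
 * XRIs reserved for ELS traffic are not available for SCSI I/O,
 * so cap the HBA queue depth at whatever remains.
 */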
6948 if (phba->cfg_hba_queue_depth > length) {
6949 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6950 "3361 HBA queue depth changed from %d to %d\n",
6951 phba->cfg_hba_queue_depth, length);
6952 phba->cfg_hba_queue_depth = length;
6955 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
6956 LPFC_SLI_INTF_IF_TYPE_2)
6959 /* get the pf# and vf# for SLI4 if_type 2 port */
6960 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
6961 sizeof(struct lpfc_sli4_cfg_mhdr));
6962 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
6963 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
6964 length, LPFC_SLI4_MBX_EMBED);
6966 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6967 shdr = (union lpfc_sli4_cfg_shdr *)
6968 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
6969 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6970 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6971 if (rc2 || shdr_status || shdr_add_status) {
6972 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6973 "3026 Mailbox failed , mbxCmd x%x "
6974 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
6975 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6976 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6980 /* Search for the fc_fcoe resource descriptor */
6981 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
6983 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
6984 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
6985 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
6986 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
6987 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
6988 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
6991 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
6992 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
6993 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
6994 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
6995 phba->sli4_hba.iov.pf_number =
6996 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
6997 phba->sli4_hba.iov.vf_number =
6998 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
7003 if (i < LPFC_RSRC_DESC_MAX_NUM)
7004 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7005 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
7006 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
7007 phba->sli4_hba.iov.vf_number);
7009 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7010 "3028 GET_FUNCTION_CONFIG: failed to find "
7011 "Resrouce Descriptor:x%x\n",
7012 LPFC_RSRC_DESC_TYPE_FCFCOE);
7015 mempool_free(pmb, phba->mbox_mem_pool);
7020 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
7021 * @phba: pointer to lpfc hba data structure.
7023 * This routine is invoked to set up the port-side endian order when
7024 * the port if_type is 0. This routine has no function for other
7029 * -ENOMEM - No available memory
7030 * -EIO - The mailbox failed to complete successfully.
7033 lpfc_setup_endian_order(struct lpfc_hba *phba)
7035 LPFC_MBOXQ_t *mboxq;
7036 uint32_t if_type, rc = 0;
7037 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
7038 HOST_ENDIAN_HIGH_WORD1};
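/*
 * These two well-known constants are presumably how an if_type 0
 * port deduces the host byte order: the port inspects the word
 * pattern it receives and configures its own swapping to match.
 */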
7040 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7042 case LPFC_SLI_INTF_IF_TYPE_0:
7043 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7046 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7047 "0492 Unable to allocate memory for "
7048 "issuing SLI_CONFIG_SPECIAL mailbox "
7054 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
7055 * two words to contain special data values and no other data.
7057 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
7058 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
7059 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7060 if (rc != MBX_SUCCESS) {
7061 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7062 "0493 SLI_CONFIG_SPECIAL mailbox "
7063 "failed with status x%x\n",
7067 mempool_free(mboxq, phba->mbox_mem_pool);
7069 case LPFC_SLI_INTF_IF_TYPE_2:
7070 case LPFC_SLI_INTF_IF_TYPE_1:
7078 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
7079 * @phba: pointer to lpfc hba data structure.
7081 * This routine is invoked to check the user-settable queue counts for EQs and
7082 * CQs. After this routine is called, the counts will be set to valid values that
7083 * adhere to the constraints of the system's interrupt vectors and the port's
7088 * -ENOMEM - No available memory
7091 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
7093 int cfg_fcp_io_channel;
7096 int fof_vectors = phba->cfg_fof ? 1 : 0;
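/*
 * When Flash Optimized Fabric (FOF) is enabled it needs one
 * EQ/interrupt vector of its own, on top of the FCP IO channels.
 */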
7099 * Sanity check for configured queue parameters against the run-time
7103 /* Sanity check on HBA EQ parameters */
7104 cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
7106 /* It doesn't make sense to have more IO channels than online CPUs */
7107 for_each_present_cpu(cpu) {
7108 if (cpu_online(cpu))
7111 phba->sli4_hba.num_online_cpu = i;
7112 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7113 phba->sli4_hba.curr_disp_cpu = 0;
7115 if (i < cfg_fcp_io_channel) {
7116 lpfc_printf_log(phba,
7118 "3188 Reducing IO channels to match number of "
7119 "online CPUs: from %d to %d\n",
7120 cfg_fcp_io_channel, i);
7121 cfg_fcp_io_channel = i;
7124 if (cfg_fcp_io_channel + fof_vectors >
7125 phba->sli4_hba.max_cfg_param.max_eq) {
7126 if (phba->sli4_hba.max_cfg_param.max_eq <
7127 LPFC_FCP_IO_CHAN_MIN) {
7128 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7129 "2574 Not enough EQs (%d) from the "
7130 "pci function for supporting FCP "
7132 phba->sli4_hba.max_cfg_param.max_eq,
7133 phba->cfg_fcp_io_channel);
7136 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7137 "2575 Reducing IO channels to match number of "
7138 "available EQs: from %d to %d\n",
7140 phba->sli4_hba.max_cfg_param.max_eq);
7141 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
7145 /* The actual number of FCP event queues adopted */
7146 phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
7148 /* Get EQ depth from module parameter, fake the default for now */
7149 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
7150 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
7152 /* Get CQ depth from module parameter, fake the default for now */
7153 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
7154 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
7162 * lpfc_sli4_queue_create - Create all the SLI4 queues
7163 * @phba: pointer to lpfc hba data structure.
7165 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
7166 * operation. For each SLI4 queue type, the parameters such as queue entry
7167 * count (queue depth) shall be taken from the module parameter. For now,
7168 * we just use some constant number as a placeholder.
7172 * -ENOMEM - No available memory
7173 * -EIO - The mailbox failed to complete successfully.
7176 lpfc_sli4_queue_create(struct lpfc_hba *phba)
7178 struct lpfc_queue *qdesc;
7182 * Create HBA Record arrays.
7184 if (!phba->cfg_fcp_io_channel)
7187 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
7188 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
7189 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
7190 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
7191 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
7192 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
7194 phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) *
7195 phba->cfg_fcp_io_channel), GFP_KERNEL);
7196 if (!phba->sli4_hba.hba_eq) {
7197 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7198 "2576 Failed allocate memory for "
7199 "fast-path EQ record array\n");
7203 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
7204 phba->cfg_fcp_io_channel), GFP_KERNEL);
7205 if (!phba->sli4_hba.fcp_cq) {
7206 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7207 "2577 Failed allocate memory for fast-path "
7208 "CQ record array\n");
7212 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
7213 phba->cfg_fcp_io_channel), GFP_KERNEL);
7214 if (!phba->sli4_hba.fcp_wq) {
7215 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7216 "2578 Failed allocate memory for fast-path "
7217 "WQ record array\n");
7222 * Since the first EQ can have multiple CQs associated with it,
7223 * this array is used to quickly see if we have a FCP fast-path
7226 phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
7227 phba->cfg_fcp_io_channel), GFP_KERNEL);
7228 if (!phba->sli4_hba.fcp_cq_map) {
7229 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7230 "2545 Failed allocate memory for fast-path "
7236 * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies
7237 * how many EQs to create.
7239 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7242 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
7243 phba->sli4_hba.eq_ecount);
7245 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7246 "0497 Failed allocate EQ (%d)\n", idx);
7249 phba->sli4_hba.hba_eq[idx] = qdesc;
7251 /* Create Fast Path FCP CQs */
7252 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
7253 phba->sli4_hba.cq_ecount);
7255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7256 "0499 Failed allocate fast-path FCP "
7260 phba->sli4_hba.fcp_cq[idx] = qdesc;
7262 /* Create Fast Path FCP WQs */
7263 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
7264 phba->sli4_hba.wq_ecount);
7266 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7267 "0503 Failed allocate fast-path FCP "
7271 phba->sli4_hba.fcp_wq[idx] = qdesc;
7276 * Create Slow Path Completion Queues (CQs)
7279 /* Create slow-path Mailbox Command Complete Queue */
7280 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
7281 phba->sli4_hba.cq_ecount);
7283 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7284 "0500 Failed allocate slow-path mailbox CQ\n");
7287 phba->sli4_hba.mbx_cq = qdesc;
7289 /* Create slow-path ELS Complete Queue */
7290 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
7291 phba->sli4_hba.cq_ecount);
7293 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7294 "0501 Failed allocate slow-path ELS CQ\n");
7297 phba->sli4_hba.els_cq = qdesc;
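/*
 * Note: both slow-path CQs created here (mailbox and ELS) are later
 * bound to the first HBA EQ, hba_eq[0], in lpfc_sli4_queue_setup().
 */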
7301 * Create Slow Path Work Queues (WQs)
7304 /* Create Mailbox Command Queue */
7306 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
7307 phba->sli4_hba.mq_ecount);
7309 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7310 "0505 Failed allocate slow-path MQ\n");
7313 phba->sli4_hba.mbx_wq = qdesc;
7316 * Create ELS Work Queues
7319 /* Create slow-path ELS Work Queue */
7320 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
7321 phba->sli4_hba.wq_ecount);
7323 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7324 "0504 Failed allocate slow-path ELS WQ\n");
7327 phba->sli4_hba.els_wq = qdesc;
7330 * Create Receive Queue (RQ)
7333 /* Create Receive Queue for header */
7334 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
7335 phba->sli4_hba.rq_ecount);
7337 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7338 "0506 Failed allocate receive HRQ\n");
7341 phba->sli4_hba.hdr_rq = qdesc;
7343 /* Create Receive Queue for data */
7344 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
7345 phba->sli4_hba.rq_ecount);
7347 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7348 "0507 Failed allocate receive DRQ\n");
7351 phba->sli4_hba.dat_rq = qdesc;
7353 /* Create the Queues needed for Flash Optimized Fabric operations */
7355 lpfc_fof_queue_create(phba);
7359 lpfc_sli4_queue_destroy(phba);
7364 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
7365 * @phba: pointer to lpfc hba data structure.
7367 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
7372 * -ENOMEM - No available memory
7373 * -EIO - The mailbox failed to complete successfully.
7376 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
7381 lpfc_fof_queue_destroy(phba);
7383 if (phba->sli4_hba.hba_eq != NULL) {
7384 /* Release HBA event queue */
7385 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7386 if (phba->sli4_hba.hba_eq[idx] != NULL) {
7387 lpfc_sli4_queue_free(
7388 phba->sli4_hba.hba_eq[idx]);
7389 phba->sli4_hba.hba_eq[idx] = NULL;
7392 kfree(phba->sli4_hba.hba_eq);
7393 phba->sli4_hba.hba_eq = NULL;
7396 if (phba->sli4_hba.fcp_cq != NULL) {
7397 /* Release FCP completion queue */
7398 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7399 if (phba->sli4_hba.fcp_cq[idx] != NULL) {
7400 lpfc_sli4_queue_free(
7401 phba->sli4_hba.fcp_cq[idx]);
7402 phba->sli4_hba.fcp_cq[idx] = NULL;
7405 kfree(phba->sli4_hba.fcp_cq);
7406 phba->sli4_hba.fcp_cq = NULL;
7409 if (phba->sli4_hba.fcp_wq != NULL) {
7410 /* Release FCP work queue */
7411 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7412 if (phba->sli4_hba.fcp_wq[idx] != NULL) {
7413 lpfc_sli4_queue_free(
7414 phba->sli4_hba.fcp_wq[idx]);
7415 phba->sli4_hba.fcp_wq[idx] = NULL;
7418 kfree(phba->sli4_hba.fcp_wq);
7419 phba->sli4_hba.fcp_wq = NULL;
7422 /* Release FCP CQ mapping array */
7423 if (phba->sli4_hba.fcp_cq_map != NULL) {
7424 kfree(phba->sli4_hba.fcp_cq_map);
7425 phba->sli4_hba.fcp_cq_map = NULL;
7428 /* Release mailbox command work queue */
7429 if (phba->sli4_hba.mbx_wq != NULL) {
7430 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
7431 phba->sli4_hba.mbx_wq = NULL;
7434 /* Release ELS work queue */
7435 if (phba->sli4_hba.els_wq != NULL) {
7436 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
7437 phba->sli4_hba.els_wq = NULL;
7440 /* Release unsolicited receive queue */
7441 if (phba->sli4_hba.hdr_rq != NULL) {
7442 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
7443 phba->sli4_hba.hdr_rq = NULL;
7445 if (phba->sli4_hba.dat_rq != NULL) {
7446 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
7447 phba->sli4_hba.dat_rq = NULL;
7450 /* Release ELS complete queue */
7451 if (phba->sli4_hba.els_cq != NULL) {
7452 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
7453 phba->sli4_hba.els_cq = NULL;
7456 /* Release mailbox command complete queue */
7457 if (phba->sli4_hba.mbx_cq != NULL) {
7458 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
7459 phba->sli4_hba.mbx_cq = NULL;
7466 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
7467 * @phba: pointer to lpfc hba data structure.
7469 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
7474 * -ENOMEM - No available memory
7475 * -EIO - The mailbox failed to complete successfully.
7478 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7480 struct lpfc_sli *psli = &phba->sli;
7481 struct lpfc_sli_ring *pring;
7483 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
7484 int fcp_cq_index = 0;
7485 uint32_t shdr_status, shdr_add_status;
7486 union lpfc_sli4_cfg_shdr *shdr;
7487 LPFC_MBOXQ_t *mboxq;
7490 /* Check for dual-ULP support */
7491 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7493 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7494 "3249 Unable to allocate memory for "
7495 "QUERY_FW_CFG mailbox command\n");
7498 length = (sizeof(struct lpfc_mbx_query_fw_config) -
7499 sizeof(struct lpfc_sli4_cfg_mhdr));
7500 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7501 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
7502 length, LPFC_SLI4_MBX_EMBED);
7504 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7506 shdr = (union lpfc_sli4_cfg_shdr *)
7507 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7508 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7509 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7510 if (shdr_status || shdr_add_status || rc) {
7511 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7512 "3250 QUERY_FW_CFG mailbox failed with status "
7513 "x%x add_status x%x, mbx status x%x\n",
7514 shdr_status, shdr_add_status, rc);
7515 if (rc != MBX_TIMEOUT)
7516 mempool_free(mboxq, phba->mbox_mem_pool);
7521 phba->sli4_hba.fw_func_mode =
7522 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
7523 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
7524 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
7525 phba->sli4_hba.physical_port =
7526 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
7527 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7528 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
7529 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
7530 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
7532 if (rc != MBX_TIMEOUT)
7533 mempool_free(mboxq, phba->mbox_mem_pool);
7536 * Set up HBA Event Queues (EQs)
7539 /* Set up HBA event queue */
7540 if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
7541 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7542 "3147 Fast-path EQs not allocated\n");
7546 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
7547 if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
7548 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7549 "0522 Fast-path EQ (%d) not "
7550 "allocated\n", fcp_eqidx);
7552 goto out_destroy_hba_eq;
7554 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
7555 (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));
7557 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7558 "0523 Failed setup of fast-path EQ "
7559 "(%d), rc = 0x%x\n", fcp_eqidx,
7561 goto out_destroy_hba_eq;
7563 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7564 "2584 HBA EQ setup: "
7565 "queue[%d]-id=%d\n", fcp_eqidx,
7566 phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
7569 /* Set up fast-path FCP Response Complete Queue */
7570 if (!phba->sli4_hba.fcp_cq) {
7571 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7572 "3148 Fast-path FCP CQ array not "
7575 goto out_destroy_hba_eq;
7578 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
7579 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
7580 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7581 "0526 Fast-path FCP CQ (%d) not "
7582 "allocated\n", fcp_cqidx);
7584 goto out_destroy_fcp_cq;
7586 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
7587 phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
7589 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7590 "0527 Failed setup of fast-path FCP "
7591 "CQ (%d), rc = 0x%x\n", fcp_cqidx,
7593 goto out_destroy_fcp_cq;
7596 /* Setup fcp_cq_map for fast lookup */
7597 phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
7598 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;
7600 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7601 "2588 FCP CQ setup: cq[%d]-id=%d, "
7602 "parent seq[%d]-id=%d\n",
7604 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
7606 phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
7609 /* Set up fast-path FCP Work Queue */
7610 if (!phba->sli4_hba.fcp_wq) {
7611 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7612 "3149 Fast-path FCP WQ array not "
7615 goto out_destroy_fcp_cq;
7618 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
7619 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
7620 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7621 "0534 Fast-path FCP WQ (%d) not "
7622 "allocated\n", fcp_wqidx);
7624 goto out_destroy_fcp_wq;
7626 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
7627 phba->sli4_hba.fcp_cq[fcp_wqidx],
7630 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7631 "0535 Failed setup of fast-path FCP "
7632 "WQ (%d), rc = 0x%x\n", fcp_wqidx,
7634 goto out_destroy_fcp_wq;
7637 /* Bind this WQ to the next FCP ring */
7638 pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
7639 pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
7640 phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;
7642 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7643 "2591 FCP WQ setup: wq[%d]-id=%d, "
7644 "parent cq[%d]-id=%d\n",
7646 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
7648 phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
7651 * Set up Complete Queues (CQs)
7654 /* Set up slow-path MBOX Complete Queue as the first CQ */
7655 if (!phba->sli4_hba.mbx_cq) {
7656 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7657 "0528 Mailbox CQ not allocated\n");
7659 goto out_destroy_fcp_wq;
7661 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
7662 phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
7664 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7665 "0529 Failed setup of slow-path mailbox CQ: "
7666 "rc = 0x%x\n", (uint32_t)rc);
7667 goto out_destroy_fcp_wq;
7669 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7670 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
7671 phba->sli4_hba.mbx_cq->queue_id,
7672 phba->sli4_hba.hba_eq[0]->queue_id);
7674 /* Set up slow-path ELS Complete Queue */
7675 if (!phba->sli4_hba.els_cq) {
7676 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7677 "0530 ELS CQ not allocated\n");
7679 goto out_destroy_mbx_cq;
7681 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
7682 phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
7684 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7685 "0531 Failed setup of slow-path ELS CQ: "
7686 "rc = 0x%x\n", (uint32_t)rc);
7687 goto out_destroy_mbx_cq;
7689 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7690 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
7691 phba->sli4_hba.els_cq->queue_id,
7692 phba->sli4_hba.hba_eq[0]->queue_id);
7695 * Set up all the Work Queues (WQs)
7698 /* Set up Mailbox Command Queue */
7699 if (!phba->sli4_hba.mbx_wq) {
7700 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7701 "0538 Slow-path MQ not allocated\n");
7703 goto out_destroy_els_cq;
7705 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
7706 phba->sli4_hba.mbx_cq, LPFC_MBOX);
7708 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7709 "0539 Failed setup of slow-path MQ: "
7711 goto out_destroy_els_cq;
7713 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7714 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
7715 phba->sli4_hba.mbx_wq->queue_id,
7716 phba->sli4_hba.mbx_cq->queue_id);
7718 /* Set up slow-path ELS Work Queue */
7719 if (!phba->sli4_hba.els_wq) {
7720 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7721 "0536 Slow-path ELS WQ not allocated\n");
7723 goto out_destroy_mbx_wq;
7725 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
7726 phba->sli4_hba.els_cq, LPFC_ELS);
7728 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7729 "0537 Failed setup of slow-path ELS WQ: "
7730 "rc = 0x%x\n", (uint32_t)rc);
7731 goto out_destroy_mbx_wq;
7734 /* Bind this WQ to the ELS ring */
7735 pring = &psli->ring[LPFC_ELS_RING];
7736 pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq;
7737 phba->sli4_hba.els_cq->pring = pring;
7739 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7740 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
7741 phba->sli4_hba.els_wq->queue_id,
7742 phba->sli4_hba.els_cq->queue_id);
7745 * Create Receive Queue (RQ)
7747 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
7748 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7749 "0540 Receive Queue not allocated\n");
7751 goto out_destroy_els_wq;
7754 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
7755 lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
7757 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
7758 phba->sli4_hba.els_cq, LPFC_USOL);
7760 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7761 "0541 Failed setup of Receive Queue: "
7762 "rc = 0x%x\n", (uint32_t)rc);
7763 goto out_destroy_fcp_wq;
7766 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7767 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
7768 "parent cq-id=%d\n",
7769 phba->sli4_hba.hdr_rq->queue_id,
7770 phba->sli4_hba.dat_rq->queue_id,
7771 phba->sli4_hba.els_cq->queue_id);
7773 if (phba->cfg_fof) {
7774 rc = lpfc_fof_queue_setup(phba);
7776 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7777 "0549 Failed setup of FOF Queues: "
7779 goto out_destroy_els_rq;
7784 * Configure EQ delay multiplier for interrupt coalescing using
7785 * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time.
7787 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
7788 fcp_eqidx += LPFC_MAX_EQ_DELAY)
7789 lpfc_modify_fcp_eq_delay(phba, fcp_eqidx);
7793 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
7795 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7797 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7799 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7801 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7803 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
7804 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
7806 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
7807 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
7809 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
7810 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
7816 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
7817 * @phba: pointer to lpfc hba data structure.
7819 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
7824 * -ENOMEM - No available memory
7825 * -EIO - The mailbox failed to complete successfully.
7828 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
7832 /* Unset the queues created for Flash Optimized Fabric operations */
7834 lpfc_fof_queue_destroy(phba);
7835 /* Unset mailbox command work queue */
7836 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7837 /* Unset ELS work queue */
7838 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7839 /* Unset unsolicited receive queue */
7840 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
7841 /* Unset FCP work queue */
7842 if (phba->sli4_hba.fcp_wq) {
7843 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7845 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
7847 /* Unset mailbox command complete queue */
7848 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7849 /* Unset ELS complete queue */
7850 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7851 /* Unset FCP response complete queue */
7852 if (phba->sli4_hba.fcp_cq) {
7853 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7855 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
7857 /* Unset fast-path event queue */
7858 if (phba->sli4_hba.hba_eq) {
7859 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7861 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
7866 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
7867 * @phba: pointer to lpfc hba data structure.
7869 * This routine is invoked to allocate and set up a pool of completion queue
7870 * events. The body of the completion queue event is a completion queue entry
7871 * (CQE). For now, this pool is used for the interrupt service routine to queue
7872 * the following HBA completion queue events for the worker thread to process:
7873 * - Mailbox asynchronous events
7874 * - Receive queue completion unsolicited events
7875 * Later, this can be used for all the slow-path events.
7879 * -ENOMEM - No available memory
7882 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
7884 struct lpfc_cq_event *cq_event;
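/*
 * Pre-allocate a generous pool (four events per CQ entry) so the
 * interrupt service routine can hand slow-path completions to the
 * worker thread without allocating memory at interrupt time.
 */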
7887 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
7888 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
7890 goto out_pool_create_fail;
7891 list_add_tail(&cq_event->list,
7892 &phba->sli4_hba.sp_cqe_event_pool);
7896 out_pool_create_fail:
7897 lpfc_sli4_cq_event_pool_destroy(phba);
7902 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
7903 * @phba: pointer to lpfc hba data structure.
7905 * This routine is invoked to free the pool of completion queue events at
7906 * driver unload time. Note that it is the responsibility of the driver
7907 * cleanup routine to free all the outstanding completion-queue events
7908 * allocated from this pool back into the pool before invoking this routine
7909 * to destroy the pool.
7912 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
7914 struct lpfc_cq_event *cq_event, *next_cq_event;
7916 list_for_each_entry_safe(cq_event, next_cq_event,
7917 &phba->sli4_hba.sp_cqe_event_pool, list) {
7918 list_del(&cq_event->list);
7924 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
7925 * @phba: pointer to lpfc hba data structure.
7927 * This routine is the lock-free version of the API invoked to allocate a
7928 * completion-queue event from the free pool.
7930 * Return: Pointer to the newly allocated completion-queue event if successful
7933 struct lpfc_cq_event *
7934 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
7936 struct lpfc_cq_event *cq_event = NULL;
7938 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
7939 struct lpfc_cq_event, list);
7944 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
7945 * @phba: pointer to lpfc hba data structure.
7947 * This routine is the lock version of the API invoked to allocate a
7948 * completion-queue event from the free pool.
7950 * Return: Pointer to the newly allocated completion-queue event if successful
7953 struct lpfc_cq_event *
7954 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
7956 struct lpfc_cq_event *cq_event;
7957 unsigned long iflags;
7959 spin_lock_irqsave(&phba->hbalock, iflags);
7960 cq_event = __lpfc_sli4_cq_event_alloc(phba);
7961 spin_unlock_irqrestore(&phba->hbalock, iflags);
7966 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
7967 * @phba: pointer to lpfc hba data structure.
7968 * @cq_event: pointer to the completion queue event to be freed.
7970 * This routine is the lock-free version of the API invoked to release a
7971 * completion-queue event back into the free pool.
7974 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
7975 struct lpfc_cq_event *cq_event)
7977 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
7981 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
7982 * @phba: pointer to lpfc hba data structure.
7983 * @cq_event: pointer to the completion queue event to be freed.
7985 * This routine is the lock version of the API invoked to release a
7986 * completion-queue event back into the free pool.
7989 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
7990 struct lpfc_cq_event *cq_event)
7992 unsigned long iflags;
7993 spin_lock_irqsave(&phba->hbalock, iflags);
7994 __lpfc_sli4_cq_event_release(phba, cq_event);
7995 spin_unlock_irqrestore(&phba->hbalock, iflags);
7999 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
8000 * @phba: pointer to lpfc hba data structure.
8002 * This routine frees all the pending completion-queue events back
8003 * into the free pool for a device reset.
8006 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
8009 struct lpfc_cq_event *cqe;
8010 unsigned long iflags;
8012 /* Retrieve all the pending WCQEs from pending WCQE lists */
8013 spin_lock_irqsave(&phba->hbalock, iflags);
8014 /* Pending FCP XRI abort events */
8015 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
8017 /* Pending ELS XRI abort events */
8018 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
8020 /* Pending async events */
8021 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
8023 spin_unlock_irqrestore(&phba->hbalock, iflags);
8025 while (!list_empty(&cqelist)) {
8026 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
8027 lpfc_sli4_cq_event_release(phba, cqe);
8032 * lpfc_pci_function_reset - Reset pci function.
8033 * @phba: pointer to lpfc hba data structure.
8035 * This routine is invoked to request a PCI function reset. It destroys
8036 * all resources assigned to the PCI function that originates this request.
8040 * -ENOMEM - No available memory
8041 * -EIO - The mailbox failed to complete successfully.
8044 lpfc_pci_function_reset(struct lpfc_hba *phba)
8046 LPFC_MBOXQ_t *mboxq;
8047 uint32_t rc = 0, if_type;
8048 uint32_t shdr_status, shdr_add_status;
8050 uint32_t port_reset = 0;
8051 union lpfc_sli4_cfg_shdr *shdr;
8052 struct lpfc_register reg_data;
8055 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
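/*
 * Summary of the cases below: if_type 0 ports are reset with a
 * SLI_FUNCTION_RESET mailbox command, while if_type 2 ports are reset by
 * polling the port status register for RDY and then writing the SLIPORT
 * control register; other interface types take the default path.
 */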
8057 case LPFC_SLI_INTF_IF_TYPE_0:
8058 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8061 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8062 "0494 Unable to allocate memory for "
8063 "issuing SLI_FUNCTION_RESET mailbox "
8068 /* Setup PCI function reset mailbox-ioctl command */
8069 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8070 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
8071 LPFC_SLI4_MBX_EMBED);
8072 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8073 shdr = (union lpfc_sli4_cfg_shdr *)
8074 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
8075 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
8076 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
8078 if (rc != MBX_TIMEOUT)
8079 mempool_free(mboxq, phba->mbox_mem_pool);
8080 if (shdr_status || shdr_add_status || rc) {
8081 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8082 "0495 SLI_FUNCTION_RESET mailbox "
8083 "failed with status x%x add_status x%x,"
8084 " mbx status x%x\n",
8085 shdr_status, shdr_add_status, rc);
8089 case LPFC_SLI_INTF_IF_TYPE_2:
8092 * Poll the Port Status Register and wait for RDY for
8093 * up to 30 seconds. If the port doesn't respond, treat
8096 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
8097 if (lpfc_readl(phba->sli4_hba.u.if_type2.
8098 STATUSregaddr, &reg_data.word0)) {
8102 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
8107 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
8108 phba->work_status[0] = readl(
8109 phba->sli4_hba.u.if_type2.ERR1regaddr);
8110 phba->work_status[1] = readl(
8111 phba->sli4_hba.u.if_type2.ERR2regaddr);
8112 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8113 "2890 Port not ready, port status reg "
8114 "0x%x error 1=0x%x, error 2=0x%x\n",
8116 phba->work_status[0],
8117 phba->work_status[1]);
8124 * Reset the port now
8127 bf_set(lpfc_sliport_ctrl_end, &reg_data,
8128 LPFC_SLIPORT_LITTLE_ENDIAN);
8129 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
8130 LPFC_SLIPORT_INIT_PORT);
8131 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
8134 pci_read_config_word(phba->pcidev,
8135 PCI_DEVICE_ID, &devid);
8140 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
8146 case LPFC_SLI_INTF_IF_TYPE_1:
8152 /* Catch the not-ready port failure after a port reset. */
8154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8155 "3317 HBA not functional: IP Reset Failed "
8156 "try: echo fw_reset > board_mode\n");
8164 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
8165 * @phba: pointer to lpfc hba data structure.
8167 * This routine is invoked to set up the PCI device memory space for device
8168 * with SLI-4 interface spec.
8172 * other values - error
8175 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
8177 struct pci_dev *pdev;
8178 unsigned long bar0map_len, bar1map_len, bar2map_len;
8179 int error = -ENODEV;
8182 /* Obtain PCI device reference */
8186 pdev = phba->pcidev;
8188 /* Set the device DMA mask size */
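/*
 * The code below first tries a 64-bit DMA mask (streaming and coherent)
 * and falls back to a 32-bit mask if that fails; on newer kernels this
 * pair of calls is typically expressed as dma_set_mask_and_coherent().
 */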
8189 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
8190 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
8191 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
8192 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
8198 * The BARs and register set definitions and offset locations are
8199 * dependent on the if_type.
8201 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
8202 &phba->sli4_hba.sli_intf.word0)) {
8206 /* There is no SLI3 failback for SLI4 devices. */
8207 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
8208 LPFC_SLI_INTF_VALID) {
8209 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8210 "2894 SLI_INTF reg contents invalid "
8211 "sli_intf reg 0x%x\n",
8212 phba->sli4_hba.sli_intf.word0);
8216 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
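/*
 * Mapping summary for the code below: PCI_64BIT_BAR0 carries the SLI4
 * config registers (with a legacy fallback to PCI resource 1); on
 * if_type 0 parts PCI_64BIT_BAR2 additionally carries the HBA control
 * registers and PCI_64BIT_BAR4 the doorbell registers.
 */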
8218 * Get the bus address of the SLI4 device BAR regions and the
8219 * number of bytes required by each mapping. The mapping of the
8220 * particular PCI BAR regions is dependent on the type of
8223 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
8224 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
8225 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
8228 * Map SLI4 PCI Config Space Register base to a kernel virtual
8231 phba->sli4_hba.conf_regs_memmap_p =
8232 ioremap(phba->pci_bar0_map, bar0map_len);
8233 if (!phba->sli4_hba.conf_regs_memmap_p) {
8234 dev_printk(KERN_ERR, &pdev->dev,
8235 "ioremap failed for SLI4 PCI config "
8239 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
8240 /* Set up BAR0 PCI config space register memory map */
8241 lpfc_sli4_bar0_register_memmap(phba, if_type);
8243 phba->pci_bar0_map = pci_resource_start(pdev, 1);
8244 bar0map_len = pci_resource_len(pdev, 1);
8245 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8246 dev_printk(KERN_ERR, &pdev->dev,
8247 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
8250 phba->sli4_hba.conf_regs_memmap_p =
8251 ioremap(phba->pci_bar0_map, bar0map_len);
8252 if (!phba->sli4_hba.conf_regs_memmap_p) {
8253 dev_printk(KERN_ERR, &pdev->dev,
8254 "ioremap failed for SLI4 PCI config "
8258 lpfc_sli4_bar0_register_memmap(phba, if_type);
8261 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
8262 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
8264 * Map SLI4 if type 0 HBA Control Register base to a kernel
8265 * virtual address and setup the registers.
8267 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
8268 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
8269 phba->sli4_hba.ctrl_regs_memmap_p =
8270 ioremap(phba->pci_bar1_map, bar1map_len);
8271 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
8272 dev_printk(KERN_ERR, &pdev->dev,
8273 "ioremap failed for SLI4 HBA control registers.\n");
8274 goto out_iounmap_conf;
8276 phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
8277 lpfc_sli4_bar1_register_memmap(phba);
8280 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
8281 (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
8283 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
8284 * virtual address and setup the registers.
8286 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
8287 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
8288 phba->sli4_hba.drbl_regs_memmap_p =
8289 ioremap(phba->pci_bar2_map, bar2map_len);
8290 if (!phba->sli4_hba.drbl_regs_memmap_p) {
8291 dev_printk(KERN_ERR, &pdev->dev,
8292 "ioremap failed for SLI4 HBA doorbell registers.\n");
8293 goto out_iounmap_ctrl;
8295 phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
8296 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
8298 goto out_iounmap_all;
8304 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
8306 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
8308 iounmap(phba->sli4_hba.conf_regs_memmap_p);
8314 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
8315 * @phba: pointer to lpfc hba data structure.
8317 * This routine is invoked to unset the PCI device memory space for device
8318 * with SLI-4 interface spec.
8321 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
8324 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8327 case LPFC_SLI_INTF_IF_TYPE_0:
8328 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
8329 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
8330 iounmap(phba->sli4_hba.conf_regs_memmap_p);
8332 case LPFC_SLI_INTF_IF_TYPE_2:
8333 iounmap(phba->sli4_hba.conf_regs_memmap_p);
8335 case LPFC_SLI_INTF_IF_TYPE_1:
8337 dev_printk(KERN_ERR, &phba->pcidev->dev,
8338 "FATAL - unsupported SLI4 interface type - %d\n",
8345 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
8346 * @phba: pointer to lpfc hba data structure.
8348 * This routine is invoked to enable the MSI-X interrupt vectors to device
8349 * with SLI-3 interface specs. The kernel function pci_enable_msix_exact()
8350 * is called to enable the MSI-X vectors. Note that pci_enable_msix_exact(),
8351 * once invoked, enables either all or nothing, depending on the current
8352 * availability of PCI vector resources. The device driver is responsible
8353 * for calling the individual request_irq() to register each MSI-X vector
8354 * with an interrupt handler, which is done in this function. Note that
8355 * later, when the device is unloading, the driver should always call free_irq()
8356 * on all MSI-X vectors on which it has done request_irq() before calling
8357 * pci_disable_msix(). Failure to do so results in a BUG_ON() and the device
8358 * will be left with MSI-X enabled and its vectors leaked.
8362 * other values - error
8365 lpfc_sli_enable_msix(struct lpfc_hba *phba)
8370 /* Set up MSI-X multi-message vectors */
8371 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8372 phba->msix_entries[i].entry = i;
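/*
 * SLI-3 uses a small fixed set of MSI-X vectors (LPFC_MSIX_VECTORS):
 * vector 0 is registered with the slow-path handler and vector 1 with
 * the fast-path handler further down in this function.
 */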
8374 /* Configure MSI-X capability structure */
8375 rc = pci_enable_msix_exact(phba->pcidev, phba->msix_entries,
8378 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8379 "0420 PCI enable MSI-X failed (%d)\n", rc);
8382 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8383 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8384 "0477 MSI-X entry[%d]: vector=x%x "
8386 phba->msix_entries[i].vector,
8387 phba->msix_entries[i].entry);
8389 * Assign MSI-X vectors to interrupt handlers
8392 /* vector-0 is associated to slow-path handler */
8393 rc = request_irq(phba->msix_entries[0].vector,
8394 &lpfc_sli_sp_intr_handler, 0,
8395 LPFC_SP_DRIVER_HANDLER_NAME, phba);
8397 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8398 "0421 MSI-X slow-path request_irq failed "
8403 /* vector-1 is associated to fast-path handler */
8404 rc = request_irq(phba->msix_entries[1].vector,
8405 &lpfc_sli_fp_intr_handler, 0,
8406 LPFC_FP_DRIVER_HANDLER_NAME, phba);
8409 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8410 "0429 MSI-X fast-path request_irq failed "
8416 * Configure HBA MSI-X attention conditions to messages
8418 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8422 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8423 "0474 Unable to allocate memory for issuing "
8424 "MBOX_CONFIG_MSI command\n");
8427 rc = lpfc_config_msi(phba, pmb);
8430 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8431 if (rc != MBX_SUCCESS) {
8432 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
8433 "0351 Config MSI mailbox command failed, "
8434 "mbxCmd x%x, mbxStatus x%x\n",
8435 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
8439 /* Free memory allocated for mailbox command */
8440 mempool_free(pmb, phba->mbox_mem_pool);
8444 /* Free memory allocated for mailbox command */
8445 mempool_free(pmb, phba->mbox_mem_pool);
8448 /* free the irq already requested */
8449 free_irq(phba->msix_entries[1].vector, phba);
8452 /* free the irq already requested */
8453 free_irq(phba->msix_entries[0].vector, phba);
8456 /* Unconfigure MSI-X capability structure */
8457 pci_disable_msix(phba->pcidev);
8464 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
8465 * @phba: pointer to lpfc hba data structure.
8467 * This routine is invoked to release the MSI-X vectors and then disable the
8468 * MSI-X interrupt mode to device with SLI-3 interface spec.
8471 lpfc_sli_disable_msix(struct lpfc_hba *phba)
8475 /* Free up MSI-X multi-message vectors */
8476 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8477 free_irq(phba->msix_entries[i].vector, phba);
8479 pci_disable_msix(phba->pcidev);
8485 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
8486 * @phba: pointer to lpfc hba data structure.
8488 * This routine is invoked to enable the MSI interrupt mode to device with
8489 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
8490 * enable the MSI vector. The device driver is responsible for calling the
8491 * request_irq() to register the MSI vector with an interrupt handler, which
8492 * is done in this function.
8496 * other values - error
8499 lpfc_sli_enable_msi(struct lpfc_hba *phba)
8503 rc = pci_enable_msi(phba->pcidev);
8505 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8506 "0462 PCI enable MSI mode success.\n");
8508 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8509 "0471 PCI enable MSI mode failed (%d)\n", rc);
8513 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
8514 0, LPFC_DRIVER_NAME, phba);
8516 pci_disable_msi(phba->pcidev);
8517 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8518 "0478 MSI request_irq failed (%d)\n", rc);
8524 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
8525 * @phba: pointer to lpfc hba data structure.
8527 * This routine is invoked to disable the MSI interrupt mode to device with
8528 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector on which
8529 * it has done request_irq() before calling pci_disable_msi(). Failure to do so
8530 * results in a BUG_ON() and the device will be left with MSI enabled and its vector leaked.
8534 lpfc_sli_disable_msi(struct lpfc_hba *phba)
8536 free_irq(phba->pcidev->irq, phba);
8537 pci_disable_msi(phba->pcidev);
8542 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
8543 * @phba: pointer to lpfc hba data structure.
8545 * This routine is invoked to enable device interrupt and associate driver's
8546 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
8547 * spec. Depending on the interrupt mode configured for the driver, the driver
8548 * will try to fall back from the configured interrupt mode to an interrupt
8549 * mode which is supported by the platform, kernel, and device, in the order:
8551 * MSI-X -> MSI -> IRQ.
8555 * other values - error
8558 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8560 uint32_t intr_mode = LPFC_INTR_ERROR;
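/*
 * cfg_mode reflects the driver's use_msi configuration: 2 requests
 * MSI-X, 1 requests MSI and 0 requests INTx; each level falls back to
 * the next one below it if initialization fails.
 */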
8563 if (cfg_mode == 2) {
8564 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
8565 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
8567 /* Now, try to enable MSI-X interrupt mode */
8568 retval = lpfc_sli_enable_msix(phba);
8570 /* Indicate initialization to MSI-X mode */
8571 phba->intr_type = MSIX;
8577 /* Fallback to MSI if MSI-X initialization failed */
8578 if (cfg_mode >= 1 && phba->intr_type == NONE) {
8579 retval = lpfc_sli_enable_msi(phba);
8581 /* Indicate initialization to MSI mode */
8582 phba->intr_type = MSI;
8587 /* Fall back to INTx if both MSI-X/MSI initialization failed */
8588 if (phba->intr_type == NONE) {
8589 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
8590 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8592 /* Indicate initialization to INTx mode */
8593 phba->intr_type = INTx;
8601 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
8602 * @phba: pointer to lpfc hba data structure.
8604 * This routine is invoked to disable device interrupt and disassociate the
8605 * driver's interrupt handler(s) from interrupt vector(s) to device with
8606 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
8607 * release the interrupt vector(s) for the message signaled interrupt.
8610 lpfc_sli_disable_intr(struct lpfc_hba *phba)
8612 /* Disable the currently initialized interrupt mode */
8613 if (phba->intr_type == MSIX)
8614 lpfc_sli_disable_msix(phba);
8615 else if (phba->intr_type == MSI)
8616 lpfc_sli_disable_msi(phba);
8617 else if (phba->intr_type == INTx)
8618 free_irq(phba->pcidev->irq, phba);
8620 /* Reset interrupt management states */
8621 phba->intr_type = NONE;
8622 phba->sli.slistat.sli_intr = 0;
8628 * lpfc_find_next_cpu - Find next available CPU that matches the phys_id
8629 * @phba: pointer to lpfc hba data structure.
8631 * Find next available CPU to use for IRQ to CPU affinity.
8634 lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
8636 struct lpfc_vector_map_info *cpup;
8639 cpup = phba->sli4_hba.cpu_map;
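/*
 * First pass: look for an online CPU on this phys_id whose map entry has
 * no IRQ assigned and that has not been used yet (lpfc_used_cpu). If
 * every such CPU is taken, the code below clears lpfc_used_cpu for this
 * phys_id and retries, this time only requiring a free map entry.
 */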
8640 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8641 /* CPU must be online */
8642 if (cpu_online(cpu)) {
8643 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
8644 (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
8645 (cpup->phys_id == phys_id)) {
8653 * If we get here, we have used ALL CPUs for the specific
8654 * phys_id. Now we need to clear out lpfc_used_cpu and start
8658 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8659 if (lpfc_used_cpu[cpu] == phys_id)
8660 lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
8663 cpup = phba->sli4_hba.cpu_map;
8664 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8665 /* CPU must be online */
8666 if (cpu_online(cpu)) {
8667 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
8668 (cpup->phys_id == phys_id)) {
8674 return LPFC_VECTOR_MAP_EMPTY;
8678 * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
8679 * @phba: pointer to lpfc hba data structure.
8680 * @vectors: number of HBA vectors
8682 * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector
8683 * affinitization across multiple physical CPUs (NUMA nodes).
8684 * In addition, this routine will assign an IO channel for each CPU
8685 * to use when issuing I/Os.
8688 lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
8690 int i, idx, saved_chann, used_chann, cpu, phys_id;
8691 int max_phys_id, min_phys_id;
8692 int num_io_channel, first_cpu, chan;
8693 struct lpfc_vector_map_info *cpup;
8695 struct cpuinfo_x86 *cpuinfo;
8697 uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
8699 /* If there is no mapping, just return */
8700 if (!phba->cfg_fcp_cpu_map)
8703 /* Init cpu_map array */
8704 memset(phba->sli4_hba.cpu_map, 0xff,
8705 (sizeof(struct lpfc_vector_map_info) *
8706 phba->sli4_hba.num_present_cpu));
8712 first_cpu = LPFC_VECTOR_MAP_EMPTY;
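/*
 * The assignment below proceeds in three phases: record each CPU's
 * phys_id/core_id in cpu_map, bind every MSI-X vector to a CPU while
 * rotating across phys_ids, and finally give any remaining CPUs an IO
 * channel based on what is already mapped on their phys_id.
 */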
8714 /* Update CPU map with physical id and core id of each CPU */
8715 cpup = phba->sli4_hba.cpu_map;
8716 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8718 cpuinfo = &cpu_data(cpu);
8719 cpup->phys_id = cpuinfo->phys_proc_id;
8720 cpup->core_id = cpuinfo->cpu_core_id;
8722 /* No distinction between CPUs for other platforms */
8727 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8728 "3328 CPU physid %d coreid %d\n",
8729 cpup->phys_id, cpup->core_id);
8731 if (cpup->phys_id > max_phys_id)
8732 max_phys_id = cpup->phys_id;
8733 if (cpup->phys_id < min_phys_id)
8734 min_phys_id = cpup->phys_id;
8738 phys_id = min_phys_id;
8739 /* Now associate the HBA vectors with specific CPUs */
8740 for (idx = 0; idx < vectors; idx++) {
8741 cpup = phba->sli4_hba.cpu_map;
8742 cpu = lpfc_find_next_cpu(phba, phys_id);
8743 if (cpu == LPFC_VECTOR_MAP_EMPTY) {
8745 /* Try for all phys_id's */
8746 for (i = 1; i < max_phys_id; i++) {
8748 if (phys_id > max_phys_id)
8749 phys_id = min_phys_id;
8750 cpu = lpfc_find_next_cpu(phba, phys_id);
8751 if (cpu == LPFC_VECTOR_MAP_EMPTY)
8756 /* Use round robin for scheduling */
8757 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
8759 cpup = phba->sli4_hba.cpu_map;
8760 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
8761 cpup->channel_id = chan;
8764 if (chan >= phba->cfg_fcp_io_channel)
8768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8769 "3329 Cannot set affinity:"
8770 "Error mapping vector %d (%d)\n",
8776 if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
8777 lpfc_used_cpu[cpu] = phys_id;
8779 /* Associate vector with selected CPU */
8780 cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
8782 /* Associate IO channel with selected CPU */
8783 cpup->channel_id = idx;
8786 if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
8789 /* Now affinitize to the selected CPU */
8790 i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
8791 vector, get_cpu_mask(cpu));
8793 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8794 "3330 Set Affinity: CPU %d channel %d "
8796 cpu, cpup->channel_id,
8797 phba->sli4_hba.msix_entries[idx].vector, i);
8799 /* Spread vector mapping across multiple physical CPU nodes */
8801 if (phys_id > max_phys_id)
8802 phys_id = min_phys_id;
8806 * Finally fill in the IO channel for any remaining CPUs.
8807 * At this point, all IO channels have been assigned to a specific
8808 * MSIx vector, mapped to a specific CPU.
8809 * Base the remaining IO channel assignments on the IO channels already
8810 * assigned to other CPUs on the same phys_id.
8812 for (i = min_phys_id; i <= max_phys_id; i++) {
8814 * If there are no io channels already mapped to
8815 * this phys_id, just round-robin through the io_channels.
8816 * Set up chann[] for round-robin.
8818 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
8825 * First build a list of IO channels already assigned
8826 * to this phys_id before reassigning the same IO
8827 * channels to the remaining CPUs.
8829 cpup = phba->sli4_hba.cpu_map;
8832 for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
8834 if (cpup->phys_id == i) {
8836 * Save any IO channels that are
8837 * already mapped to this phys_id.
8839 if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
8841 LPFC_FCP_IO_CHAN_MAX) {
8842 chann[saved_chann] =
8849 /* See if we are using round-robin */
8850 if (saved_chann == 0)
8852 phba->cfg_fcp_io_channel;
8854 /* Associate next IO channel with CPU */
8855 cpup->channel_id = chann[used_chann];
8858 if (used_chann == saved_chann)
8861 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8862 "3331 Set IO_CHANN "
8863 "CPU %d channel %d\n",
8864 idx, cpup->channel_id);
8868 if (cpu >= phba->sli4_hba.num_present_cpu) {
8869 cpup = phba->sli4_hba.cpu_map;
8877 if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
8878 cpup = phba->sli4_hba.cpu_map;
8879 for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
8880 if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
8881 cpup->channel_id = 0;
8884 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8885 "3332 Assign IO_CHANN "
8886 "CPU %d channel %d\n",
8887 idx, cpup->channel_id);
8894 if (num_io_channel != phba->sli4_hba.num_present_cpu)
8895 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8896 "3333 Set affinity mismatch:"
8897 "%d chann != %d cpus: %d vectors\n",
8898 num_io_channel, phba->sli4_hba.num_present_cpu,
8901 /* Enable using cpu affinity for scheduling */
8902 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
8908 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
8909 * @phba: pointer to lpfc hba data structure.
8911 * This routine is invoked to enable the MSI-X interrupt vectors to device
8912 * with SLI-4 interface spec. The kernel function pci_enable_msix_range()
8913 * is called to enable the MSI-X vectors. The device driver is responsible
8914 * for calling the individual request_irq() to register each MSI-X vector
8915 * with an interrupt handler, which is done in this function. Note that
8916 * later, when the device is unloading, the driver should always call free_irq()
8917 * on all MSI-X vectors on which it has done request_irq() before calling
8918 * pci_disable_msix(). Failure to do so results in a BUG_ON() and the device
8919 * will be left with MSI-X enabled and its vectors leaked.
8923 * other values - error
8926 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
8928 int vectors, rc, index;
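/*
 * One MSI-X vector is requested per FCP IO channel; when cfg_fof is set,
 * one extra vector is added and its IRQ is registered with
 * lpfc_sli4_fof_intr_handler further down in this function.
 */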
8930 /* Set up MSI-X multi-message vectors */
8931 for (index = 0; index < phba->cfg_fcp_io_channel; index++)
8932 phba->sli4_hba.msix_entries[index].entry = index;
8934 /* Configure MSI-X capability structure */
8935 vectors = phba->cfg_fcp_io_channel;
8936 if (phba->cfg_fof) {
8937 phba->sli4_hba.msix_entries[index].entry = index;
8940 rc = pci_enable_msix_range(phba->pcidev, phba->sli4_hba.msix_entries,
8943 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8944 "0484 PCI enable MSI-X failed (%d)\n", rc);
8949 /* Log MSI-X vector assignment */
8950 for (index = 0; index < vectors; index++)
8951 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8952 "0489 MSI-X entry[%d]: vector=x%x "
8953 "message=%d\n", index,
8954 phba->sli4_hba.msix_entries[index].vector,
8955 phba->sli4_hba.msix_entries[index].entry);
8957 /* Assign MSI-X vectors to interrupt handlers */
8958 for (index = 0; index < vectors; index++) {
8959 memset(&phba->sli4_hba.handler_name[index], 0, 16);
8960 snprintf((char *)&phba->sli4_hba.handler_name[index],
8961 LPFC_SLI4_HANDLER_NAME_SZ,
8962 LPFC_DRIVER_HANDLER_NAME"%d", index);
8964 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8965 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8966 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
8967 if (phba->cfg_fof && (index == (vectors - 1)))
8969 phba->sli4_hba.msix_entries[index].vector,
8970 &lpfc_sli4_fof_intr_handler, 0,
8971 (char *)&phba->sli4_hba.handler_name[index],
8972 &phba->sli4_hba.fcp_eq_hdl[index]);
8975 phba->sli4_hba.msix_entries[index].vector,
8976 &lpfc_sli4_hba_intr_handler, 0,
8977 (char *)&phba->sli4_hba.handler_name[index],
8978 &phba->sli4_hba.fcp_eq_hdl[index]);
8980 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8981 "0486 MSI-X fast-path (%d) "
8982 "request_irq failed (%d)\n", index, rc);
8990 if (vectors != phba->cfg_fcp_io_channel) {
8991 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8992 "3238 Reducing IO channels to match number of "
8993 "MSI-X vectors, requested %d got %d\n",
8994 phba->cfg_fcp_io_channel, vectors);
8995 phba->cfg_fcp_io_channel = vectors;
8998 if (!shost_use_blk_mq(lpfc_shost_from_vport(phba->pport)))
8999 lpfc_sli4_set_affinity(phba, vectors);
9003 /* free the irq already requested */
9004 for (--index; index >= 0; index--) {
9005 irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
9007 free_irq(phba->sli4_hba.msix_entries[index].vector,
9008 &phba->sli4_hba.fcp_eq_hdl[index]);
9011 /* Unconfigure MSI-X capability structure */
9012 pci_disable_msix(phba->pcidev);
9019 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
9020 * @phba: pointer to lpfc hba data structure.
9022 * This routine is invoked to release the MSI-X vectors and then disable the
9023 * MSI-X interrupt mode to device with SLI-4 interface spec.
9026 lpfc_sli4_disable_msix(struct lpfc_hba *phba)
9030 /* Free up MSI-X multi-message vectors */
9031 for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
9032 irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
9034 free_irq(phba->sli4_hba.msix_entries[index].vector,
9035 &phba->sli4_hba.fcp_eq_hdl[index]);
9037 if (phba->cfg_fof) {
9038 free_irq(phba->sli4_hba.msix_entries[index].vector,
9039 &phba->sli4_hba.fcp_eq_hdl[index]);
9042 pci_disable_msix(phba->pcidev);
9048 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
9049 * @phba: pointer to lpfc hba data structure.
9051 * This routine is invoked to enable the MSI interrupt mode to device with
9052 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
9053 * to enable the MSI vector. The device driver is responsible for calling
9054 * the request_irq() to register the MSI vector with an interrupt handler,
9055 * which is done in this function.
9059 * other values - error
9062 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
9066 rc = pci_enable_msi(phba->pcidev);
9068 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9069 "0487 PCI enable MSI mode success.\n");
9071 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9072 "0488 PCI enable MSI mode failed (%d)\n", rc);
9076 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
9077 0, LPFC_DRIVER_NAME, phba);
9079 pci_disable_msi(phba->pcidev);
9080 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9081 "0490 MSI request_irq failed (%d)\n", rc);
9085 for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
9086 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
9087 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
9090 if (phba->cfg_fof) {
9091 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
9092 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
9098 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
9099 * @phba: pointer to lpfc hba data structure.
9101 * This routine is invoked to disable the MSI interrupt mode to device with
9102 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector on which
9103 * it has done request_irq() before calling pci_disable_msi(). Failure to do so
9104 * results in a BUG_ON() and the device will be left with MSI enabled and its vector leaked.
9108 lpfc_sli4_disable_msi(struct lpfc_hba *phba)
9110 free_irq(phba->pcidev->irq, phba);
9111 pci_disable_msi(phba->pcidev);
9116 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
9117 * @phba: pointer to lpfc hba data structure.
9119 * This routine is invoked to enable device interrupt and associate driver's
9120 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
9121 * interface spec. Depending on the interrupt mode configured for the driver,
9122 * the driver will try to fall back from the configured interrupt mode to an
9123 * interrupt mode which is supported by the platform, kernel, and device, in the order:
9125 * MSI-X -> MSI -> IRQ.
9129 * other values - error
9132 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
9134 uint32_t intr_mode = LPFC_INTR_ERROR;
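/*
 * As in the SLI-3 path: cfg_mode 2 tries MSI-X first, 1 tries MSI and
 * 0 uses INTx, with fallback to the next mode on failure.
 */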
9137 if (cfg_mode == 2) {
9138 /* Preparation before conf_msi mbox cmd */
9141 /* Now, try to enable MSI-X interrupt mode */
9142 retval = lpfc_sli4_enable_msix(phba);
9144 /* Indicate initialization to MSI-X mode */
9145 phba->intr_type = MSIX;
9151 /* Fallback to MSI if MSI-X initialization failed */
9152 if (cfg_mode >= 1 && phba->intr_type == NONE) {
9153 retval = lpfc_sli4_enable_msi(phba);
9155 /* Indicate initialization to MSI mode */
9156 phba->intr_type = MSI;
9161 /* Fall back to INTx if both MSI-X/MSI initialization failed */
9162 if (phba->intr_type == NONE) {
9163 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
9164 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
9166 /* Indicate initialization to INTx mode */
9167 phba->intr_type = INTx;
9169 for (index = 0; index < phba->cfg_fcp_io_channel;
9171 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
9172 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
9173 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
9176 if (phba->cfg_fof) {
9177 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
9178 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
9179 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
9188 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
9189 * @phba: pointer to lpfc hba data structure.
9191 * This routine is invoked to disable device interrupt and disassociate
9192 * the driver's interrupt handler(s) from interrupt vector(s) to device
9193 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
9194 * will release the interrupt vector(s) for the message signaled interrupt.
9197 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
9199 /* Disable the currently initialized interrupt mode */
9200 if (phba->intr_type == MSIX)
9201 lpfc_sli4_disable_msix(phba);
9202 else if (phba->intr_type == MSI)
9203 lpfc_sli4_disable_msi(phba);
9204 else if (phba->intr_type == INTx)
9205 free_irq(phba->pcidev->irq, phba);
9207 /* Reset interrupt management states */
9208 phba->intr_type = NONE;
9209 phba->sli.slistat.sli_intr = 0;
9215 * lpfc_unset_hba - Unset SLI3 hba device initialization
9216 * @phba: pointer to lpfc hba data structure.
9218 * This routine is invoked to undo the HBA device initialization steps done
9219 * for a device with SLI-3 interface spec.
9222 lpfc_unset_hba(struct lpfc_hba *phba)
9224 struct lpfc_vport *vport = phba->pport;
9225 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9227 spin_lock_irq(shost->host_lock);
9228 vport->load_flag |= FC_UNLOADING;
9229 spin_unlock_irq(shost->host_lock);
9231 kfree(phba->vpi_bmask);
9232 kfree(phba->vpi_ids);
9234 lpfc_stop_hba_timers(phba);
9236 phba->pport->work_port_events = 0;
9238 lpfc_sli_hba_down(phba);
9240 lpfc_sli_brdrestart(phba);
9242 lpfc_sli_disable_intr(phba);
9248 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
9249 * @phba: Pointer to HBA context object.
9251 * This function is called in the SLI4 code path to wait for completion
9252 * of device's XRIs exchange busy. It will check the XRI exchange busy
9253 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
9254 * that, it will check the XRI exchange busy on outstanding FCP and ELS
9255 * I/Os every 30 seconds, log an error message, and wait forever. Only when
9256 * all XRI exchange busy events complete will the driver unload proceed with
9257 * invoking the function reset ioctl mailbox command to the CNA and
9258 * the rest of the driver unload resource release.
9261 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
9264 int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
9265 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
9267 while (!fcp_xri_cmpl || !els_xri_cmpl) {
9268 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
9270 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9271 "2877 FCP XRI exchange busy "
9272 "wait time: %d seconds.\n",
9275 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9276 "2878 ELS XRI exchange busy "
9277 "wait time: %d seconds.\n",
9279 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
9280 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
9282 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
9283 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
9286 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
9288 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
9293 * lpfc_sli4_hba_unset - Unset the fcoe hba
9294 * @phba: Pointer to HBA context object.
9296 * This function is called in the SLI4 code path to reset the HBA's FCoE
9297 * function. The caller is not required to hold any lock. This routine
9298 * issues PCI function reset mailbox command to reset the FCoE function.
9299 * At the end of the function, it calls lpfc_hba_down_post function to
9300 * free any pending commands.
9303 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
9306 LPFC_MBOXQ_t *mboxq;
9307 struct pci_dev *pdev = phba->pcidev;
9309 lpfc_stop_hba_timers(phba);
9310 phba->sli4_hba.intr_enable = 0;
9313 * Gracefully wait out the potential current outstanding asynchronous
9317 /* First, block any pending async mailbox command from posted */
9318 spin_lock_irq(&phba->hbalock);
9319 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9320 spin_unlock_irq(&phba->hbalock);
9321 /* Now, try to wait it out if we can */
9322 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9324 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
9327 /* Forcefully release the outstanding mailbox command if timed out */
9328 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9329 spin_lock_irq(&phba->hbalock);
9330 mboxq = phba->sli.mbox_active;
9331 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
9332 __lpfc_mbox_cmpl_put(phba, mboxq);
9333 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9334 phba->sli.mbox_active = NULL;
9335 spin_unlock_irq(&phba->hbalock);
9338 /* Abort all iocbs associated with the hba */
9339 lpfc_sli_hba_iocb_abort(phba);
9341 /* Wait for completion of device XRI exchange busy */
9342 lpfc_sli4_xri_exchange_busy_wait(phba);
9344 /* Disable PCI subsystem interrupt */
9345 lpfc_sli4_disable_intr(phba);
9347 /* Disable SR-IOV if enabled */
9348 if (phba->cfg_sriov_nr_virtfn)
9349 pci_disable_sriov(pdev);
9351 /* Stopping the worker kthread will trigger work_done one more time */
9352 kthread_stop(phba->worker_thread);
9354 /* Reset SLI4 HBA FCoE function */
9355 lpfc_pci_function_reset(phba);
9356 lpfc_sli4_queue_destroy(phba);
9358 /* Stop the SLI4 device port */
9359 phba->pport->work_port_events = 0;
9363 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
9364 * @phba: Pointer to HBA context object.
9365 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
9367 * This function is called in the SLI4 code path to read the port's
9368 * sli4 capabilities.
9370 * This function may be called from any context that can block-wait
9371 * for the completion. The expectation is that this routine is called
9372 * typically from probe_one or from the online routine.
9375 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9378 struct lpfc_mqe *mqe;
9379 struct lpfc_pc_sli4_params *sli4_params;
9383 mqe = &mboxq->u.mqe;
9385 /* Read the port's SLI4 Parameters port capabilities */
9386 lpfc_pc_sli4_params(mboxq);
9387 if (!phba->sli4_hba.intr_enable)
9388 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9390 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
9391 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
9397 sli4_params = &phba->sli4_hba.pc_sli4_params;
9398 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
9399 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
9400 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
9401 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
9402 &mqe->un.sli4_params);
9403 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
9404 &mqe->un.sli4_params);
9405 sli4_params->proto_types = mqe->un.sli4_params.word3;
9406 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
9407 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
9408 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
9409 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
9410 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
9411 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
9412 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
9413 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
9414 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
9415 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
9416 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
9417 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
9418 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
9419 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
9420 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
9421 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
9422 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
9423 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
9424 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
9425 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
9427 /* Make sure that sge_supp_len can be handled by the driver */
9428 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
9429 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
9435 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
9436 * @phba: Pointer to HBA context object.
9437 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
9439 * This function is called in the SLI4 code path to read the port's
9440 * sli4 capabilities.
9442 * This function may be called from any context that can block-wait
9443 * for the completion. The expectation is that this routine is called
9444 * typically from probe_one or from the online routine.
9447 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9450 struct lpfc_mqe *mqe = &mboxq->u.mqe;
9451 struct lpfc_pc_sli4_params *sli4_params;
9454 struct lpfc_sli4_parameters *mbx_sli4_parameters;
9457 * By default, the driver assumes the SLI4 port requires RPI
9458 * header postings. The SLI4_PARAM response will correct this
9461 phba->sli4_hba.rpi_hdrs_in_use = 1;
9463 /* Read the port's SLI4 Config Parameters */
9464 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
9465 sizeof(struct lpfc_sli4_cfg_mhdr));
9466 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9467 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
9468 length, LPFC_SLI4_MBX_EMBED);
9469 if (!phba->sli4_hba.intr_enable)
9470 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9472 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
9473 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
9477 sli4_params = &phba->sli4_hba.pc_sli4_params;
9478 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
9479 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
9480 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
9481 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
9482 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
9483 mbx_sli4_parameters);
9484 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
9485 mbx_sli4_parameters);
9486 if (bf_get(cfg_phwq, mbx_sli4_parameters))
9487 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
9489 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
9490 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
9491 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
9492 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
9493 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
9494 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
9495 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
9496 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
9497 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
9498 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
9499 mbx_sli4_parameters);
9500 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
9501 mbx_sli4_parameters);
9502 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
9503 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
9505 /* Make sure that sge_supp_len can be handled by the driver */
9506 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
9507 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
9513 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
9514 * @pdev: pointer to PCI device
9515 * @pid: pointer to PCI device identifier
9517 * This routine is to be called to attach a device with SLI-3 interface spec
9518 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
9519 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
9520 * information of the device and driver to see if the driver states that it can
9521 * support this kind of device. If the match is successful, the driver core
9522 * invokes this routine. If this routine determines it can claim the HBA, it
9523 * does all the initialization that it needs to do to handle the HBA properly.
9526 * 0 - driver can claim the device
9527 * negative value - driver can not claim the device
9530 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
9532 struct lpfc_hba *phba;
9533 struct lpfc_vport *vport = NULL;
9534 struct Scsi_Host *shost = NULL;
9536 uint32_t cfg_mode, intr_mode;
9538 /* Allocate memory for HBA structure */
9539 phba = lpfc_hba_alloc(pdev);
9543 /* Perform generic PCI device enabling operation */
9544 error = lpfc_enable_pci_dev(phba);
9548 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
9549 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
9551 goto out_disable_pci_dev;
9553 /* Set up SLI-3 specific device PCI memory space */
9554 error = lpfc_sli_pci_mem_setup(phba);
9556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9557 "1402 Failed to set up pci memory space.\n");
9558 goto out_disable_pci_dev;
9561 /* Set up phase-1 common device driver resources */
9562 error = lpfc_setup_driver_resource_phase1(phba);
9564 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9565 "1403 Failed to set up driver resource.\n");
9566 goto out_unset_pci_mem_s3;
9569 /* Set up SLI-3 specific device driver resources */
9570 error = lpfc_sli_driver_resource_setup(phba);
9572 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9573 "1404 Failed to set up driver resource.\n");
9574 goto out_unset_pci_mem_s3;
9577 /* Initialize and populate the iocb list per host */
9578 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
9580 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9581 "1405 Failed to initialize iocb list.\n");
9582 goto out_unset_driver_resource_s3;
9585 /* Set up common device driver resources */
9586 error = lpfc_setup_driver_resource_phase2(phba);
9588 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9589 "1406 Failed to set up driver resource.\n");
9590 goto out_free_iocb_list;
9593 /* Get the default values for Model Name and Description */
9594 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9596 /* Create SCSI host to the physical port */
9597 error = lpfc_create_shost(phba);
9599 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9600 "1407 Failed to create scsi host.\n");
9601 goto out_unset_driver_resource;
9604 /* Configure sysfs attributes */
9605 vport = phba->pport;
9606 error = lpfc_alloc_sysfs_attr(vport);
9608 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9609 "1476 Failed to allocate sysfs attr\n");
9610 goto out_destroy_shost;
9613 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
9614 /* Now, try to enable interrupt and bring up the device */
9615 cfg_mode = phba->cfg_use_msi;
9617 /* Put device to a known state before enabling interrupt */
9618 lpfc_stop_port(phba);
9619 /* Configure and enable interrupt */
9620 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
9621 if (intr_mode == LPFC_INTR_ERROR) {
9622 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9623 "0431 Failed to enable interrupt.\n");
9625 goto out_free_sysfs_attr;
9627 /* SLI-3 HBA setup */
9628 if (lpfc_sli_hba_setup(phba)) {
9629 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9630 "1477 Failed to set up hba\n");
9632 goto out_remove_device;
9635 /* Wait 50ms for the interrupts of previous mailbox commands */
9637 /* Check active interrupts on message signaled interrupts */
9638 if (intr_mode == 0 ||
9639 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
9640 /* Log the current active interrupt mode */
9641 phba->intr_mode = intr_mode;
9642 lpfc_log_intr_mode(phba, intr_mode);
9645 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9646 "0447 Configure interrupt mode (%d) "
9647 "failed active interrupt test.\n",
9649 /* Disable the current interrupt mode */
9650 lpfc_sli_disable_intr(phba);
9651 /* Try next level of interrupt mode */
9652 cfg_mode = --intr_mode;
9656 /* Perform post initialization setup */
9657 lpfc_post_init_setup(phba);
9659 /* Check if there are static vports to be created. */
9660 lpfc_create_static_vport(phba);
9665 lpfc_unset_hba(phba);
9666 out_free_sysfs_attr:
9667 lpfc_free_sysfs_attr(vport);
9669 lpfc_destroy_shost(phba);
9670 out_unset_driver_resource:
9671 lpfc_unset_driver_resource_phase2(phba);
9673 lpfc_free_iocb_list(phba);
9674 out_unset_driver_resource_s3:
9675 lpfc_sli_driver_resource_unset(phba);
9676 out_unset_pci_mem_s3:
9677 lpfc_sli_pci_mem_unset(phba);
9678 out_disable_pci_dev:
9679 lpfc_disable_pci_dev(phba);
9681 scsi_host_put(shost);
9683 lpfc_hba_free(phba);
9688 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
9689 * @pdev: pointer to PCI device
9691 * This routine is to be called to detach a device with SLI-3 interface
9692 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
9693 * removed from PCI bus, it performs all the necessary cleanup for the HBA
9694 * device to be removed from the PCI subsystem properly.
9697 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
9699 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9700 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
9701 struct lpfc_vport **vports;
9702 struct lpfc_hba *phba = vport->phba;
9704 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
9706 spin_lock_irq(&phba->hbalock);
9707 vport->load_flag |= FC_UNLOADING;
9708 spin_unlock_irq(&phba->hbalock);
9710 lpfc_free_sysfs_attr(vport);
9712 /* Release all the vports against this physical port */
9713 vports = lpfc_create_vport_work_array(phba);
9715 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
9716 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
9718 fc_vport_terminate(vports[i]->fc_vport);
9720 lpfc_destroy_vport_work_array(phba, vports);
9722 /* Remove FC host and then SCSI host with the physical port */
9723 fc_remove_host(shost);
9724 scsi_remove_host(shost);
9725 lpfc_cleanup(vport);
9728 * Bring down the SLI Layer. This step disables all interrupts,
9729 * clears the rings, discards all mailbox commands, and resets
9733 /* HBA interrupt will be disabled after this call */
9734 lpfc_sli_hba_down(phba);
9735 /* Stopping the worker kthread will trigger work_done one more time */
9736 kthread_stop(phba->worker_thread);
9737 /* Final cleanup of txcmplq and reset the HBA */
9738 lpfc_sli_brdrestart(phba);
9740 kfree(phba->vpi_bmask);
9741 kfree(phba->vpi_ids);
9743 lpfc_stop_hba_timers(phba);
9744 spin_lock_irq(&phba->hbalock);
9745 list_del_init(&vport->listentry);
9746 spin_unlock_irq(&phba->hbalock);
9748 lpfc_debugfs_terminate(vport);
9750 /* Disable SR-IOV if enabled */
9751 if (phba->cfg_sriov_nr_virtfn)
9752 pci_disable_sriov(pdev);
9754 /* Disable interrupt */
9755 lpfc_sli_disable_intr(phba);
9757 scsi_host_put(shost);
9760 * Call scsi_free before mem_free since scsi bufs are released to their
9761 * corresponding pools here.
9763 lpfc_scsi_free(phba);
9764 lpfc_mem_free_all(phba);
9766 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9767 phba->hbqslimp.virt, phba->hbqslimp.phys);
9769 /* Free resources associated with SLI2 interface */
9770 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9771 phba->slim2p.virt, phba->slim2p.phys);
9773 /* unmap adapter SLIM and Control Registers */
9774 iounmap(phba->ctrl_regs_memmap_p);
9775 iounmap(phba->slim_memmap_p);
9777 lpfc_hba_free(phba);
9779 pci_release_selected_regions(pdev, bars);
9780 pci_disable_device(pdev);
9784 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
9785 * @pdev: pointer to PCI device
9786 * @msg: power management message
9788 * This routine is to be called from the kernel's PCI subsystem to support
9789 * system Power Management (PM) to device with SLI-3 interface spec. When
9790 * PM invokes this method, it quiesces the device by stopping the driver's
9791 * worker thread for the device, turning off the device's interrupts and DMA,
9792 * and bringing the device offline. Note that because the driver implements only
9793 * the minimum PM requirements for a power-aware driver's suspend/resume
9794 * support -- all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
9795 * the suspend() method are treated as SUSPEND and the driver fully
9796 * reinitializes its device during the resume() method call -- the driver sets
9797 * the device to the PCI_D3hot state in PCI config space instead of setting it
9798 * according to the @msg provided by the PM.
9801 * 0 - driver suspended the device
9805 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
9807 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9808 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9810 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9811 "0473 PCI device Power Management suspend.\n");
9813 /* Bring down the device */
9814 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
9816 kthread_stop(phba->worker_thread);
9818 /* Disable interrupt from device */
9819 lpfc_sli_disable_intr(phba);
9821 /* Save device state to PCI config space */
9822 pci_save_state(pdev);
9823 pci_set_power_state(pdev, PCI_D3hot);
9829 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
9830 * @pdev: pointer to PCI device
9832 * This routine is to be called from the kernel's PCI subsystem to support
9833 * system Power Management (PM) to device with SLI-3 interface spec. When PM
9834 * invokes this method, it restores the device's PCI config space state and
9835 * fully reinitializes the device and brings it online. Note that because the
9836 * driver implements only the minimum PM requirements for a power-aware
9837 * driver's suspend/resume support -- all possible PM messages (SUSPEND,
9838 * HIBERNATE, FREEZE) passed to the suspend() method are treated as SUSPEND
9839 * and the driver fully reinitializes its device during the resume() method
9840 * call -- the device is set to PCI_D0 directly in PCI config space before
9841 * restoring the state.
9844 * 0 - driver resumed the device
9848 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
9850 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9851 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9855 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9856 "0452 PCI device Power Management resume.\n");
9858 /* Restore device state from PCI config space */
9859 pci_set_power_state(pdev, PCI_D0);
9860 pci_restore_state(pdev);
9863 * Because the newer kernel behavior of the pci_restore_state() API call
9864 * clears the device's saved_state flag, we need to save the restored state again.
9866 pci_save_state(pdev);
9868 if (pdev->is_busmaster)
9869 pci_set_master(pdev);
9871 /* Startup the kernel thread for this host adapter. */
9872 phba->worker_thread = kthread_run(lpfc_do_work, phba,
9873 "lpfc_worker_%d", phba->brd_no);
9874 if (IS_ERR(phba->worker_thread)) {
9875 error = PTR_ERR(phba->worker_thread);
9876 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9877 "0434 PM resume failed to start worker "
9878 "thread: error=x%x.\n", error);
9882 /* Configure and enable interrupt */
9883 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
9884 if (intr_mode == LPFC_INTR_ERROR) {
9885 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9886 "0430 PM resume Failed to enable interrupt\n");
9889 phba->intr_mode = intr_mode;
9891 /* Restart HBA and bring it online */
9892 lpfc_sli_brdrestart(phba);
9895 /* Log the current active interrupt mode */
9896 lpfc_log_intr_mode(phba, phba->intr_mode);
9902 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
9903 * @phba: pointer to lpfc hba data structure.
9905 * This routine is called to prepare the SLI3 device for PCI slot recovery. It
9906 * aborts all the outstanding SCSI I/Os to the pci device.
9909 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
9911 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9912 "2723 PCI channel I/O abort preparing for recovery\n");
9915 * There may be errored I/Os through the HBA; abort all I/Os on the txcmplq
9916 * and let the SCSI mid-layer retry them to recover.
9918 lpfc_sli_abort_fcp_rings(phba);
9922 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
9923 * @phba: pointer to lpfc hba data structure.
9925 * This routine is called to prepare the SLI3 device for PCI slot reset. It
9926 * disables the device interrupt and pci device, and aborts the internal FCP
9930 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
9932 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9933 "2710 PCI channel disable preparing for reset\n");
9935 /* Block any management I/Os to the device */
9936 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
9938 /* Block all SCSI devices' I/Os on the host */
9939 lpfc_scsi_dev_block(phba);
9941 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9942 lpfc_sli_flush_fcp_rings(phba);
9944 /* stop all timers */
9945 lpfc_stop_hba_timers(phba);
9947 /* Disable interrupt and pci device */
9948 lpfc_sli_disable_intr(phba);
9949 pci_disable_device(phba->pcidev);
9953 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
9954 * @phba: pointer to lpfc hba data structure.
9956 * This routine is called to prepare the SLI3 device for the PCI slot being
9957 * permanently disabled. It blocks the SCSI transport layer traffic and flushes the FCP
9961 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
9963 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9964 "2711 PCI channel permanent disable for failure\n");
9965 /* Block all SCSI devices' I/Os on the host */
9966 lpfc_scsi_dev_block(phba);
9968 /* stop all timers */
9969 lpfc_stop_hba_timers(phba);
9971 /* Clean up all driver's outstanding SCSI I/Os */
9972 lpfc_sli_flush_fcp_rings(phba);
9976 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
9977 * @pdev: pointer to PCI device.
9978 * @state: the current PCI connection state.
9980 * This routine is called from the PCI subsystem for I/O error handling to
9981 * device with SLI-3 interface spec. This function is called by the PCI
9982 * subsystem after a PCI bus error affecting this device has been detected.
9983 * When this function is invoked, it will need to stop all the I/Os and
9984 * interrupt(s) to the device. Once that is done, it will return
9985 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
9989 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
9990 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
9991 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9993 static pci_ers_result_t
9994 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
9996 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9997 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10000 case pci_channel_io_normal:
10001 /* Non-fatal error, prepare for recovery */
10002 lpfc_sli_prep_dev_for_recover(phba);
10003 return PCI_ERS_RESULT_CAN_RECOVER;
10004 case pci_channel_io_frozen:
10005 /* Fatal error, prepare for slot reset */
10006 lpfc_sli_prep_dev_for_reset(phba);
10007 return PCI_ERS_RESULT_NEED_RESET;
10008 case pci_channel_io_perm_failure:
10009 /* Permanent failure, prepare for device down */
10010 lpfc_sli_prep_dev_for_perm_failure(phba);
10011 return PCI_ERS_RESULT_DISCONNECT;
10013 /* Unknown state, prepare and request slot reset */
10014 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10015 "0472 Unknown PCI error state: x%x\n", state);
10016 lpfc_sli_prep_dev_for_reset(phba);
10017 return PCI_ERS_RESULT_NEED_RESET;
10022 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
10023 * @pdev: pointer to PCI device.
10025 * This routine is called from the PCI subsystem for error handling to a
10026 * device with the SLI-3 interface spec. This is called after the PCI bus has been
10027 * reset to restart the PCI card from scratch, as if from a cold-boot.
10028 * During the PCI subsystem error recovery, after driver returns
10029 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
10030 * recovery and then call this routine before calling the .resume method
10031 * to recover the device. This function will initialize the HBA device,
10032 * enable the interrupt, but it will just put the HBA to offline state
10033 * without passing any I/O traffic.
10036 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
10037 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
10039 static pci_ers_result_t
10040 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
10042 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10043 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10044 struct lpfc_sli *psli = &phba->sli;
10045 uint32_t intr_mode;
10047 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
10048 if (pci_enable_device_mem(pdev)) {
10049 printk(KERN_ERR "lpfc: Cannot re-enable "
10050 "PCI device after reset.\n");
10051 return PCI_ERS_RESULT_DISCONNECT;
10054 pci_restore_state(pdev);
10057 * As the new kernel behavior of pci_restore_state() API call clears
10058 * device saved_state flag, need to save the restored state again.
10060 pci_save_state(pdev);
10062 if (pdev->is_busmaster)
10063 pci_set_master(pdev);
10065 spin_lock_irq(&phba->hbalock);
10066 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
10067 spin_unlock_irq(&phba->hbalock);
10069 /* Configure and enable interrupt */
10070 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
10071 if (intr_mode == LPFC_INTR_ERROR) {
10072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10073 "0427 Cannot re-enable interrupt after "
10075 return PCI_ERS_RESULT_DISCONNECT;
10077 phba->intr_mode = intr_mode;
10079 /* Take device offline, it will perform cleanup */
10080 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
10081 lpfc_offline(phba);
10082 lpfc_sli_brdrestart(phba);
10084 /* Log the current active interrupt mode */
10085 lpfc_log_intr_mode(phba, phba->intr_mode);
10087 return PCI_ERS_RESULT_RECOVERED;
10091 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
10092 * @pdev: pointer to PCI device
10094 * This routine is called from the PCI subsystem for error handling to a device
10095 * with the SLI-3 interface spec. It is called when kernel error recovery tells
10096 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
10097 * error recovery. After this call, traffic can start to flow from this device
10101 lpfc_io_resume_s3(struct pci_dev *pdev)
10103 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10104 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10106 /* Bring device online, it will be no-op for non-fatal error resume */
10109 /* Clean up Advanced Error Reporting (AER) if needed */
10110 if (phba->hba_flag & HBA_AER_ENABLED)
10111 pci_cleanup_aer_uncorrect_error_status(pdev);
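/*
 * Recovery flow, for reference: on a PCI channel error the PCI core walks the
 * pci_error_handlers registered in lpfc_err_handler at the bottom of this
 * file -- .error_detected first; then, when that returns
 * PCI_ERS_RESULT_NEED_RESET, .slot_reset after the bus/slot has been reset;
 * and finally .resume once recovery succeeds. The *_s3 routines above (and
 * the *_s4 routines further below) implement the per-SLI-revision behavior
 * behind those generic entry points.
 */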
10115 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
10116 * @phba: pointer to lpfc hba data structure.
10118 * returns the number of ELS/CT IOCBs to reserve
10121 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
10123 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
10125 if (phba->sli_rev == LPFC_SLI_REV4) {
10126 if (max_xri <= 100)
10128 else if (max_xri <= 256)
10130 else if (max_xri <= 512)
10132 else if (max_xri <= 1024)
10134 else if (max_xri <= 1536)
10136 else if (max_xri <= 2048)
10145 * lpfc_write_firmware - attempt to write a firmware image to the port
10146 * @fw: pointer to firmware image returned from request_firmware.
10147 * @phba: pointer to lpfc hba data structure.
10151 lpfc_write_firmware(const struct firmware *fw, void *context)
10153 struct lpfc_hba *phba = (struct lpfc_hba *)context;
10154 char fwrev[FW_REV_STR_SIZE];
10155 struct lpfc_grp_hdr *image;
10156 struct list_head dma_buffer_list;
10158 struct lpfc_dmabuf *dmabuf, *next;
10159 uint32_t offset = 0, temp_offset = 0;
10161 /* It can be null in no-wait mode, sanity check */
10166 image = (struct lpfc_grp_hdr *)fw->data;
10168 INIT_LIST_HEAD(&dma_buffer_list);
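/*
 * Validate the group object header before writing anything to the port: the
 * magic number, file type, file ID and advertised size must all match the
 * firmware image that was handed to us.
 */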
10169 if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
10170 (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
10171 LPFC_FILE_TYPE_GROUP) ||
10172 (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
10173 (be32_to_cpu(image->size) != fw->size)) {
10174 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10175 "3022 Invalid FW image found. "
10176 "Magic:%x Type:%x ID:%x\n",
10177 be32_to_cpu(image->magic_number),
10178 bf_get_be32(lpfc_grp_hdr_file_type, image),
10179 bf_get_be32(lpfc_grp_hdr_id, image));
10183 lpfc_decode_firmware_rev(phba, fwrev, 1);
10184 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
10185 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10186 "3023 Updating Firmware, Current Version:%s "
10187 "New Version:%s\n",
10188 fwrev, image->revision);
10189 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
10190 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
10196 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
10200 if (!dmabuf->virt) {
10205 list_add_tail(&dmabuf->list, &dma_buffer_list);
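/*
 * Download the image in SLI4_PAGE_SIZE chunks: fill the DMA buffer list from
 * fw->data, hand it to lpfc_wr_object(), and repeat until 'offset' reaches
 * the end of the firmware image.
 */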
10207 while (offset < fw->size) {
10208 temp_offset = offset;
10209 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
10210 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
10211 memcpy(dmabuf->virt,
10212 fw->data + temp_offset,
10213 fw->size - temp_offset);
10214 temp_offset = fw->size;
10217 memcpy(dmabuf->virt, fw->data + temp_offset,
10219 temp_offset += SLI4_PAGE_SIZE;
10221 rc = lpfc_wr_object(phba, &dma_buffer_list,
10222 (fw->size - offset), &offset);
10230 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
10231 list_del(&dmabuf->list);
10232 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
10233 dmabuf->virt, dmabuf->phys);
10236 release_firmware(fw);
10238 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10239 "3024 Firmware update done: %d.\n", rc);
10244 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
10245 * @phba: pointer to lpfc hba data structure.
10247 * This routine is called to perform a Linux generic firmware upgrade on a
10248 * device that supports such a feature.
10251 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
10253 uint8_t file_name[ELX_MODEL_NAME_SIZE];
10255 const struct firmware *fw;
10257 /* Only supported on SLI4 interface type 2 for now */
10258 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
10259 LPFC_SLI_INTF_IF_TYPE_2)
10262 snprintf(file_name, ELX_MODEL_NAME_SIZE, "/*(DEBLOBBED)*/", phba->ModelName);
10264 if (fw_upgrade == INT_FW_UPGRADE) {
10265 ret = reject_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
10266 file_name, &phba->pcidev->dev,
10267 GFP_KERNEL, (void *)phba,
10268 lpfc_write_firmware);
10269 } else if (fw_upgrade == RUN_FW_UPGRADE) {
10270 ret = reject_firmware(&fw, file_name, &phba->pcidev->dev);
10272 lpfc_write_firmware(fw, (void *)phba);
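/*
 * Usage sketch (illustrative, not additional driver code): the SLI-4 probe
 * path below requests a non-blocking upgrade when cfg_request_firmware_upgrade
 * is set, while a user-initiated upgrade may use the blocking form:
 *
 *	if (phba->cfg_request_firmware_upgrade)
 *		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
 */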
10281 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
10282 * @pdev: pointer to PCI device
10283 * @pid: pointer to PCI device identifier
10285 * This routine is called from the kernel's PCI subsystem for a device with the
10286 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
10287 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
10288 * information of the device and driver to see whether the driver
10289 * can support this kind of device. If the match is successful, the driver
10290 * core invokes this routine. If this routine determines it can claim the HBA,
10291 * it does all the initialization that it needs to do to handle the HBA
10295 * 0 - driver can claim the device
10296 * negative value - driver can not claim the device
10299 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
10301 struct lpfc_hba *phba;
10302 struct lpfc_vport *vport = NULL;
10303 struct Scsi_Host *shost = NULL;
10305 uint32_t cfg_mode, intr_mode;
10306 int adjusted_fcp_io_channel;
10308 /* Allocate memory for HBA structure */
10309 phba = lpfc_hba_alloc(pdev);
10313 /* Perform generic PCI device enabling operation */
10314 error = lpfc_enable_pci_dev(phba);
10316 goto out_free_phba;
10318 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
10319 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
10321 goto out_disable_pci_dev;
10323 /* Set up SLI-4 specific device PCI memory space */
10324 error = lpfc_sli4_pci_mem_setup(phba);
10326 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10327 "1410 Failed to set up pci memory space.\n");
10328 goto out_disable_pci_dev;
10331 /* Set up phase-1 common device driver resources */
10332 error = lpfc_setup_driver_resource_phase1(phba);
10334 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10335 "1411 Failed to set up driver resource.\n");
10336 goto out_unset_pci_mem_s4;
10339 /* Set up SLI-4 Specific device driver resources */
10340 error = lpfc_sli4_driver_resource_setup(phba);
10342 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10343 "1412 Failed to set up driver resource.\n");
10344 goto out_unset_pci_mem_s4;
10347 /* Initialize and populate the iocb list per host */
10349 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10350 "2821 initialize iocb list %d.\n",
10351 phba->cfg_iocb_cnt*1024);
10352 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
10355 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10356 "1413 Failed to initialize iocb list.\n");
10357 goto out_unset_driver_resource_s4;
10360 INIT_LIST_HEAD(&phba->active_rrq_list);
10361 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
10363 /* Set up common device driver resources */
10364 error = lpfc_setup_driver_resource_phase2(phba);
10366 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10367 "1414 Failed to set up driver resource.\n");
10368 goto out_free_iocb_list;
10371 /* Get the default values for Model Name and Description */
10372 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
10374 /* Create SCSI host to the physical port */
10375 error = lpfc_create_shost(phba);
10377 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10378 "1415 Failed to create scsi host.\n");
10379 goto out_unset_driver_resource;
10382 /* Configure sysfs attributes */
10383 vport = phba->pport;
10384 error = lpfc_alloc_sysfs_attr(vport);
10386 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10387 "1416 Failed to allocate sysfs attr\n");
10388 goto out_destroy_shost;
10391 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
10392 /* Now, trying to enable interrupt and bring up the device */
10393 cfg_mode = phba->cfg_use_msi;
10395 /* Put device to a known state before enabling interrupt */
10396 lpfc_stop_port(phba);
10397 /* Configure and enable interrupt */
10398 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
10399 if (intr_mode == LPFC_INTR_ERROR) {
10400 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10401 "0426 Failed to enable interrupt.\n");
10403 goto out_free_sysfs_attr;
10405 /* Default to single EQ for non-MSI-X: only a single interrupt vector is available */
10406 if (phba->intr_type != MSIX)
10407 adjusted_fcp_io_channel = 1;
10409 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
10410 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
10411 /* Set up SLI-4 HBA */
10412 if (lpfc_sli4_hba_setup(phba)) {
10413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10414 "1421 Failed to set up hba\n");
10416 goto out_disable_intr;
10419 /* Log the current active interrupt mode */
10420 phba->intr_mode = intr_mode;
10421 lpfc_log_intr_mode(phba, intr_mode);
10423 /* Perform post initialization setup */
10424 lpfc_post_init_setup(phba);
10426 /* check for firmware upgrade or downgrade */
10427 if (phba->cfg_request_firmware_upgrade)
10428 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
10430 /* Check if there are static vports to be created. */
10431 lpfc_create_static_vport(phba);
10435 lpfc_sli4_disable_intr(phba);
10436 out_free_sysfs_attr:
10437 lpfc_free_sysfs_attr(vport);
10439 lpfc_destroy_shost(phba);
10440 out_unset_driver_resource:
10441 lpfc_unset_driver_resource_phase2(phba);
10442 out_free_iocb_list:
10443 lpfc_free_iocb_list(phba);
10444 out_unset_driver_resource_s4:
10445 lpfc_sli4_driver_resource_unset(phba);
10446 out_unset_pci_mem_s4:
10447 lpfc_sli4_pci_mem_unset(phba);
10448 out_disable_pci_dev:
10449 lpfc_disable_pci_dev(phba);
10451 scsi_host_put(shost);
10453 lpfc_hba_free(phba);
10458 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
10459 * @pdev: pointer to PCI device
10461 * This routine is called from the kernel's PCI subsystem for a device with the
10462 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
10463 * removed from PCI bus, it performs all the necessary cleanup for the HBA
10464 * device to be removed from the PCI subsystem properly.
10467 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
10469 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10470 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
10471 struct lpfc_vport **vports;
10472 struct lpfc_hba *phba = vport->phba;
10475 /* Mark the device unloading flag */
10476 spin_lock_irq(&phba->hbalock);
10477 vport->load_flag |= FC_UNLOADING;
10478 spin_unlock_irq(&phba->hbalock);
10480 /* Free the HBA sysfs attributes */
10481 lpfc_free_sysfs_attr(vport);
10483 /* Release all the vports against this physical port */
10484 vports = lpfc_create_vport_work_array(phba);
10485 if (vports != NULL)
10486 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
10487 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
10489 fc_vport_terminate(vports[i]->fc_vport);
10491 lpfc_destroy_vport_work_array(phba, vports);
10493 /* Remove FC host and then SCSI host with the physical port */
10494 fc_remove_host(shost);
10495 scsi_remove_host(shost);
10497 /* Perform cleanup on the physical port */
10498 lpfc_cleanup(vport);
10501 * Bring down the SLI Layer. This step disables all interrupts,
10502 * clears the rings, discards all mailbox commands, and resets
10503 * the HBA FCoE function.
10505 lpfc_debugfs_terminate(vport);
10506 lpfc_sli4_hba_unset(phba);
10508 spin_lock_irq(&phba->hbalock);
10509 list_del_init(&vport->listentry);
10510 spin_unlock_irq(&phba->hbalock);
10512 /* Perform scsi free before driver resource_unset since scsi
10513 * buffers are released to their corresponding pools here.
10515 lpfc_scsi_free(phba);
10517 lpfc_sli4_driver_resource_unset(phba);
10519 /* Unmap adapter Control and Doorbell registers */
10520 lpfc_sli4_pci_mem_unset(phba);
10522 /* Release PCI resources and disable device's PCI function */
10523 scsi_host_put(shost);
10524 lpfc_disable_pci_dev(phba);
10526 /* Finally, free the driver's device data structure */
10527 lpfc_hba_free(phba);
10533 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
10534 * @pdev: pointer to PCI device
10535 * @msg: power management message
10537 * This routine is called from the kernel's PCI subsystem to support system
10538 * Power Management (PM) for a device with the SLI-4 interface spec. When PM
10539 * invokes this method, it quiesces the device by stopping the driver's worker
10540 * thread for the device, turning off the device's interrupt and DMA, and bringing
10541 * the device offline. Note that as the driver implements the minimum PM
10542 * requirements to a power-aware driver's PM support for suspend/resume -- all
10543 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
10544 * method call will be treated as SUSPEND and the driver will fully
10545 * reinitialize its device during resume() method call, the driver will set
10546 * device to PCI_D3hot state in PCI config space instead of setting it
10547 * according to the @msg provided by the PM.
10550 * 0 - driver suspended the device
10554 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
10556 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10557 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10559 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10560 "2843 PCI device Power Management suspend.\n");
10562 /* Bring down the device */
10563 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
10564 lpfc_offline(phba);
10565 kthread_stop(phba->worker_thread);
10567 /* Disable interrupt from device */
10568 lpfc_sli4_disable_intr(phba);
10569 lpfc_sli4_queue_destroy(phba);
10571 /* Save device state to PCI config space */
10572 pci_save_state(pdev);
10573 pci_set_power_state(pdev, PCI_D3hot);
10579 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
10580 * @pdev: pointer to PCI device
10582 * This routine is called from the kernel's PCI subsystem to support system
10583 * Power Management (PM) for a device with the SLI-4 interface spec. When PM invokes
10584 * this method, it restores the device's PCI config space state and fully
10585 * reinitializes the device and brings it online. Note that as the driver
10586 * implements the minimum PM requirements to a power-aware driver's PM for
10587 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
10588 * to the suspend() method call will be treated as SUSPEND and the driver
10589 * will fully reinitialize its device during resume() method call, the device
10590 * will be set to PCI_D0 directly in PCI config space before restoring the
10594 * 0 - driver suspended the device
10598 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
10600 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10601 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10602 uint32_t intr_mode;
10605 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10606 "0292 PCI device Power Management resume.\n");
10608 /* Restore device state from PCI config space */
10609 pci_set_power_state(pdev, PCI_D0);
10610 pci_restore_state(pdev);
10613 * As the new kernel behavior of pci_restore_state() API call clears
10614 * device saved_state flag, need to save the restored state again.
10616 pci_save_state(pdev);
10618 if (pdev->is_busmaster)
10619 pci_set_master(pdev);
10621 /* Startup the kernel thread for this host adapter. */
10622 phba->worker_thread = kthread_run(lpfc_do_work, phba,
10623 "lpfc_worker_%d", phba->brd_no);
10624 if (IS_ERR(phba->worker_thread)) {
10625 error = PTR_ERR(phba->worker_thread);
10626 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10627 "0293 PM resume failed to start worker "
10628 "thread: error=x%x.\n", error);
10632 /* Configure and enable interrupt */
10633 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
10634 if (intr_mode == LPFC_INTR_ERROR) {
10635 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10636 "0294 PM resume Failed to enable interrupt\n");
10639 phba->intr_mode = intr_mode;
10641 /* Restart HBA and bring it online */
10642 lpfc_sli_brdrestart(phba);
10645 /* Log the current active interrupt mode */
10646 lpfc_log_intr_mode(phba, phba->intr_mode);
10652 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
10653 * @phba: pointer to lpfc hba data structure.
10655 * This routine is called to prepare the SLI4 device for PCI slot recovery. It
10656 * aborts all the outstanding SCSI I/Os to the pci device.
10659 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
10661 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10662 "2828 PCI channel I/O abort preparing for recovery\n");
10664 * There may be errored I/Os through the HBA, abort all I/Os on txcmplq
10665 * and let the SCSI mid-layer retry them to recover.
10667 lpfc_sli_abort_fcp_rings(phba);
10671 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
10672 * @phba: pointer to lpfc hba data structure.
10674 * This routine is called to prepare the SLI4 device for PCI slot reset. It
10675 * disables the device interrupt and pci device, and aborts the internal FCP
10679 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
10681 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10682 "2826 PCI channel disable preparing for reset\n");
10684 /* Block any management I/Os to the device */
10685 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
10687 /* Block all SCSI devices' I/Os on the host */
10688 lpfc_scsi_dev_block(phba);
10690 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
10691 lpfc_sli_flush_fcp_rings(phba);
10693 /* stop all timers */
10694 lpfc_stop_hba_timers(phba);
10696 /* Disable interrupt and pci device */
10697 lpfc_sli4_disable_intr(phba);
10698 lpfc_sli4_queue_destroy(phba);
10699 pci_disable_device(phba->pcidev);
10703 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
10704 * @phba: pointer to lpfc hba data structure.
10706 * This routine is called to prepare the SLI4 device for the PCI slot being
10707 * permanently disabled. It blocks the SCSI transport layer traffic and flushes the FCP
10711 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
10713 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10714 "2827 PCI channel permanent disable for failure\n");
10716 /* Block all SCSI devices' I/Os on the host */
10717 lpfc_scsi_dev_block(phba);
10719 /* stop all timers */
10720 lpfc_stop_hba_timers(phba);
10722 /* Clean up all driver's outstanding SCSI I/Os */
10723 lpfc_sli_flush_fcp_rings(phba);
10727 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
10728 * @pdev: pointer to PCI device.
10729 * @state: the current PCI connection state.
10731 * This routine is called from the PCI subsystem for error handling to a device
10732 * with the SLI-4 interface spec. This function is called by the PCI subsystem
10733 * after a PCI bus error affecting this device has been detected. When this
10734 * function is invoked, it will need to stop all the I/Os and interrupt(s)
10735 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
10736 * for the PCI subsystem to perform proper recovery as desired.
10739 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
10740 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
10742 static pci_ers_result_t
10743 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
10745 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10746 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10749 case pci_channel_io_normal:
10750 /* Non-fatal error, prepare for recovery */
10751 lpfc_sli4_prep_dev_for_recover(phba);
10752 return PCI_ERS_RESULT_CAN_RECOVER;
10753 case pci_channel_io_frozen:
10754 /* Fatal error, prepare for slot reset */
10755 lpfc_sli4_prep_dev_for_reset(phba);
10756 return PCI_ERS_RESULT_NEED_RESET;
10757 case pci_channel_io_perm_failure:
10758 /* Permanent failure, prepare for device down */
10759 lpfc_sli4_prep_dev_for_perm_failure(phba);
10760 return PCI_ERS_RESULT_DISCONNECT;
10762 /* Unknown state, prepare and request slot reset */
10763 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10764 "2825 Unknown PCI error state: x%x\n", state);
10765 lpfc_sli4_prep_dev_for_reset(phba);
10766 return PCI_ERS_RESULT_NEED_RESET;
10771 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
10772 * @pdev: pointer to PCI device.
10774 * This routine is called from the PCI subsystem for error handling to a device
10775 * with the SLI-4 interface spec. It is called after the PCI bus has been reset to
10776 * restart the PCI card from scratch, as if from a cold-boot. During the
10777 * PCI subsystem error recovery, after the driver returns
10778 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
10779 * recovery and then call this routine before calling the .resume method to
10780 * recover the device. This function will initialize the HBA device, enable
10781 * the interrupt, but it will just put the HBA to offline state without
10782 * passing any I/O traffic.
10785 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
10786 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
10788 static pci_ers_result_t
10789 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
10791 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10792 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10793 struct lpfc_sli *psli = &phba->sli;
10794 uint32_t intr_mode;
10796 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
10797 if (pci_enable_device_mem(pdev)) {
10798 printk(KERN_ERR "lpfc: Cannot re-enable "
10799 "PCI device after reset.\n");
10800 return PCI_ERS_RESULT_DISCONNECT;
10803 pci_restore_state(pdev);
10806 * As the new kernel behavior of pci_restore_state() API call clears
10807 * device saved_state flag, need to save the restored state again.
10809 pci_save_state(pdev);
10811 if (pdev->is_busmaster)
10812 pci_set_master(pdev);
10814 spin_lock_irq(&phba->hbalock);
10815 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
10816 spin_unlock_irq(&phba->hbalock);
10818 /* Configure and enable interrupt */
10819 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
10820 if (intr_mode == LPFC_INTR_ERROR) {
10821 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10822 "2824 Cannot re-enable interrupt after "
10824 return PCI_ERS_RESULT_DISCONNECT;
10826 phba->intr_mode = intr_mode;
10828 /* Log the current active interrupt mode */
10829 lpfc_log_intr_mode(phba, phba->intr_mode);
10831 return PCI_ERS_RESULT_RECOVERED;
10835 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
10836 * @pdev: pointer to PCI device
10838 * This routine is called from the PCI subsystem for error handling to a device
10839 * with the SLI-4 interface spec. It is called when kernel error recovery tells
10840 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
10841 * error recovery. After this call, traffic can start to flow from this device
10845 lpfc_io_resume_s4(struct pci_dev *pdev)
10847 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10848 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10851 * In case of slot reset, as the function reset is performed through a
10852 * mailbox command which needs DMA to be enabled, this operation
10853 * has to be moved to the io resume phase. Taking the device offline
10854 * will perform the necessary cleanup.
10856 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
10857 /* Perform device reset */
10858 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
10859 lpfc_offline(phba);
10860 lpfc_sli_brdrestart(phba);
10861 /* Bring the device back online */
10865 /* Clean up Advanced Error Reporting (AER) if needed */
10866 if (phba->hba_flag & HBA_AER_ENABLED)
10867 pci_cleanup_aer_uncorrect_error_status(pdev);
10871 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
10872 * @pdev: pointer to PCI device
10873 * @pid: pointer to PCI device identifier
10875 * This routine is to be registered to the kernel's PCI subsystem. When an
10876 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
10877 * at PCI device-specific information of the device and driver to see whether
10878 * the driver can support this kind of device. If the match is
10879 * successful, the driver core invokes this routine. This routine dispatches
10880 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
10881 * do all the initialization that it needs to do to handle the HBA device
10885 * 0 - driver can claim the device
10886 * negative value - driver can not claim the device
10889 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
10892 struct lpfc_sli_intf intf;
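/*
 * Read the LPFC_SLI_INTF PCI config register to identify the interface type:
 * if the 'valid' field is set and the SLI revision field reports SLI-4, probe
 * through the SLI-4 path, otherwise fall back to the SLI-3 path.
 */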
10894 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
10897 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
10898 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
10899 rc = lpfc_pci_probe_one_s4(pdev, pid);
10901 rc = lpfc_pci_probe_one_s3(pdev, pid);
10907 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
10908 * @pdev: pointer to PCI device
10910 * This routine is to be registered to the kernel's PCI subsystem. When an
10911 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
10912 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
10913 * remove routine, which will perform all the necessary cleanup for the
10914 * device to be removed from the PCI subsystem properly.
10917 lpfc_pci_remove_one(struct pci_dev *pdev)
10919 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10920 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10922 switch (phba->pci_dev_grp) {
10923 case LPFC_PCI_DEV_LP:
10924 lpfc_pci_remove_one_s3(pdev);
10926 case LPFC_PCI_DEV_OC:
10927 lpfc_pci_remove_one_s4(pdev);
10930 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10931 "1424 Invalid PCI device group: 0x%x\n",
10932 phba->pci_dev_grp);
10939 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
10940 * @pdev: pointer to PCI device
10941 * @msg: power management message
10943 * This routine is to be registered to the kernel's PCI subsystem to support
10944 * system Power Management (PM). When PM invokes this method, it dispatches
10945 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
10946 * suspend the device.
10949 * 0 - driver suspended the device
10953 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
10955 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10956 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10959 switch (phba->pci_dev_grp) {
10960 case LPFC_PCI_DEV_LP:
10961 rc = lpfc_pci_suspend_one_s3(pdev, msg);
10963 case LPFC_PCI_DEV_OC:
10964 rc = lpfc_pci_suspend_one_s4(pdev, msg);
10967 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10968 "1425 Invalid PCI device group: 0x%x\n",
10969 phba->pci_dev_grp);
10976 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
10977 * @pdev: pointer to PCI device
10979 * This routine is to be registered to the kernel's PCI subsystem to support
10980 * system Power Management (PM). When PM invokes this method, it dispatches
10981 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
10982 * resume the device.
10985 * 0 - driver suspended the device
10989 lpfc_pci_resume_one(struct pci_dev *pdev)
10991 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10992 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10995 switch (phba->pci_dev_grp) {
10996 case LPFC_PCI_DEV_LP:
10997 rc = lpfc_pci_resume_one_s3(pdev);
10999 case LPFC_PCI_DEV_OC:
11000 rc = lpfc_pci_resume_one_s4(pdev);
11003 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11004 "1426 Invalid PCI device group: 0x%x\n",
11005 phba->pci_dev_grp);
11012 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
11013 * @pdev: pointer to PCI device.
11014 * @state: the current PCI connection state.
11016 * This routine is registered to the PCI subsystem for error handling. This
11017 * function is called by the PCI subsystem after a PCI bus error affecting
11018 * this device has been detected. When this routine is invoked, it dispatches
11019 * the action to the proper SLI-3 or SLI-4 device error detected handling
11020 * routine, which will perform the proper error detected operation.
11023 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
11024 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
11026 static pci_ers_result_t
11027 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
11029 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11030 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11031 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
11033 switch (phba->pci_dev_grp) {
11034 case LPFC_PCI_DEV_LP:
11035 rc = lpfc_io_error_detected_s3(pdev, state);
11037 case LPFC_PCI_DEV_OC:
11038 rc = lpfc_io_error_detected_s4(pdev, state);
11041 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11042 "1427 Invalid PCI device group: 0x%x\n",
11043 phba->pci_dev_grp);
11050 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
11051 * @pdev: pointer to PCI device.
11053 * This routine is registered to the PCI subsystem for error handling. This
11054 * function is called after PCI bus has been reset to restart the PCI card
11055 * from scratch, as if from a cold-boot. When this routine is invoked, it
11056 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
11057 * routine, which will perform the proper device reset.
11060 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
11061 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
11063 static pci_ers_result_t
11064 lpfc_io_slot_reset(struct pci_dev *pdev)
11066 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11067 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11068 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
11070 switch (phba->pci_dev_grp) {
11071 case LPFC_PCI_DEV_LP:
11072 rc = lpfc_io_slot_reset_s3(pdev);
11074 case LPFC_PCI_DEV_OC:
11075 rc = lpfc_io_slot_reset_s4(pdev);
11078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11079 "1428 Invalid PCI device group: 0x%x\n",
11080 phba->pci_dev_grp);
11087 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
11088 * @pdev: pointer to PCI device
11090 * This routine is registered to the PCI subsystem for error handling. It
11091 * is called when kernel error recovery tells the lpfc driver that it is
11092 * OK to resume normal PCI operation after PCI bus error recovery. When
11093 * this routine is invoked, it dispatches the action to the proper SLI-3
11094 * or SLI-4 device io_resume routine, which will resume the device operation.
11097 lpfc_io_resume(struct pci_dev *pdev)
11099 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11100 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11102 switch (phba->pci_dev_grp) {
11103 case LPFC_PCI_DEV_LP:
11104 lpfc_io_resume_s3(pdev);
11106 case LPFC_PCI_DEV_OC:
11107 lpfc_io_resume_s4(pdev);
11110 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11111 "1429 Invalid PCI device group: 0x%x\n",
11112 phba->pci_dev_grp);
11119 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
11120 * @phba: pointer to lpfc hba data structure.
11122 * This routine checks to see if OAS is supported for this adapter. If
11123 * supported, the configure Flash Optimized Fabric flag is set. Otherwise,
11124 * the enable oas flag is cleared and the pool created for OAS device data
11129 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
11132 if (!phba->cfg_EnableXLane)
11135 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
11139 if (phba->device_data_mem_pool)
11140 mempool_destroy(phba->device_data_mem_pool);
11141 phba->device_data_mem_pool = NULL;
11148 * lpfc_fof_queue_setup - Set up all the fof queues
11149 * @phba: pointer to lpfc hba data structure.
11151 * This routine is invoked to set up all the fof queues for the FC HBA
11156 * -ENOMEM - No available memory
11159 lpfc_fof_queue_setup(struct lpfc_hba *phba)
11161 struct lpfc_sli *psli = &phba->sli;
11164 rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
11168 if (phba->cfg_fof) {
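/*
 * Chain the OAS queues: bind the OAS completion queue to the fof event
 * queue, then bind the OAS work queue to that completion queue, mirroring
 * the regular FCP EQ->CQ->WQ setup.
 */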
11170 rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
11171 phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
11175 rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
11176 phba->sli4_hba.oas_cq, LPFC_FCP);
11180 phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
11181 phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
11187 lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
11189 lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
11195 * lpfc_fof_queue_create - Create all the fof queues
11196 * @phba: pointer to lpfc hba data structure.
11198 * This routine is invoked to allocate all the fof queues for the FC HBA
11199 * operation. For each SLI4 queue type, the parameters such as queue entry
11200 * count (queue depth) shall be taken from the module parameters. For now,
11201 * we just use some constant number as a placeholder.
11205 * -ENOMEM - No available memory
11206 * -EIO - The mailbox failed to complete successfully.
11209 lpfc_fof_queue_create(struct lpfc_hba *phba)
11211 struct lpfc_queue *qdesc;
11213 /* Create FOF EQ */
11214 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
11215 phba->sli4_hba.eq_ecount);
11219 phba->sli4_hba.fof_eq = qdesc;
11221 if (phba->cfg_fof) {
11223 /* Create OAS CQ */
11224 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
11225 phba->sli4_hba.cq_ecount);
11229 phba->sli4_hba.oas_cq = qdesc;
11231 /* Create OAS WQ */
11232 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
11233 phba->sli4_hba.wq_ecount);
11237 phba->sli4_hba.oas_wq = qdesc;
11243 lpfc_fof_queue_destroy(phba);
11248 * lpfc_fof_queue_destroy - Destroy all the fof queues
11249 * @phba: pointer to lpfc hba data structure.
11251 * This routine is invoked to release all the SLI4 queues with the FC HBA
11258 lpfc_fof_queue_destroy(struct lpfc_hba *phba)
11260 /* Release FOF Event queue */
11261 if (phba->sli4_hba.fof_eq != NULL) {
11262 lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
11263 phba->sli4_hba.fof_eq = NULL;
11266 /* Release OAS Completion queue */
11267 if (phba->sli4_hba.oas_cq != NULL) {
11268 lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
11269 phba->sli4_hba.oas_cq = NULL;
11272 /* Release OAS Work queue */
11273 if (phba->sli4_hba.oas_wq != NULL) {
11274 lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
11275 phba->sli4_hba.oas_wq = NULL;
11280 static struct pci_device_id lpfc_id_table[] = {
11281 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
11282 PCI_ANY_ID, PCI_ANY_ID, },
11283 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
11284 PCI_ANY_ID, PCI_ANY_ID, },
11285 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
11286 PCI_ANY_ID, PCI_ANY_ID, },
11287 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
11288 PCI_ANY_ID, PCI_ANY_ID, },
11289 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
11290 PCI_ANY_ID, PCI_ANY_ID, },
11291 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
11292 PCI_ANY_ID, PCI_ANY_ID, },
11293 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
11294 PCI_ANY_ID, PCI_ANY_ID, },
11295 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
11296 PCI_ANY_ID, PCI_ANY_ID, },
11297 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
11298 PCI_ANY_ID, PCI_ANY_ID, },
11299 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
11300 PCI_ANY_ID, PCI_ANY_ID, },
11301 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
11302 PCI_ANY_ID, PCI_ANY_ID, },
11303 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
11304 PCI_ANY_ID, PCI_ANY_ID, },
11305 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
11306 PCI_ANY_ID, PCI_ANY_ID, },
11307 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
11308 PCI_ANY_ID, PCI_ANY_ID, },
11309 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
11310 PCI_ANY_ID, PCI_ANY_ID, },
11311 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
11312 PCI_ANY_ID, PCI_ANY_ID, },
11313 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
11314 PCI_ANY_ID, PCI_ANY_ID, },
11315 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
11316 PCI_ANY_ID, PCI_ANY_ID, },
11317 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
11318 PCI_ANY_ID, PCI_ANY_ID, },
11319 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
11320 PCI_ANY_ID, PCI_ANY_ID, },
11321 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
11322 PCI_ANY_ID, PCI_ANY_ID, },
11323 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
11324 PCI_ANY_ID, PCI_ANY_ID, },
11325 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
11326 PCI_ANY_ID, PCI_ANY_ID, },
11327 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
11328 PCI_ANY_ID, PCI_ANY_ID, },
11329 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
11330 PCI_ANY_ID, PCI_ANY_ID, },
11331 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
11332 PCI_ANY_ID, PCI_ANY_ID, },
11333 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
11334 PCI_ANY_ID, PCI_ANY_ID, },
11335 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
11336 PCI_ANY_ID, PCI_ANY_ID, },
11337 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
11338 PCI_ANY_ID, PCI_ANY_ID, },
11339 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
11340 PCI_ANY_ID, PCI_ANY_ID, },
11341 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
11342 PCI_ANY_ID, PCI_ANY_ID, },
11343 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
11344 PCI_ANY_ID, PCI_ANY_ID, },
11345 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
11346 PCI_ANY_ID, PCI_ANY_ID, },
11347 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
11348 PCI_ANY_ID, PCI_ANY_ID, },
11349 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
11350 PCI_ANY_ID, PCI_ANY_ID, },
11351 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
11352 PCI_ANY_ID, PCI_ANY_ID, },
11353 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
11354 PCI_ANY_ID, PCI_ANY_ID, },
11355 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
11356 PCI_ANY_ID, PCI_ANY_ID, },
11357 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
11358 PCI_ANY_ID, PCI_ANY_ID, },
11359 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
11360 PCI_ANY_ID, PCI_ANY_ID, },
11361 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
11362 PCI_ANY_ID, PCI_ANY_ID, },
11363 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
11364 PCI_ANY_ID, PCI_ANY_ID, },
11365 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
11366 PCI_ANY_ID, PCI_ANY_ID, },
11367 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
11368 PCI_ANY_ID, PCI_ANY_ID, },
11369 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
11370 PCI_ANY_ID, PCI_ANY_ID, },
11371 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G6_FC,
11372 PCI_ANY_ID, PCI_ANY_ID, },
11373 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
11374 PCI_ANY_ID, PCI_ANY_ID, },
11375 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
11376 PCI_ANY_ID, PCI_ANY_ID, },
11380 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
11382 static const struct pci_error_handlers lpfc_err_handler = {
11383 .error_detected = lpfc_io_error_detected,
11384 .slot_reset = lpfc_io_slot_reset,
11385 .resume = lpfc_io_resume,
11388 static struct pci_driver lpfc_driver = {
11389 .name = LPFC_DRIVER_NAME,
11390 .id_table = lpfc_id_table,
11391 .probe = lpfc_pci_probe_one,
11392 .remove = lpfc_pci_remove_one,
11393 .shutdown = lpfc_pci_remove_one,
11394 .suspend = lpfc_pci_suspend_one,
11395 .resume = lpfc_pci_resume_one,
11396 .err_handler = &lpfc_err_handler,
11399 static const struct file_operations lpfc_mgmt_fop = {
11400 .owner = THIS_MODULE,
11403 static struct miscdevice lpfc_mgmt_dev = {
11404 .minor = MISC_DYNAMIC_MINOR,
11405 .name = "lpfcmgmt",
11406 .fops = &lpfc_mgmt_fop,
11410 * lpfc_init - lpfc module initialization routine
11412 * This routine is to be invoked when the lpfc module is loaded into the
11413 * kernel. The special kernel macro module_init() is used to indicate the
11414 * role of this routine to the kernel as lpfc module entry point.
11418 * -ENOMEM - FC attach transport failed
11419 * all others - failed
11427 printk(LPFC_MODULE_DESC "\n");
11428 printk(LPFC_COPYRIGHT "\n");
11430 error = misc_register(&lpfc_mgmt_dev);
11432 printk(KERN_ERR "Could not register lpfcmgmt device, "
11433 "misc_register returned with status %d", error);
11435 if (lpfc_enable_npiv) {
11436 lpfc_transport_functions.vport_create = lpfc_vport_create;
11437 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
11439 lpfc_transport_template =
11440 fc_attach_transport(&lpfc_transport_functions);
11441 if (lpfc_transport_template == NULL)
11443 if (lpfc_enable_npiv) {
11444 lpfc_vport_transport_template =
11445 fc_attach_transport(&lpfc_vport_transport_functions);
11446 if (lpfc_vport_transport_template == NULL) {
11447 fc_release_transport(lpfc_transport_template);
11452 /* Initialize in case vector mapping is needed */
11453 lpfc_used_cpu = NULL;
11454 lpfc_present_cpu = 0;
11455 for_each_present_cpu(cpu)
11456 lpfc_present_cpu++;
11458 error = pci_register_driver(&lpfc_driver);
11460 fc_release_transport(lpfc_transport_template);
11461 if (lpfc_enable_npiv)
11462 fc_release_transport(lpfc_vport_transport_template);
11469 * lpfc_exit - lpfc module removal routine
11471 * This routine is invoked when the lpfc module is removed from the kernel.
11472 * The special kernel macro module_exit() is used to indicate the role of
11473 * this routine to the kernel as lpfc module exit point.
11478 misc_deregister(&lpfc_mgmt_dev);
11479 pci_unregister_driver(&lpfc_driver);
11480 fc_release_transport(lpfc_transport_template);
11481 if (lpfc_enable_npiv)
11482 fc_release_transport(lpfc_vport_transport_template);
11483 if (_dump_buf_data) {
11484 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
11485 "_dump_buf_data at 0x%p\n",
11486 (1L << _dump_buf_data_order), _dump_buf_data);
11487 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
11490 if (_dump_buf_dif) {
11491 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
11492 "_dump_buf_dif at 0x%p\n",
11493 (1L << _dump_buf_dif_order), _dump_buf_dif);
11494 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
11496 kfree(lpfc_used_cpu);
11497 idr_destroy(&lpfc_hba_index);
11500 module_init(lpfc_init);
11501 module_exit(lpfc_exit);
11502 MODULE_LICENSE("GPL");
11503 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
11504 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
11505 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);