/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <scsi/scsi_tcq.h>
#include <linux/utsname.h>
/* QLAFX00 specific Mailbox implementation functions */

/*
 * qlafx00_mailbox_command
 *	Issues a mailbox command and waits for its completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS (command performed successfully)
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
 */
static int
qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
{
    int rval;
    unsigned long flags = 0;
    device_reg_t *reg;
    uint8_t abort_active;
    uint8_t io_lock_on;
    uint16_t command = 0;
    uint32_t *iptr;
    uint32_t __iomem *optr;
    uint32_t cnt;
    uint32_t mboxes;
    unsigned long wait_time;
    struct qla_hw_data *ha = vha->hw;
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

    if (ha->pdev->error_state > pci_channel_io_frozen) {
        ql_log(ql_log_warn, vha, 0x115c,
            "error_state is greater than pci_channel_io_frozen, "
            "exiting.\n");
        return QLA_FUNCTION_TIMEOUT;
    }

    if (vha->device_flags & DFLG_DEV_FAILED) {
        ql_log(ql_log_warn, vha, 0x115f,
            "Device in failed state, exiting.\n");
        return QLA_FUNCTION_TIMEOUT;
    }

    reg = ha->iobase;
    io_lock_on = base_vha->flags.init_done;

    rval = QLA_SUCCESS;
    abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
    if (ha->flags.pci_channel_io_perm_failure) {
        ql_log(ql_log_warn, vha, 0x1175,
            "Perm failure on EEH timeout MBX, exiting.\n");
        return QLA_FUNCTION_TIMEOUT;
    }

    if (ha->flags.isp82xx_fw_hung) {
        /* Setting Link-Down error */
        mcp->mb[0] = MBS_LINK_DOWN_ERROR;
        ql_log(ql_log_warn, vha, 0x1176,
            "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
        rval = QLA_FUNCTION_FAILED;
        goto premature_exit;
    }
    /*
     * Wait for active mailbox commands to finish by waiting at most tov
     * seconds. This is to serialize actual issuing of mailbox cmds during
     * non ISP abort time.
     */
    if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
        /* Timeout occurred. Return error. */
        ql_log(ql_log_warn, vha, 0x1177,
            "Cmd access timeout, cmd=0x%x, Exiting.\n",
            mcp->mb[0]);
        return QLA_FUNCTION_TIMEOUT;
    }
    ha->flags.mbox_busy = 1;

    /* Save mailbox command for debug */
    ha->mcp32 = mcp;

    ql_dbg(ql_dbg_mbx, vha, 0x1178,
        "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Load mailbox registers. */
    optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;

    iptr = mcp->mb;
    command = mcp->mb[0];
    mboxes = mcp->out_mb;

    for (cnt = 0; cnt < ha->mbx_count; cnt++) {
        if (mboxes & BIT_0)
            WRT_REG_DWORD(optr, *iptr);

        mboxes >>= 1;
        optr++;
        iptr++;
    }
    /* Issue set host interrupt command to send cmd out. */
    ha->flags.mbox_int = 0;
    clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

    ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172,
        (uint8_t *)mcp->mb, 16);
    ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173,
        ((uint8_t *)mcp->mb + 0x10), 16);
    ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174,
        ((uint8_t *)mcp->mb + 0x20), 8);

    /* Unlock mbx registers and wait for interrupt */
    ql_dbg(ql_dbg_mbx, vha, 0x1179,
        "Going to unlock irq & waiting for interrupts. "
        "jiffies=%lx.\n", jiffies);
    /* Wait for mbx cmd completion until timeout */
    if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
        set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

        QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
    } else {
        ql_dbg(ql_dbg_mbx, vha, 0x112c,
            "Cmd=%x Polling Mode.\n", command);

        QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
        while (!ha->flags.mbox_int) {
            if (time_after(jiffies, wait_time))
                break;

            /* Check for pending interrupts. */
            qla2x00_poll(ha->rsp_q_map[0]);

            if (!ha->flags.mbox_int &&
                !(IS_QLA2200(ha) &&
                command == MBC_LOAD_RISC_RAM_EXTENDED))
                usleep_range(10000, 11000);
        } /* while */
        ql_dbg(ql_dbg_mbx, vha, 0x112d,
            "Waited %d sec.\n",
            (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
    }
    /* Check whether we timed out */
    if (ha->flags.mbox_int) {
        uint32_t *iptr2;

        ql_dbg(ql_dbg_mbx, vha, 0x112e,
            "Cmd=%x completed.\n", command);

        /* Got interrupt. Clear the flag. */
        ha->flags.mbox_int = 0;
        clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

        if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
            rval = QLA_FUNCTION_FAILED;

        /* Load return mailbox registers. */
        iptr2 = mcp->mb;
        iptr = (uint32_t *)&ha->mailbox_out32[0];
        mboxes = mcp->in_mb;
        for (cnt = 0; cnt < ha->mbx_count; cnt++) {
            if (mboxes & BIT_0)
                *iptr2 = *iptr;

            mboxes >>= 1;
            iptr2++;
            iptr++;
        }
    } else {
        rval = QLA_FUNCTION_TIMEOUT;
    }

    ha->flags.mbox_busy = 0;

    /* Clean up */
    ha->mcp32 = NULL;
    if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
        ql_dbg(ql_dbg_mbx, vha, 0x113a,
            "checking for additional resp interrupt.\n");

        /* polling mode for non isp_abort commands. */
        qla2x00_poll(ha->rsp_q_map[0]);
    }
    if (rval == QLA_FUNCTION_TIMEOUT &&
        mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
        if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
            ha->flags.eeh_busy) {
            /* not in dpc. schedule it for dpc to take over. */
            ql_dbg(ql_dbg_mbx, vha, 0x115d,
                "Timeout, schedule isp_abort_needed.\n");

            if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
                !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
                !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

                ql_log(ql_log_info, base_vha, 0x115e,
                    "Mailbox cmd timeout occurred, cmd=0x%x, "
                    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
                    "abort.\n", command, mcp->mb[0],
                    ha->flags.eeh_busy);
                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                qla2xxx_wake_dpc(vha);
            }
        } else if (!abort_active) {
            /* call abort directly since we are in the DPC thread */
            ql_dbg(ql_dbg_mbx, vha, 0x1160,
                "Timeout, calling abort_isp.\n");

            if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
                !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
                !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

                ql_log(ql_log_info, base_vha, 0x1161,
                    "Mailbox cmd timeout occurred, cmd=0x%x, "
                    "mb[0]=0x%x. Scheduling ISP abort.\n",
                    command, mcp->mb[0]);

                set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
                clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                if (ha->isp_ops->abort_isp(vha)) {
                    /* Failed. retry later. */
                    set_bit(ISP_ABORT_NEEDED,
                        &vha->dpc_flags);
                }
                clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
                ql_dbg(ql_dbg_mbx, vha, 0x1162,
                    "Finished abort_isp.\n");
            }
        }
    }
premature_exit:
    /* Allow next mbx cmd to come in. */
    complete(&ha->mbx_cmd_comp);

    if (rval) {
        ql_log(ql_log_warn, base_vha, 0x1163,
            "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
            "mb[3]=%x, cmd=%x ****.\n",
            mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
    } else {
        ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
    }

    return rval;
}
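/*
 * A typical caller fills a struct mbx_cmd_32 on the stack and hands it to
 * qlafx00_mailbox_command(). A minimal sketch, mirroring
 * qlafx00_driver_shutdown() below (the MBC_/MBX_ values come from the
 * driver headers):
 *
 *	struct mbx_cmd_32 mc = { 0 };
 *	mc.mb[0] = MBC_GET_FIRMWARE_STATE;
 *	mc.out_mb = MBX_0;           // mailboxes written to the hardware
 *	mc.in_mb = MBX_1|MBX_0;      // mailboxes read back on completion
 *	mc.tov = MBX_TOV_SECONDS;
 *	rval = qlafx00_mailbox_command(vha, &mc);
 */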
/*
 * qlafx00_driver_shutdown
 *	Indicate a driver shutdown to firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
{
    int rval;
    struct mbx_cmd_32 mc;
    struct mbx_cmd_32 *mcp = &mc;

    ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166,
        "Entered %s.\n", __func__);

    mcp->mb[0] = MBC_MR_DRV_SHUTDOWN;
    mcp->out_mb = MBX_0;
    mcp->in_mb = MBX_0;
    if (tmo)
        mcp->tov = tmo;
    else
        mcp->tov = MBX_TOV_SECONDS;
    mcp->flags = 0;
    rval = qlafx00_mailbox_command(vha, mcp);

    if (rval != QLA_SUCCESS) {
        ql_dbg(ql_dbg_mbx, vha, 0x1167,
            "Failed=%x.\n", rval);
    } else {
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168,
            "Done %s.\n", __func__);
    }

    return rval;
}
/*
 * qlafx00_get_firmware_state
 *	Get adapter firmware state.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qlafx00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states)
{
    int rval;
    struct mbx_cmd_32 mc;
    struct mbx_cmd_32 *mcp = &mc;

    ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169,
        "Entered %s.\n", __func__);

    mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
    mcp->out_mb = MBX_0;
    mcp->in_mb = MBX_1|MBX_0;
    mcp->tov = MBX_TOV_SECONDS;
    mcp->flags = 0;
    rval = qlafx00_mailbox_command(vha, mcp);

    /* Return firmware states. */
    states[0] = mcp->mb[1];

    if (rval != QLA_SUCCESS) {
        ql_dbg(ql_dbg_mbx, vha, 0x116a,
            "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
    } else {
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b,
            "Done %s.\n", __func__);
    }
    return rval;
}
/*
 * qlafx00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qlafx00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
    int rval;
    struct mbx_cmd_32 mc;
    struct mbx_cmd_32 *mcp = &mc;
    struct qla_hw_data *ha = vha->hw;

    ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c,
        "Entered %s.\n", __func__);

    mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

    mcp->mb[1] = 0;
    mcp->mb[2] = MSD(ha->init_cb_dma);
    mcp->mb[3] = LSD(ha->init_cb_dma);

    mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
    mcp->in_mb = MBX_0;
    mcp->buf_size = size;
    mcp->flags = MBX_DMA_OUT;
    mcp->tov = MBX_TOV_SECONDS;
    rval = qlafx00_mailbox_command(vha, mcp);

    if (rval != QLA_SUCCESS) {
        ql_dbg(ql_dbg_mbx, vha, 0x116d,
            "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
    } else {
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e,
            "Done %s.\n", __func__);
    }
    return rval;
}
/*
 * qlafx00_mbx_reg_test
 */
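/*
 * The diagnostic below writes a fixed pattern into mailboxes 1-15; the
 * firmware is expected to echo each value back shifted by 16 registers
 * (mb[1] -> mb[17], ..., mb[15] -> mb[31]), and any mismatch fails the
 * test.
 */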
static int
qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
{
    int rval;
    struct mbx_cmd_32 mc;
    struct mbx_cmd_32 *mcp = &mc;

    ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f,
        "Entered %s.\n", __func__);

    mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
    mcp->mb[1] = 0xAAAA;
    mcp->mb[2] = 0x5555;
    mcp->mb[3] = 0xAA55;
    mcp->mb[4] = 0x55AA;
    mcp->mb[5] = 0xA5A5;
    mcp->mb[6] = 0x5A5A;
    mcp->mb[7] = 0x2525;
    mcp->mb[8] = 0xBBBB;
    mcp->mb[9] = 0x6666;
    mcp->mb[10] = 0xBB66;
    mcp->mb[11] = 0x66BB;
    mcp->mb[12] = 0xB6B6;
    mcp->mb[13] = 0x6B6B;
    mcp->mb[14] = 0x3636;
    mcp->mb[15] = 0xCCCC;
    mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
            MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
    mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
            MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;

    mcp->flags = MBX_DMA_OUT;
    mcp->tov = MBX_TOV_SECONDS;
    rval = qlafx00_mailbox_command(vha, mcp);
    if (rval == QLA_SUCCESS) {
        if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 ||
            mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA)
            rval = QLA_FUNCTION_FAILED;
        if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A ||
            mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB)
            rval = QLA_FUNCTION_FAILED;
        if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 ||
            mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6)
            rval = QLA_FUNCTION_FAILED;
        if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 ||
            mcp->mb[31] != 0xCCCC)
            rval = QLA_FUNCTION_FAILED;
    }
    if (rval != QLA_SUCCESS) {
        ql_dbg(ql_dbg_mbx, vha, 0x1170,
            "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
    } else {
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171,
            "Done %s.\n", __func__);
    }
    return rval;
}
/**
 * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_pci_config(scsi_qla_host_t *vha)
{
    uint16_t w;
    struct qla_hw_data *ha = vha->hw;

    pci_set_master(ha->pdev);
    pci_try_set_mwi(ha->pdev);

    pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
    w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
    w &= ~PCI_COMMAND_INTX_DISABLE;
    pci_write_config_word(ha->pdev, PCI_COMMAND, w);

    /* PCIe -- adjust Maximum Read Request Size (2048). */
    if (pci_is_pcie(ha->pdev))
        pcie_set_readrq(ha->pdev, 2048);

    ha->chip_revision = ha->pdev->revision;

    return QLA_SUCCESS;
}
/*
 * qlafx00_soc_cpu_reset() - Perform a warm reset of the iSA (the CPUs on
 * the SOC are reset).
 * @vha: HA context
 */
static inline void
qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
{
    unsigned long flags = 0;
    struct qla_hw_data *ha = vha->hw;
    int i, core;
    uint32_t cnt;
    uint32_t reg_val;

    spin_lock_irqsave(&ha->hardware_lock, flags);

    QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0);
    QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0);

    /* stop the XOR DMA engines */
    QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02);
    QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02);
    QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02);
    QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02);

    /* stop the IDMA engines */
    reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840);
    reg_val &= ~(1 << 12);
    QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val);

    reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844);
    reg_val &= ~(1 << 12);
    QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val);

    reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848);
    reg_val &= ~(1 << 12);
    QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val);

    reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C);
    reg_val &= ~(1 << 12);
    QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val);

    for (i = 0; i < 100000; i++) {
        if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 &&
            (QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0)
            break;
        udelay(100);
    }

    /* Set all 4 cores in reset */
    for (i = 0; i < 4; i++) {
        QLAFX00_SET_HBA_SOC_REG(ha,
            (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
        QLAFX00_SET_HBA_SOC_REG(ha,
            (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
    }

    /* Reset all units in Fabric */
    QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101));

    QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1);
    QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0);

    /* Set all 4 core Memory Power Down Registers */
    for (i = 0; i < 5; i++) {
        QLAFX00_SET_HBA_SOC_REG(ha,
            (SOC_PWR_MANAGEMENT_PWR_DOWN_REG + 4*i), (0x0));
    }

    /* Reset all interrupt control registers */
    for (i = 0; i < 115; i++) {
        QLAFX00_SET_HBA_SOC_REG(ha,
            (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0));
    }

    /* Reset Timers control registers. per core */
    for (core = 0; core < 4; core++)
        for (i = 0; i < 8; i++)
            QLAFX00_SET_HBA_SOC_REG(ha,
                (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0));

    /* Reset per core IRQ ack register */
    for (core = 0; core < 4; core++)
        QLAFX00_SET_HBA_SOC_REG(ha,
            (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF));

    /* Set Fabric control and config to defaults */
    QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
    QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));

    /* Kick in Fabric units */
    QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));

    /* Kick in Core0 to start boot process */
    QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));

    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    /* Wait 10secs for soft-reset to complete. */
    for (cnt = 10; cnt; cnt--) {
        msleep(1000);
        barrier();
    }
}
/**
 * qlafx00_soft_reset() - Soft Reset ISPFx00.
 * @vha: HA context
 */
void
qlafx00_soft_reset(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;

    if (unlikely(pci_channel_offline(ha->pdev) &&
        ha->flags.pci_channel_io_perm_failure))
        return;

    ha->isp_ops->disable_intrs(ha);
    qlafx00_soc_cpu_reset(vha);
}
/**
 * qlafx00_chip_diag() - Test ISPFx00 for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_chip_diag(scsi_qla_host_t *vha)
{
    int rval = 0;
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = ha->req_q_map[0];

    ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;

    rval = qlafx00_mbx_reg_test(vha);
    if (rval) {
        ql_log(ql_log_warn, vha, 0x1165,
            "Failed mailbox send register test\n");
    } else {
        /* Flag a successful rval */
        rval = QLA_SUCCESS;
    }
    return rval;
}
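/*
 * qlafx00_config_rings() - Reset the request/response queue indices.
 * The in/out pointers for both rings are zeroed; the trailing read forces
 * PCI posting of the writes before the firmware starts consuming IOCBs.
 */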
void
qlafx00_config_rings(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;

    WRT_REG_DWORD(&reg->req_q_in, 0);
    WRT_REG_DWORD(&reg->req_q_out, 0);

    WRT_REG_DWORD(&reg->rsp_q_in, 0);
    WRT_REG_DWORD(&reg->rsp_q_out, 0);

    /* PCI posting */
    RD_REG_DWORD(&reg->rsp_q_out);
}
char *
qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
{
    struct qla_hw_data *ha = vha->hw;

    if (pci_is_pcie(ha->pdev)) {
        strcpy(str, "PCIe iSA");
        return str;
    }
    return str;
}

char *
qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
    struct qla_hw_data *ha = vha->hw;

    snprintf(str, size, "%s", ha->mr.fw_version);
    return str;
}
void
qlafx00_enable_intrs(struct qla_hw_data *ha)
{
    unsigned long flags = 0;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    ha->interrupts_on = 1;
    QLAFX00_ENABLE_ICNTRL_REG(ha);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

void
qlafx00_disable_intrs(struct qla_hw_data *ha)
{
    unsigned long flags = 0;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    ha->interrupts_on = 0;
    QLAFX00_DISABLE_ICNTRL_REG(ha);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
int
qlafx00_abort_target(fc_port_t *fcport, uint64_t l, int tag)
{
    return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
}

int
qlafx00_lun_reset(fc_port_t *fcport, uint64_t l, int tag)
{
    return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
}
int
qlafx00_iospace_config(struct qla_hw_data *ha)
{
    if (pci_request_selected_regions(ha->pdev, ha->bars,
        QLA2XXX_DRIVER_NAME)) {
        ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
            "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
            pci_name(ha->pdev));
        goto iospace_error_exit;
    }

    /* Use MMIO operations for all accesses. */
    if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
        ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
            "Invalid pci I/O region size (%s).\n",
            pci_name(ha->pdev));
        goto iospace_error_exit;
    }
    if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
        ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
            "Invalid PCI mem BAR0 region size (%s), aborting\n",
            pci_name(ha->pdev));
        goto iospace_error_exit;
    }

    ha->cregbase =
        ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
    if (!ha->cregbase) {
        ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
            "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
        goto iospace_error_exit;
    }

    if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
        ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
            "region #2 not an MMIO resource (%s), aborting\n",
            pci_name(ha->pdev));
        goto iospace_error_exit;
    }
    if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
        ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
            "Invalid PCI mem BAR2 region size (%s), aborting\n",
            pci_name(ha->pdev));
        goto iospace_error_exit;
    }

    ha->iobase =
        ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
    if (!ha->iobase) {
        ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
            "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
        goto iospace_error_exit;
    }

    /* Determine queue resources */
    ha->max_req_queues = ha->max_rsp_queues = 1;

    ql_log_pci(ql_log_info, ha->pdev, 0x012c,
        "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n",
        ha->bars, ha->cregbase, ha->iobase);

    return 0;

iospace_error_exit:
    return -ENOMEM;
}
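/*
 * qlafx00_save_queue_ptrs() - Stash the generic driver's queue geometry.
 * The ISPFx00 request/response rings live in BAR2 MMIO space rather than
 * in host memory, so the originally allocated ring address, length, and
 * DMA handle are copied into the *_fx00 shadow fields before
 * qlafx00_config_queues() repoints the queues at the adapter-provided
 * offsets; the originals can then be restored at teardown.
 */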
static void
qlafx00_save_queue_ptrs(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = ha->req_q_map[0];
    struct rsp_que *rsp = ha->rsp_q_map[0];

    req->length_fx00 = req->length;
    req->ring_fx00 = req->ring;
    req->dma_fx00 = req->dma;

    rsp->length_fx00 = rsp->length;
    rsp->ring_fx00 = rsp->ring;
    rsp->dma_fx00 = rsp->dma;

    ql_dbg(ql_dbg_init, vha, 0x012d,
        "req: %p, ring_fx00: %p, length_fx00: 0x%x, "
        "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
        req->length_fx00, (u64)req->dma_fx00);

    ql_dbg(ql_dbg_init, vha, 0x012e,
        "rsp: %p, ring_fx00: %p, length_fx00: 0x%x, "
        "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00,
        rsp->length_fx00, (u64)rsp->dma_fx00);
}
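/*
 * qlafx00_config_queues() - Point the req/rsp rings into BAR2.
 * The firmware reports the queue offsets and lengths via the AEN mailboxes
 * (see qlafx00_init_fw_ready()); the rings are mapped at those offsets
 * within the BAR2 aperture, with req->dma/rsp->dma carrying the matching
 * bus addresses.
 */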
static int
qlafx00_config_queues(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = ha->req_q_map[0];
    struct rsp_que *rsp = ha->rsp_q_map[0];
    dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);

    req->length = ha->req_que_len;
    req->ring = (void __force *)ha->iobase + ha->req_que_off;
    req->dma = bar2_hdl + ha->req_que_off;
    if ((!req->ring) || (req->length == 0)) {
        ql_log_pci(ql_log_info, ha->pdev, 0x012f,
            "Unable to allocate memory for req_ring\n");
        return QLA_FUNCTION_FAILED;
    }

    ql_dbg(ql_dbg_init, vha, 0x0130,
        "req: %p req_ring pointer %p req len 0x%x "
        "req off 0x%x, req->dma: 0x%llx\n",
        req, req->ring, req->length,
        ha->req_que_off, (u64)req->dma);

    rsp->length = ha->rsp_que_len;
    rsp->ring = (void __force *)ha->iobase + ha->rsp_que_off;
    rsp->dma = bar2_hdl + ha->rsp_que_off;
    if ((!rsp->ring) || (rsp->length == 0)) {
        ql_log_pci(ql_log_info, ha->pdev, 0x0131,
            "Unable to allocate memory for rsp_ring\n");
        return QLA_FUNCTION_FAILED;
    }

    ql_dbg(ql_dbg_init, vha, 0x0132,
        "rsp: %p rsp_ring pointer %p rsp len 0x%x "
        "rsp off 0x%x, rsp->dma: 0x%llx\n",
        rsp, rsp->ring, rsp->length,
        ha->rsp_que_off, (u64)rsp->dma);

    return QLA_SUCCESS;
}
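/*
 * qlafx00_init_fw_ready() - Poll AEN mailbox 0 until the firmware is ready.
 * The routine runs a small state machine for up to ~30 seconds: benign
 * states are ignored, transfer/init errors trigger a soft reset, and
 * MBA_FW_RESTART_CMPLT latches the interrupt codes and queue geometry from
 * AEN mailboxes 1-7. If no recognizable AEN is pending, the shadow
 * (initval) registers are consulted instead and a driver shutdown is
 * issued so the firmware can recover.
 */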
static int
qlafx00_init_fw_ready(scsi_qla_host_t *vha)
{
    int rval = 0;
    unsigned long wtime;
    uint16_t wait_time;	/* Wait time */
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
    uint32_t aenmbx, aenmbx7 = 0;
    uint32_t pseudo_aen;
    uint32_t state[5];
    bool done = false;

    /* 30 seconds wait - Adjust if required */
    wait_time = 30;

    pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
    if (pseudo_aen == 1) {
        aenmbx7 = RD_REG_DWORD(&reg->initval7);
        ha->mbx_intr_code = MSW(aenmbx7);
        ha->rqstq_intr_code = LSW(aenmbx7);
        rval = qlafx00_driver_shutdown(vha, 10);
        if (rval != QLA_SUCCESS)
            qlafx00_soft_reset(vha);
    }

    /* wait time before firmware ready */
    wtime = jiffies + (wait_time * HZ);
    do {
        aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
        barrier();
        ql_dbg(ql_dbg_mbx, vha, 0x0133,
            "aenmbx: 0x%x\n", aenmbx);

        switch (aenmbx) {
        case MBA_FW_NOT_STARTED:
        case MBA_FW_STARTING:
            break;

        case MBA_SYSTEM_ERR:
        case MBA_REQ_TRANSFER_ERR:
        case MBA_RSP_TRANSFER_ERR:
        case MBA_FW_INIT_FAILURE:
            qlafx00_soft_reset(vha);
            break;

        case MBA_FW_RESTART_CMPLT:
            /* Set the mbx and rqstq intr code */
            aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
            ha->mbx_intr_code = MSW(aenmbx7);
            ha->rqstq_intr_code = LSW(aenmbx7);
            ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
            ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
            ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
            ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
            WRT_REG_DWORD(&reg->aenmailbox0, 0);
            RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
            ql_dbg(ql_dbg_init, vha, 0x0134,
                "f/w returned mbx_intr_code: 0x%x, "
                "rqstq_intr_code: 0x%x\n",
                ha->mbx_intr_code, ha->rqstq_intr_code);
            QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
            rval = QLA_SUCCESS;
            done = true;
            break;

        default:
            if ((aenmbx & 0xFF00) == MBA_FW_INIT_INPROGRESS)
                break;

            /* If fw is apparently not ready. In order to continue,
             * we might need to issue Mbox cmd, but the problem is
             * that the DoorBell vector values that come with the
             * 8060 AEN are most likely gone by now (and thus no
             * bell would be rung on the fw side when mbox cmd is
             * issued). We have to therefore grab the 8060 AEN
             * shadow regs (filled in by FW when the last 8060
             * AEN was being posted).
             * Do the following to determine what is needed in
             * order to get the FW ready:
             * 1. reload the 8060 AEN values from the shadow regs
             * 2. clear int status to get rid of possible pending
             *    interrupts
             * 3. issue Get FW State Mbox cmd to determine fw state
             * Set the mbx and rqstq intr code from Shadow Regs
             */
            aenmbx7 = RD_REG_DWORD(&reg->initval7);
            ha->mbx_intr_code = MSW(aenmbx7);
            ha->rqstq_intr_code = LSW(aenmbx7);
            ha->req_que_off = RD_REG_DWORD(&reg->initval1);
            ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
            ha->req_que_len = RD_REG_DWORD(&reg->initval5);
            ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
            ql_dbg(ql_dbg_init, vha, 0x0135,
                "f/w returned mbx_intr_code: 0x%x, "
                "rqstq_intr_code: 0x%x\n",
                ha->mbx_intr_code, ha->rqstq_intr_code);
            QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

            /* Get the FW state */
            rval = qlafx00_get_firmware_state(vha, state);
            if (rval != QLA_SUCCESS) {
                /* Retry if timer has not expired */
                break;
            }

            if (state[0] == FSTATE_FX00_CONFIG_WAIT) {
                /* Firmware is waiting to be
                 * initialized by driver
                 */
                rval = QLA_SUCCESS;
                done = true;
                break;
            }

            /* Issue driver shutdown and wait until f/w recovers.
             * Driver should continue to poll until 8060 AEN is
             * received indicating firmware recovery.
             */
            ql_dbg(ql_dbg_init, vha, 0x0136,
                "Sending Driver shutdown fw_state 0x%x\n",
                state[0]);

            rval = qlafx00_driver_shutdown(vha, 10);
            if (rval != QLA_SUCCESS) {
                rval = QLA_FUNCTION_FAILED;
                break;
            }
            msleep(500);

            wtime = jiffies + (wait_time * HZ);
            break;
        }

        if (!done) {
            if (time_after_eq(jiffies, wtime)) {
                ql_dbg(ql_dbg_init, vha, 0x0137,
                    "Init f/w failed: aen[7]: 0x%x\n",
                    RD_REG_DWORD(&reg->aenmailbox7));
                rval = QLA_FUNCTION_FAILED;
                done = true;
                break;
            }
            /* Delay for a while */
            msleep(500);
        }
    } while (!done);

    if (rval)
        ql_dbg(ql_dbg_init, vha, 0x0138,
            "%s **** FAILED ****.\n", __func__);
    else
        ql_dbg(ql_dbg_init, vha, 0x0139,
            "%s **** SUCCESS ****.\n", __func__);

    return rval;
}
/*
 * qlafx00_fw_ready() - Waits for firmware ready.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_fw_ready(scsi_qla_host_t *vha)
{
    int rval;
    unsigned long wtime;
    uint16_t wait_time;	/* Wait time if loop is coming ready */
    uint32_t state[5];

    rval = QLA_SUCCESS;

    wait_time = 10;

    /* wait time before firmware ready */
    wtime = jiffies + (wait_time * HZ);

    /* Wait for ISP to finish init */
    if (!vha->flags.init_done)
        ql_dbg(ql_dbg_init, vha, 0x013a,
            "Waiting for init to complete...\n");

    do {
        rval = qlafx00_get_firmware_state(vha, state);

        if (rval == QLA_SUCCESS) {
            if (state[0] == FSTATE_FX00_INITIALIZED) {
                ql_dbg(ql_dbg_init, vha, 0x013b,
                    "fw_state=%x\n", state[0]);
                rval = QLA_SUCCESS;
                break;
            }
        }
        rval = QLA_FUNCTION_FAILED;

        if (time_after_eq(jiffies, wtime))
            break;

        /* Delay for a while */
        msleep(500);

        ql_dbg(ql_dbg_init, vha, 0x013c,
            "fw_state=%x curr time=%lx.\n", state[0], jiffies);
    } while (1);

    if (rval)
        ql_dbg(ql_dbg_init, vha, 0x013d,
            "Firmware ready **** FAILED ****.\n");
    else
        ql_dbg(ql_dbg_init, vha, 0x013e,
            "Firmware ready **** SUCCESS ****.\n");

    return rval;
}
static int
qlafx00_find_all_targets(scsi_qla_host_t *vha,
    struct list_head *new_fcports)
{
    int rval;
    uint16_t tgt_id;
    fc_port_t *fcport, *new_fcport;
    int found;
    struct qla_hw_data *ha = vha->hw;

    rval = QLA_SUCCESS;

    if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
        return QLA_FUNCTION_FAILED;

    if ((atomic_read(&vha->loop_down_timer) ||
        STATE_TRANSITION(vha))) {
        atomic_set(&vha->loop_down_timer, 0);
        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        return QLA_FUNCTION_FAILED;
    }

    ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
        "Listing Target bit map...\n");
    ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
        0x2089, (uint8_t *)ha->gid_list, 32);

    /* Allocate temporary rmtport for any new rmtports discovered. */
    new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
    if (new_fcport == NULL)
        return QLA_MEMORY_ALLOC_FAILED;

    for_each_set_bit(tgt_id, (void *)ha->gid_list,
        QLAFX00_TGT_NODE_LIST_SIZE) {
        /* Send get target node info */
        new_fcport->tgt_id = tgt_id;
        rval = qlafx00_fx_disc(vha, new_fcport,
            FXDISC_GET_TGT_NODE_INFO);
        if (rval != QLA_SUCCESS) {
            ql_log(ql_log_warn, vha, 0x208a,
                "Target info scan failed -- assuming zero-entry "
                "result...\n");
            continue;
        }

        /* Locate matching device in database. */
        found = 0;
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
            if (memcmp(new_fcport->port_name,
                fcport->port_name, WWN_SIZE))
                continue;

            found++;

            /*
             * If tgt_id is same and state FCS_ONLINE, nothing
             * changed.
             */
            if (fcport->tgt_id == new_fcport->tgt_id &&
                atomic_read(&fcport->state) == FCS_ONLINE)
                break;

            /*
             * Tgt ID changed or device was marked to be updated.
             */
            ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b,
                "TGT-ID Change(%s): Present tgt id: "
                "0x%x state: 0x%x "
                "wwnn = %llx wwpn = %llx.\n",
                __func__, fcport->tgt_id,
                atomic_read(&fcport->state),
                (unsigned long long)wwn_to_u64(fcport->node_name),
                (unsigned long long)wwn_to_u64(fcport->port_name));

            ql_log(ql_log_info, vha, 0x208c,
                "TGT-ID Announce(%s): Discovered tgt "
                "id 0x%x wwnn = %llx "
                "wwpn = %llx.\n", __func__, new_fcport->tgt_id,
                (unsigned long long)
                wwn_to_u64(new_fcport->node_name),
                (unsigned long long)
                wwn_to_u64(new_fcport->port_name));

            if (atomic_read(&fcport->state) != FCS_ONLINE) {
                fcport->old_tgt_id = fcport->tgt_id;
                fcport->tgt_id = new_fcport->tgt_id;
                ql_log(ql_log_info, vha, 0x208d,
                    "TGT-ID: New fcport Added: %p\n", fcport);
                qla2x00_update_fcport(vha, fcport);
            } else {
                ql_log(ql_log_info, vha, 0x208e,
                    "Existing TGT-ID %x did not get "
                    "offline event from firmware.\n",
                    fcport->old_tgt_id);
                qla2x00_mark_device_lost(vha, fcport, 0, 0);
                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                kfree(new_fcport);
                return rval;
            }
            break;
        }

        if (found)
            continue;

        /* If device was not in our fcports list, then add it. */
        list_add_tail(&new_fcport->list, new_fcports);

        /* Allocate a new replacement fcport. */
        new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
        if (new_fcport == NULL)
            return QLA_MEMORY_ALLOC_FAILED;
    }

    kfree(new_fcport);
    return rval;
}
/*
 * qlafx00_configure_all_targets
 *	Setup target devices with node ID's.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	BIT_0 = error
 */
static int
qlafx00_configure_all_targets(scsi_qla_host_t *vha)
{
    int rval;
    fc_port_t *fcport, *rmptemp;
    LIST_HEAD(new_fcports);

    rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
        FXDISC_GET_TGT_NODE_LIST);
    if (rval != QLA_SUCCESS) {
        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        return rval;
    }

    rval = qlafx00_find_all_targets(vha, &new_fcports);
    if (rval != QLA_SUCCESS) {
        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        return rval;
    }

    /*
     * Delete all previous devices marked lost.
     */
    list_for_each_entry(fcport, &vha->vp_fcports, list) {
        if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
            break;

        if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
            if (fcport->port_type != FCT_INITIATOR)
                qla2x00_mark_device_lost(vha, fcport, 0, 0);
        }
    }

    /*
     * Add the new devices to our devices list.
     */
    list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
        if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
            break;

        qla2x00_update_fcport(vha, fcport);
        list_move_tail(&fcport->list, &vha->vp_fcports);
        ql_log(ql_log_info, vha, 0x208f,
            "Attach new target id 0x%x wwnn = %llx "
            "wwpn = %llx.\n",
            fcport->tgt_id,
            (unsigned long long)wwn_to_u64(fcport->node_name),
            (unsigned long long)wwn_to_u64(fcport->port_name));
    }

    /* Free all new device structures not processed. */
    list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
        list_del(&fcport->list);
        kfree(fcport);
    }

    return rval;
}
/*
 * qlafx00_configure_devices
 *	Updates Fibre Channel Device Database with what is actually on loop.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	1 = error.
 *	2 = database was full and device was not configured.
 */
int
qlafx00_configure_devices(scsi_qla_host_t *vha)
{
    int rval;
    unsigned long flags;

    rval = QLA_SUCCESS;

    flags = vha->dpc_flags;

    ql_dbg(ql_dbg_disc, vha, 0x2090,
        "Configure devices -- dpc flags =0x%lx\n", flags);

    rval = qlafx00_configure_all_targets(vha);

    if (rval == QLA_SUCCESS) {
        if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
            rval = QLA_FUNCTION_FAILED;
        } else {
            atomic_set(&vha->loop_state, LOOP_READY);
            ql_log(ql_log_info, vha, 0x2091,
                "Device Ready\n");
        }
    }

    if (rval) {
        ql_dbg(ql_dbg_disc, vha, 0x2092,
            "%s *** FAILED ***.\n", __func__);
    } else {
        ql_dbg(ql_dbg_disc, vha, 0x2093,
            "%s: exiting normally.\n", __func__);
    }
    return rval;
}
static void
qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp)
{
    struct qla_hw_data *ha = vha->hw;
    fc_port_t *fcport;

    vha->flags.online = 0;
    ha->mr.fw_hbt_en = 0;

    if (!critemp) {
        ha->flags.chip_reset_done = 0;
        clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        vha->qla_stats.total_isp_aborts++;
        ql_log(ql_log_info, vha, 0x013f,
            "Performing ISP error recovery - ha = %p.\n", ha);
        ha->isp_ops->reset_chip(vha);
    }

    if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer,
            QLAFX00_LOOP_DOWN_TIME);
    } else {
        if (!atomic_read(&vha->loop_down_timer))
            atomic_set(&vha->loop_down_timer,
                QLAFX00_LOOP_DOWN_TIME);
    }

    /* Clear all async request states across all VPs. */
    list_for_each_entry(fcport, &vha->vp_fcports, list) {
        fcport->flags = 0;
        if (atomic_read(&fcport->state) == FCS_ONLINE)
            qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
    }

    if (!ha->flags.eeh_busy) {
        if (critemp) {
            qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
        } else {
            /* Requeue all commands in outstanding command list. */
            qla2x00_abort_all_cmds(vha, DID_RESET << 16);
        }
    }

    qla2x00_free_irqs(vha);
    if (critemp)
        set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags);
    else
        set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);

    /* Clear the Interrupts */
    QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

    ql_log(ql_log_info, vha, 0x0140,
        "%s done - ha=%p.\n", __func__, ha);
}
/**
 * qlafx00_init_response_q_entries() - Initializes response queue entries.
 * @rsp: response queue
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 */
void
qlafx00_init_response_q_entries(struct rsp_que *rsp)
{
    uint16_t cnt;
    response_t *pkt;

    rsp->ring_ptr = rsp->ring;
    rsp->ring_index = 0;
    rsp->status_srb = NULL;
    pkt = rsp->ring_ptr;
    for (cnt = 0; cnt < rsp->length; cnt++) {
        pkt->signature = RESPONSE_PROCESSED;
        WRT_REG_DWORD((void __force __iomem *)&pkt->signature,
            RESPONSE_PROCESSED);
        pkt++;
    }
}
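/*
 * qlafx00_rescan_isp() - Re-latch queue geometry after a firmware restart.
 * This mirrors the MBA_FW_RESTART_CMPLT handling in qlafx00_init_fw_ready():
 * the interrupt codes and queue offsets/lengths are re-read from the AEN
 * mailboxes before the rings are re-initialized and requests unblocked.
 */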
int
qlafx00_rescan_isp(scsi_qla_host_t *vha)
{
    uint32_t status = QLA_FUNCTION_FAILED;
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
    uint32_t aenmbx7;

    qla2x00_request_irqs(ha, ha->rsp_q_map[0]);

    aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
    ha->mbx_intr_code = MSW(aenmbx7);
    ha->rqstq_intr_code = LSW(aenmbx7);
    ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
    ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
    ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
    ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);

    ql_dbg(ql_dbg_disc, vha, 0x2094,
        "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
        "Req que offset 0x%x Rsp que offset 0x%x\n",
        ha->mbx_intr_code, ha->rqstq_intr_code,
        ha->req_que_off, ha->rsp_que_off);

    /* Clear the Interrupts */
    QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

    status = qla2x00_init_rings(vha);
    if (!status) {
        vha->flags.online = 1;

        /* if no cable then assume it's good */
        if ((vha->device_flags & DFLG_NO_CABLE))
            status = 0;

        /* Register system information */
        if (qlafx00_fx_disc(vha,
            &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO))
            ql_dbg(ql_dbg_disc, vha, 0x2095,
                "failed to register host info\n");
    }
    scsi_unblock_requests(vha->host);
    return status;
}
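/*
 * qlafx00_timer_routine() - Per-tick housekeeping, called from the generic
 * qla2xxx timer. Four independent jobs run here: (1) firmware heartbeat
 * supervision (an ISP abort is scheduled after QLAFX00_HEARTBEAT_MISS_CNT
 * misses), (2) reset-recovery progress tracking via AEN mailbox 0,
 * (3) critical-temperature recovery polling, and (4) re-sending host info
 * that may have been incomplete during early boot.
 */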
void
qlafx00_timer_routine(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    uint32_t fw_heart_beat;
    uint32_t aenmbx0;
    struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
    uint32_t tempc;

    /* Check firmware health */
    if (ha->mr.fw_hbt_cnt)
        ha->mr.fw_hbt_cnt--;
    else {
        if ((!ha->flags.mr_reset_hdlr_active) &&
            (!test_bit(UNLOADING, &vha->dpc_flags)) &&
            (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
            (ha->mr.fw_hbt_en)) {
            fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
            if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
                ha->mr.old_fw_hbt_cnt = fw_heart_beat;
                ha->mr.fw_hbt_miss_cnt = 0;
            } else {
                ha->mr.fw_hbt_miss_cnt++;
                if (ha->mr.fw_hbt_miss_cnt ==
                    QLAFX00_HEARTBEAT_MISS_CNT) {
                    set_bit(ISP_ABORT_NEEDED,
                        &vha->dpc_flags);
                    qla2xxx_wake_dpc(vha);
                    ha->mr.fw_hbt_miss_cnt = 0;
                }
            }
        }
        ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
    }

    if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
        /* Reset recovery to be performed in timer routine */
        aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
        if (ha->mr.fw_reset_timer_exp) {
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            qla2xxx_wake_dpc(vha);
            ha->mr.fw_reset_timer_exp = 0;
        } else if (aenmbx0 == MBA_FW_RESTART_CMPLT) {
            /* Wake up DPC to rescan the targets */
            set_bit(FX00_TARGET_SCAN, &vha->dpc_flags);
            clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
            qla2xxx_wake_dpc(vha);
            ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
        } else if ((aenmbx0 == MBA_FW_STARTING) &&
            (!ha->mr.fw_hbt_en)) {
            ha->mr.fw_hbt_en = 1;
        } else if (!ha->mr.fw_reset_timer_tick) {
            if (aenmbx0 == ha->mr.old_aenmbx0_state)
                ha->mr.fw_reset_timer_exp = 1;
            ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
        } else if (aenmbx0 == 0xFFFFFFFF) {
            uint32_t data0, data1;

            data0 = QLAFX00_RD_REG(ha,
                QLAFX00_BAR1_BASE_ADDR_REG);
            data1 = QLAFX00_RD_REG(ha,
                QLAFX00_PEX0_WIN0_BASE_ADDR_REG);

            data0 &= 0xffff0000;
            data1 &= 0x0000ffff;

            QLAFX00_WR_REG(ha,
                QLAFX00_PEX0_WIN0_BASE_ADDR_REG,
                (data0 | data1));
        } else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
            ha->mr.fw_reset_timer_tick =
                QLAFX00_MAX_RESET_INTERVAL;
        } else if (aenmbx0 == MBA_FW_RESET_FCT) {
            ha->mr.fw_reset_timer_tick =
                QLAFX00_MAX_RESET_INTERVAL;
        }
        if (ha->mr.old_aenmbx0_state != aenmbx0) {
            ha->mr.old_aenmbx0_state = aenmbx0;
            ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
        }
        ha->mr.fw_reset_timer_tick--;
    }
    if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
        /*
         * Critical temperature recovery to be
         * performed in timer routine
         */
        if (ha->mr.fw_critemp_timer_tick == 0) {
            tempc = QLAFX00_GET_TEMPERATURE(ha);
            ql_dbg(ql_dbg_timer, vha, 0x6012,
                "ISPFx00(%s): Critical temp timer, "
                "current SOC temperature: %d\n",
                __func__, tempc);
            if (tempc < ha->mr.critical_temperature) {
                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                clear_bit(FX00_CRITEMP_RECOVERY,
                    &vha->dpc_flags);
                qla2xxx_wake_dpc(vha);
            }
            ha->mr.fw_critemp_timer_tick =
                QLAFX00_CRITEMP_INTERVAL;
        } else {
            ha->mr.fw_critemp_timer_tick--;
        }
    }
    if (ha->mr.host_info_resend) {
        /*
         * Incomplete host info might be sent to firmware
         * during system boot - info should be resent
         */
        if (ha->mr.hinfo_resend_timer_tick == 0) {
            ha->mr.host_info_resend = false;
            set_bit(FX00_HOST_INFO_RESEND, &vha->dpc_flags);
            ha->mr.hinfo_resend_timer_tick =
                QLAFX00_HINFO_RESEND_INTERVAL;
            qla2xxx_wake_dpc(vha);
        } else {
            ha->mr.hinfo_resend_timer_tick--;
        }
    }
}
/*
 * qlafx00_reset_initialize
 *	Re-initialize after an iSA device reset.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qlafx00_reset_initialize(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;

    if (vha->device_flags & DFLG_DEV_FAILED) {
        ql_dbg(ql_dbg_init, vha, 0x0142,
            "Device in failed state\n");
        return QLA_SUCCESS;
    }

    ha->flags.mr_reset_hdlr_active = 1;

    if (vha->flags.online) {
        scsi_block_requests(vha->host);
        qlafx00_abort_isp_cleanup(vha, false);
    }

    ql_log(ql_log_info, vha, 0x0143,
        "(%s): succeeded.\n", __func__);
    ha->flags.mr_reset_hdlr_active = 0;

    return QLA_SUCCESS;
}
/*
 * qlafx00_abort_isp
 *	Resets ISP and aborts all outstanding commands.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qlafx00_abort_isp(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;

    if (vha->flags.online) {
        if (unlikely(pci_channel_offline(ha->pdev) &&
            ha->flags.pci_channel_io_perm_failure)) {
            clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
            return QLA_SUCCESS;
        }

        scsi_block_requests(vha->host);
        qlafx00_abort_isp_cleanup(vha, false);
    } else {
        scsi_block_requests(vha->host);
        clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        vha->qla_stats.total_isp_aborts++;
        ha->isp_ops->reset_chip(vha);
        set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
        /* Clear the Interrupts */
        QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
    }

    ql_log(ql_log_info, vha, 0x0145,
        "(%s): succeeded.\n", __func__);

    return QLA_SUCCESS;
}
static inline fc_port_t*
qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
{
    fc_port_t *fcport;

    /* Check for matching device in remote port list. */
    list_for_each_entry(fcport, &vha->vp_fcports, list) {
        if (fcport->tgt_id == tgt_id) {
            ql_dbg(ql_dbg_async, vha, 0x5072,
                "Matching fcport(%p) found with TGT-ID: 0x%x "
                "and Remote TGT_ID: 0x%x\n",
                fcport, fcport->tgt_id, tgt_id);
            return fcport;
        }
    }
    return NULL;
}

static void
qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
{
    fc_port_t *fcport;

    ql_log(ql_log_info, vha, 0x5073,
        "Detach TGT-ID: 0x%x\n", tgt_id);

    fcport = qlafx00_get_fcport(vha, tgt_id);
    if (!fcport)
        return;

    qla2x00_mark_device_lost(vha, fcport, 0, 0);
}
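/*
 * qlafx00_process_aen() - DPC-side handling of deferred ISPFx00 AENs.
 * Port-update events (QLAFX00_MBA_PORT_UPDATE) drive loop state and target
 * attach/detach; link up/down and any other codes are forwarded to the FC
 * transport via fc_host_post_event(), vendor-unique by default.
 */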
int
qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
{
    int rval = 0;
    uint32_t aen_code, aen_data;

    aen_code = FCH_EVT_VENDOR_UNIQUE;
    aen_data = evt->u.aenfx.evtcode;

    switch (evt->u.aenfx.evtcode) {
    case QLAFX00_MBA_PORT_UPDATE:	/* Port database update */
        if (evt->u.aenfx.mbx[1] == 0) {
            if (evt->u.aenfx.mbx[2] == 1) {
                if (!vha->flags.fw_tgt_reported)
                    vha->flags.fw_tgt_reported = 1;
                atomic_set(&vha->loop_down_timer, 0);
                atomic_set(&vha->loop_state, LOOP_UP);
                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                qla2xxx_wake_dpc(vha);
            } else if (evt->u.aenfx.mbx[2] == 2) {
                qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]);
            }
        } else if (evt->u.aenfx.mbx[1] == 0xffff) {
            if (evt->u.aenfx.mbx[2] == 1) {
                if (!vha->flags.fw_tgt_reported)
                    vha->flags.fw_tgt_reported = 1;
                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
            } else if (evt->u.aenfx.mbx[2] == 2) {
                vha->device_flags |= DFLG_NO_CABLE;
                qla2x00_mark_all_devices_lost(vha, 1);
            }
        }
        break;
    case QLAFX00_MBA_LINK_UP:
        aen_code = FCH_EVT_LINKUP;
        aen_data = 0;
        break;
    case QLAFX00_MBA_LINK_DOWN:
        aen_code = FCH_EVT_LINKDOWN;
        aen_data = 0;
        break;
    case QLAFX00_MBA_TEMP_CRIT:	/* Critical temperature event */
        ql_log(ql_log_info, vha, 0x5082,
            "Process critical temperature event "
            "aenmb[0]: %x\n",
            evt->u.aenfx.evtcode);
        scsi_block_requests(vha->host);
        qlafx00_abort_isp_cleanup(vha, true);
        scsi_unblock_requests(vha->host);
        break;
    }

    fc_host_post_event(vha->host, fc_get_event_number(),
        aen_code, aen_data);

    return rval;
}
static void
qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
{
    u64 port_name = 0, node_name = 0;

    port_name = (unsigned long long)wwn_to_u64(pinfo->port_name);
    node_name = (unsigned long long)wwn_to_u64(pinfo->node_name);

    fc_host_node_name(vha->host) = node_name;
    fc_host_port_name(vha->host) = port_name;
    if (!pinfo->port_type)
        vha->hw->current_topology = ISP_CFG_F;
    if (pinfo->link_status == QLAFX00_LINK_STATUS_UP)
        atomic_set(&vha->loop_state, LOOP_READY);
    else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN)
        atomic_set(&vha->loop_state, LOOP_DOWN);
    vha->hw->link_data_rate = (uint16_t)pinfo->link_config;
}
static void
qla2x00_fxdisc_iocb_timeout(void *data)
{
    srb_t *sp = (srb_t *)data;
    struct srb_iocb *lio = &sp->u.iocb_cmd;

    complete(&lio->u.fxiocb.fxiocb_comp);
}

static void
qla2x00_fxdisc_sp_done(void *data, void *ptr, int res)
{
    srb_t *sp = (srb_t *)ptr;
    struct srb_iocb *lio = &sp->u.iocb_cmd;

    complete(&lio->u.fxiocb.fxiocb_comp);
}
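/*
 * qlafx00_fx_disc() - Issue a synchronous FX00 discovery IOCB.
 * fx_type selects the request (config info, port info, target node
 * info/list, host-info registration, or IOCTL abort); request/response
 * buffers are allocated coherently as dictated by the SRB_FXDISC_* flags,
 * and the caller sleeps on fxiocb_comp until the IOCB completes or its
 * timeout fires.
 */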
int
qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
{
    srb_t *sp;
    struct srb_iocb *fdisc;
    int rval = QLA_FUNCTION_FAILED;
    struct qla_hw_data *ha = vha->hw;
    struct host_system_info *phost_info;
    struct register_host_info *preg_hsi;
    struct new_utsname *p_sysid = NULL;

    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp)
        goto done;

    fdisc = &sp->u.iocb_cmd;
    switch (fx_type) {
    case FXDISC_GET_CONFIG_INFO:
        fdisc->u.fxiocb.flags =
            SRB_FXDISC_RESP_DMA_VALID;
        fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data);
        break;
    case FXDISC_GET_PORT_INFO:
        fdisc->u.fxiocb.flags =
            SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
        fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO;
        fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->port_id);
        break;
    case FXDISC_GET_TGT_NODE_INFO:
        fdisc->u.fxiocb.flags =
            SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
        fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO;
        fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->tgt_id);
        break;
    case FXDISC_GET_TGT_NODE_LIST:
        fdisc->u.fxiocb.flags =
            SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
        fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE;
        break;
    case FXDISC_REG_HOST_INFO:
        fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID;
        fdisc->u.fxiocb.req_len = sizeof(struct register_host_info);
        p_sysid = utsname();
        if (!p_sysid) {
            ql_log(ql_log_warn, vha, 0x303c,
                "Not able to get the system information\n");
            goto done_free_sp;
        }
        break;
    case FXDISC_ABORT_IOCTL:
    default:
        break;
    }
    if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
        fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev,
            fdisc->u.fxiocb.req_len,
            &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL);
        if (!fdisc->u.fxiocb.req_addr)
            goto done_free_sp;

        if (fx_type == FXDISC_REG_HOST_INFO) {
            preg_hsi = (struct register_host_info *)
                fdisc->u.fxiocb.req_addr;
            phost_info = &preg_hsi->hsi;
            memset(preg_hsi, 0, sizeof(struct register_host_info));
            phost_info->os_type = OS_TYPE_LINUX;
            strncpy(phost_info->sysname,
                p_sysid->sysname, SYSNAME_LENGTH);
            strncpy(phost_info->nodename,
                p_sysid->nodename, NODENAME_LENGTH);
            if (!strcmp(phost_info->nodename, "(none)"))
                ha->mr.host_info_resend = true;
            strncpy(phost_info->release,
                p_sysid->release, RELEASE_LENGTH);
            strncpy(phost_info->version,
                p_sysid->version, VERSION_LENGTH);
            strncpy(phost_info->machine,
                p_sysid->machine, MACHINE_LENGTH);
            strncpy(phost_info->domainname,
                p_sysid->domainname, DOMNAME_LENGTH);
            strncpy(phost_info->hostdriver,
                QLA2XXX_VERSION, VERSION_LENGTH);
            preg_hsi->utc = (uint64_t)ktime_get_real_seconds();
            ql_dbg(ql_dbg_init, vha, 0x0149,
                "ISP%04X: Host registration with firmware\n",
                ha->pdev->device);
            ql_dbg(ql_dbg_init, vha, 0x014a,
                "os_type = '%d', sysname = '%s', nodename = '%s'\n",
                phost_info->os_type,
                phost_info->sysname,
                phost_info->nodename);
            ql_dbg(ql_dbg_init, vha, 0x014b,
                "release = '%s', version = '%s'\n",
                phost_info->release,
                phost_info->version);
            ql_dbg(ql_dbg_init, vha, 0x014c,
                "machine = '%s' "
                "domainname = '%s', hostdriver = '%s'\n",
                phost_info->machine,
                phost_info->domainname,
                phost_info->hostdriver);
            ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
                (uint8_t *)phost_info,
                sizeof(struct host_system_info));
        }
    }
    if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
        fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev,
            fdisc->u.fxiocb.rsp_len,
            &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL);
        if (!fdisc->u.fxiocb.rsp_addr)
            goto done_unmap_req;
    }

    sp->type = SRB_FXIOCB_DCMD;
    sp->name = "fxdisc";
    qla2x00_init_timer(sp, FXDISC_TIMEOUT);
    fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
    fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
    sp->done = qla2x00_fxdisc_sp_done;

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS)
        goto done_unmap_dma;

    wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp);
    if (fx_type == FXDISC_GET_CONFIG_INFO) {
        struct config_info_data *pinfo =
            (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
        strcpy(vha->hw->model_number, pinfo->model_num);
        strcpy(vha->hw->model_desc, pinfo->model_description);
        memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
            sizeof(vha->hw->mr.symbolic_name));
        memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
            sizeof(vha->hw->mr.serial_num));
        memcpy(&vha->hw->mr.hw_version, pinfo->hw_version,
            sizeof(vha->hw->mr.hw_version));
        memcpy(&vha->hw->mr.fw_version, pinfo->fw_version,
            sizeof(vha->hw->mr.fw_version));
        strim(vha->hw->mr.fw_version);
        memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version,
            sizeof(vha->hw->mr.uboot_version));
        memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
            sizeof(vha->hw->mr.fru_serial_num));
        vha->hw->mr.critical_temperature =
            (pinfo->nominal_temp_value) ?
            pinfo->nominal_temp_value : QLAFX00_CRITEMP_THRSHLD;
        ha->mr.extended_io_enabled = (pinfo->enabled_capabilities &
            QLAFX00_EXTENDED_IO_EN_MASK) != 0;
    } else if (fx_type == FXDISC_GET_PORT_INFO) {
        struct port_info_data *pinfo =
            (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
        memcpy(vha->node_name, pinfo->node_name, WWN_SIZE);
        memcpy(vha->port_name, pinfo->port_name, WWN_SIZE);
        vha->d_id.b.domain = pinfo->port_id[0];
        vha->d_id.b.area = pinfo->port_id[1];
        vha->d_id.b.al_pa = pinfo->port_id[2];
        qlafx00_update_host_attr(vha, pinfo);
        ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
            (uint8_t *)pinfo, 16);
    } else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
        struct qlafx00_tgt_node_info *pinfo =
            (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
        memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE);
        memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
        fcport->port_type = FCT_TARGET;
        ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
            (uint8_t *)pinfo, 16);
    } else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
        struct qlafx00_tgt_node_info *pinfo =
            (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
        ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
            (uint8_t *)pinfo, 16);
        memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
    } else if (fx_type == FXDISC_ABORT_IOCTL)
        fdisc->u.fxiocb.result =
            (fdisc->u.fxiocb.result ==
                cpu_to_le32(QLAFX00_IOCTL_ICOB_ABORT_SUCCESS)) ?
            cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED);

    rval = le32_to_cpu(fdisc->u.fxiocb.result);
done_unmap_dma:
    if (fdisc->u.fxiocb.rsp_addr)
        dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len,
            fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle);

done_unmap_req:
    if (fdisc->u.fxiocb.req_addr)
        dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
            fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);

done_free_sp:
    sp->free(vha, sp);
done:
    return rval;
}
/*
 * qlafx00_initialize_adapter
 *	Initialize board.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qlafx00_initialize_adapter(scsi_qla_host_t *vha)
{
    int rval;
    struct qla_hw_data *ha = vha->hw;
    uint32_t tempc;

    /* Clear adapter flags. */
    vha->flags.online = 0;
    ha->flags.chip_reset_done = 0;
    vha->flags.reset_active = 0;
    ha->flags.pci_channel_io_perm_failure = 0;
    ha->flags.eeh_busy = 0;
    atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
    atomic_set(&vha->loop_state, LOOP_DOWN);
    vha->device_flags = DFLG_NO_CABLE;
    vha->dpc_flags = 0;
    vha->flags.management_server_logged_in = 0;
    ha->isp_abort_cnt = 0;
    ha->beacon_blink_led = 0;

    set_bit(0, ha->req_qid_map);
    set_bit(0, ha->rsp_qid_map);

    ql_dbg(ql_dbg_init, vha, 0x0147,
        "Configuring PCI space...\n");

    rval = ha->isp_ops->pci_config(vha);
    if (rval) {
        ql_log(ql_log_warn, vha, 0x0148,
            "Unable to configure PCI space.\n");
        return rval;
    }

    rval = qlafx00_init_fw_ready(vha);
    if (rval != QLA_SUCCESS)
        return rval;

    qlafx00_save_queue_ptrs(vha);

    rval = qlafx00_config_queues(vha);
    if (rval != QLA_SUCCESS)
        return rval;

    /*
     * Allocate the array of outstanding commands
     * now that we know the firmware resources.
     */
    rval = qla2x00_alloc_outstanding_cmds(ha, vha->req);
    if (rval != QLA_SUCCESS)
        return rval;

    rval = qla2x00_init_rings(vha);
    ha->flags.chip_reset_done = 1;

    tempc = QLAFX00_GET_TEMPERATURE(ha);
    ql_dbg(ql_dbg_init, vha, 0x0152,
        "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n",
        __func__, tempc);

    return rval;
}
uint32_t
qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
    scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
    int rval = QLA_FUNCTION_FAILED;
    uint32_t state[1];

    if (qla2x00_reset_active(vha))
        ql_log(ql_log_warn, vha, 0x70ce,
            "ISP reset active.\n");
    else if (!vha->hw->flags.eeh_busy) {
        rval = qlafx00_get_firmware_state(vha, state);
    }
    if (rval != QLA_SUCCESS)
        memset(state, -1, sizeof(state));

    return state[0];
}
void
qlafx00_get_host_speed(struct Scsi_Host *shost)
{
    struct qla_hw_data *ha = ((struct scsi_qla_host *)
        (shost_priv(shost)))->hw;
    u32 speed = FC_PORTSPEED_UNKNOWN;

    switch (ha->link_data_rate) {
    case QLAFX00_PORT_SPEED_2G:
        speed = FC_PORTSPEED_2GBIT;
        break;
    case QLAFX00_PORT_SPEED_4G:
        speed = FC_PORTSPEED_4GBIT;
        break;
    case QLAFX00_PORT_SPEED_8G:
        speed = FC_PORTSPEED_8GBIT;
        break;
    case QLAFX00_PORT_SPEED_10G:
        speed = FC_PORTSPEED_10GBIT;
        break;
    }
    fc_host_speed(shost) = speed;
}
/* QLAFX00 specific ISR implementation functions */
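/*
 * qlafx00_handle_sense() - Copy sense data into the midlayer buffer.
 * Up to par_sense_len bytes arrive inline with this status IOCB; if the
 * firmware reported more sense than fits, the srb is parked on
 * rsp->status_srb so the remainder can be gathered from continuation
 * entries by the response-queue processing code.
 */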
static inline void
qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp, int res)
{
    struct scsi_qla_host *vha = sp->fcport->vha;
    struct scsi_cmnd *cp = GET_CMD_SP(sp);
    uint32_t track_sense_len;

    SET_FW_SENSE_LEN(sp, sense_len);

    if (sense_len >= SCSI_SENSE_BUFFERSIZE)
        sense_len = SCSI_SENSE_BUFFERSIZE;

    SET_CMD_SENSE_LEN(sp, sense_len);
    SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
    track_sense_len = sense_len;

    if (sense_len > par_sense_len)
        sense_len = par_sense_len;

    memcpy(cp->sense_buffer, sense_data, sense_len);

    SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len);

    SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
    track_sense_len -= sense_len;
    SET_CMD_SENSE_LEN(sp, track_sense_len);

    ql_dbg(ql_dbg_io, vha, 0x304d,
        "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n",
        sense_len, par_sense_len, track_sense_len);

    if (GET_FW_SENSE_LEN(sp) > 0) {
        rsp->status_srb = sp;
        cp->result = res;
    }

    if (sense_len) {
        ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
            "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
            sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
            cp);
        ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
            cp->sense_buffer, sense_len);
    }
}
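/*
 * Completion handlers for task-management and abort IOCBs: they translate
 * the hardware status into the srb's comp_status and fire sp->done().
 */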
static void
qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp,
    __le16 sstatus, __le16 cpstatus)
{
    struct srb_iocb *tmf;

    tmf = &sp->u.iocb_cmd;
    if (cpstatus != cpu_to_le16((uint16_t)CS_COMPLETE) ||
        (sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID)))
        cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE);
    tmf->u.tmf.comp_status = cpstatus;
    sp->done(vha, sp, 0);
}

static void
qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct abort_iocb_entry_fx00 *pkt)
{
    const char func[] = "ABT_IOCB";
    srb_t *sp;
    struct srb_iocb *abt;

    sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
    if (!sp)
        return;

    abt = &sp->u.iocb_cmd;
    abt->u.abt.comp_status = pkt->tgt_id_sts;
    sp->done(vha, sp, 0);
}
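/*
 * qlafx00_ioctl_iosb_entry() - Complete a management-path IOCB.
 * Driver-internal SRB_FXIOCB_DCMD requests just latch the result words;
 * BSG-originated requests additionally get a qla_mt_iocb_rsp_fx00 status
 * block copied into the bsg_job sense area for user space.
 */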
static void
qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct ioctl_iocb_entry_fx00 *pkt)
{
    const char func[] = "IOSB_IOCB";
    srb_t *sp;
    struct fc_bsg_job *bsg_job;
    struct srb_iocb *iocb_job;
    int res = 0;
    struct qla_mt_iocb_rsp_fx00 fstatus;
    uint8_t *fw_sts_ptr;

    sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
    if (!sp)
        return;

    if (sp->type == SRB_FXIOCB_DCMD) {
        iocb_job = &sp->u.iocb_cmd;
        iocb_job->u.fxiocb.seq_number = pkt->seq_no;
        iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags;
        iocb_job->u.fxiocb.result = pkt->status;
        if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID)
            iocb_job->u.fxiocb.req_data =
                pkt->dataword_r;
    } else {
        bsg_job = sp->u.bsg_job;

        memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));

        fstatus.reserved_1 = pkt->reserved_0;
        fstatus.func_type = pkt->comp_func_num;
        fstatus.ioctl_flags = pkt->fw_iotcl_flags;
        fstatus.ioctl_data = pkt->dataword_r;
        fstatus.adapid = pkt->adapid;
        fstatus.reserved_2 = pkt->dataword_r_extra;
        fstatus.res_count = pkt->residuallen;
        fstatus.status = pkt->status;
        fstatus.seq_number = pkt->seq_no;
        memcpy(fstatus.reserved_3,
            pkt->reserved_2, 20 * sizeof(uint8_t));

        fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
            sizeof(struct fc_bsg_reply);

        memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
            sizeof(struct qla_mt_iocb_rsp_fx00));
        bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
            sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);

        ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
            sp->fcport->vha, 0x5080,
            (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));

        ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
            sp->fcport->vha, 0x5074,
            (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));

        res = bsg_job->reply->result = DID_OK << 16;
        bsg_job->reply->reply_payload_rcv_len =
            bsg_job->reply_payload.payload_len;
    }
    sp->done(vha, sp, res);
}
/**
 * qlafx00_status_entry() - Process a Status IOCB entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
    srb_t *sp;
    fc_port_t *fcport;
    struct scsi_cmnd *cp;
    struct sts_entry_fx00 *sts;
    __le16 comp_status;
    __le16 scsi_status;
    __le16 lscsi_status;
    int32_t resid;
    uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
        fw_resid_len;
    uint8_t *rsp_info = NULL, *sense_data = NULL;
    struct qla_hw_data *ha = vha->hw;
    uint32_t hindex, handle;
    uint16_t que;
    struct req_que *req;
    int res = 0;

    sts = (struct sts_entry_fx00 *) pkt;

    comp_status = sts->comp_status;
    scsi_status = sts->scsi_status & cpu_to_le16((uint16_t)SS_MASK);
    hindex = sts->handle;
    handle = LSW(hindex);
    que = MSW(hindex);
    req = ha->req_q_map[que];

    /* Validate handle. */
    if (handle < req->num_outstanding_cmds)
        sp = req->outstanding_cmds[handle];
    else
        sp = NULL;

    if (!sp) {
        ql_dbg(ql_dbg_io, vha, 0x3034,
            "Invalid status handle (0x%x).\n", handle);

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        qla2xxx_wake_dpc(vha);
        return;
    }
    if (sp->type == SRB_TM_CMD) {
        req->outstanding_cmds[handle] = NULL;
        qlafx00_tm_iocb_entry(vha, req, pkt, sp,
            scsi_status, comp_status);
        return;
    }

    /* Fast path completion. */
    if (comp_status == CS_COMPLETE && scsi_status == 0) {
        qla2x00_process_completed_request(vha, req, handle);
        return;
    }

    req->outstanding_cmds[handle] = NULL;
    cp = GET_CMD_SP(sp);
    if (cp == NULL) {
        ql_dbg(ql_dbg_io, vha, 0x3048,
            "Command already returned (0x%x/%p).\n",
            handle, sp);
        return;
    }

    lscsi_status = scsi_status & cpu_to_le16((uint16_t)STATUS_MASK);

    fcport = sp->fcport;

    sense_len = par_sense_len = rsp_info_len = resid_len =
        fw_resid_len = 0;
    if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))
        sense_len = sts->sense_len;
    if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
        | (uint16_t)SS_RESIDUAL_OVER)))
        resid_len = le32_to_cpu(sts->residual_len);
    if (comp_status == cpu_to_le16((uint16_t)CS_DATA_UNDERRUN))
        fw_resid_len = le32_to_cpu(sts->residual_len);
    rsp_info = sense_data = sts->data;
    par_sense_len = sizeof(sts->data);

    /* Check for overrun. */
    if (comp_status == CS_COMPLETE &&
        scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_OVER))
        comp_status = cpu_to_le16((uint16_t)CS_DATA_OVERRUN);
2340 * Based on Host and scsi status generate status code for Linux
2342 switch (le16_to_cpu(comp_status)) {
2345 if (scsi_status == 0) {
2349 if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
2350 | (uint16_t)SS_RESIDUAL_OVER))) {
2352 scsi_set_resid(cp, resid);
2354 if (!lscsi_status &&
2355 ((unsigned)(scsi_bufflen(cp) - resid) <
2357 ql_dbg(ql_dbg_io, fcport->vha, 0x3050,
2358 "Mid-layer underflow "
2359 "detected (0x%x of 0x%x bytes).\n",
2360 resid, scsi_bufflen(cp));
2362 res = DID_ERROR << 16;
2366 res = DID_OK << 16 | le16_to_cpu(lscsi_status);
2369 cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
2370 ql_dbg(ql_dbg_io, fcport->vha, 0x3051,
2371 "QUEUE FULL detected.\n");
2375 if (lscsi_status != cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
2378 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2379 if (!(scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
2382 qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2386 case CS_DATA_UNDERRUN:
2387 /* Use F/W calculated residual length. */
2388 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
2389 resid = fw_resid_len;
2392 scsi_set_resid(cp, resid);
2393 if (scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_UNDER)) {
2394 if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
2395 && fw_resid_len != resid_len) {
2396 ql_dbg(ql_dbg_io, fcport->vha, 0x3052,
2397 "Dropped frame(s) detected "
2398 "(0x%x of 0x%x bytes).\n",
2399 resid, scsi_bufflen(cp));
2401 res = DID_ERROR << 16 |
2402 le16_to_cpu(lscsi_status);
2403 goto check_scsi_status;
2406 if (!lscsi_status &&
2407 ((unsigned)(scsi_bufflen(cp) - resid) <
2409 ql_dbg(ql_dbg_io, fcport->vha, 0x3053,
2410 "Mid-layer underflow "
2411 "detected (0x%x of 0x%x bytes, "
2412 "cp->underflow: 0x%x).\n",
2413 resid, scsi_bufflen(cp), cp->underflow);
2415 res = DID_ERROR << 16;
2418 } else if (lscsi_status !=
2419 cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL) &&
2420 lscsi_status != cpu_to_le16((uint16_t)SAM_STAT_BUSY)) {
2422 * scsi status of task set and busy are considered
2423 * to be task not completed.
2426 ql_dbg(ql_dbg_io, fcport->vha, 0x3054,
2427 "Dropped frame(s) detected (0x%x "
2428 "of 0x%x bytes).\n", resid,
2431 res = DID_ERROR << 16 | le16_to_cpu(lscsi_status);
2432 goto check_scsi_status;
2434 ql_dbg(ql_dbg_io, fcport->vha, 0x3055,
2435 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2436 scsi_status, lscsi_status);
2439 res = DID_OK << 16 | le16_to_cpu(lscsi_status);
2444 * Check to see if SCSI Status is non zero. If so report SCSI
2447 if (lscsi_status != 0) {
2449 cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
2450 ql_dbg(ql_dbg_io, fcport->vha, 0x3056,
2451 "QUEUE FULL detected.\n");
2456 cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
2459 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2461 cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
2464 qlafx00_handle_sense(sp, sense_data, par_sense_len,
2465 sense_len, rsp, res);
2469 case CS_PORT_LOGGED_OUT:
2470 case CS_PORT_CONFIG_CHG:
2473 case CS_PORT_UNAVAILABLE:
2478 * We are going to have the fc class block the rport
2479 * while we try to recover so instruct the mid layer
2480 * to requeue until the class decides how to handle this.
2482 res = DID_TRANSPORT_DISRUPTED << 16;
2484 ql_dbg(ql_dbg_io, fcport->vha, 0x3057,
2485 "Port down status: port-state=0x%x.\n",
2486 atomic_read(&fcport->state));
2488 if (atomic_read(&fcport->state) == FCS_ONLINE)
2489 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2493 res = DID_RESET << 16;
2497 res = DID_ERROR << 16;
2502 ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
2503 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
2504 "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
2505 "rsp_info=%p resid=0x%x fw_resid=0x%x sense_len=0x%x, "
2506 "par_sense_len=0x%x, rsp_info_len=0x%x\n",
2507 comp_status, scsi_status, res, vha->host_no,
2508 cp->device->id, cp->device->lun, fcport->tgt_id,
2509 lscsi_status, cp->cmnd, scsi_bufflen(cp),
2510 rsp_info, resid_len, fw_resid_len, sense_len,
2511 par_sense_len, rsp_info_len);
2513 if (rsp->status_srb == NULL)
2514 sp->done(ha, sp, res);
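/*
 * Sense data that does not fit in the status IOCB arrives in one or
 * more status-continuation entries; rsp->status_srb stays set while
 * more data is expected. Per continuation entry the bookkeeping is,
 * in sketch form:
 *
 *	copy min(cmd sense length, sizeof(pkt->data)) bytes
 *	advance the saved sense pointer, shrink the saved length
 *	shrink the firmware sense length by sizeof(pkt->data)
 *	complete the command once the firmware length reaches zero
 */
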
/**
 * qlafx00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp) {
		ql_dbg(ql_dbg_io, vha, 0x3037,
		    "no SP, sp = %p\n", sp);
		return;
	}

	if (!GET_FW_SENSE_LEN(sp)) {
		ql_dbg(ql_dbg_io, vha, 0x304b,
		    "no fw sense data, sp = %p\n", sp);
		return;
	}

	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x303b,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (!GET_CMD_SENSE_LEN(sp)) {
		ql_dbg(ql_dbg_io, vha, 0x304c,
		    "no sense data, sp = %p\n", sp);
	} else {
		sense_len = GET_CMD_SENSE_LEN(sp);
		sense_ptr = GET_CMD_SENSE_PTR(sp);
		ql_dbg(ql_dbg_io, vha, 0x304f,
		    "sp=%p sense_len=0x%x sense_ptr=%p.\n",
		    sp, sense_len, sense_ptr);

		if (sense_len > sizeof(pkt->data))
			sense_sz = sizeof(pkt->data);
		else
			sense_sz = sense_len;

		/* Move sense data. */
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
		    (uint8_t *)pkt, sizeof(sts_cont_entry_t));
		memcpy(sense_ptr, pkt->data, sense_sz);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
		    sense_ptr, sense_sz);

		sense_len -= sense_sz;
		sense_ptr += sense_sz;

		SET_CMD_SENSE_PTR(sp, sense_ptr);
		SET_CMD_SENSE_LEN(sp, sense_len);
	}
	sense_len = GET_FW_SENSE_LEN(sp);
	sense_len = (sense_len > sizeof(pkt->data)) ?
	    (sense_len - sizeof(pkt->data)) : 0;
	SET_FW_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(ha, sp, cp->result);
	}
}

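/*
 * A multi-status entry batches up to MAX_HANDLE_COUNT completion handles
 * in one IOCB; each handle is unpacked exactly like a single status
 * entry (LSW = outstanding-command slot, MSW = request queue number) and
 * completed through the fast-path helper.
 */
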
/**
 * qlafx00_multistatus_entry() - Process Multi response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qlafx00_multistatus_entry(struct scsi_qla_host *vha,
	struct rsp_que *rsp, void *pkt)
{
	srb_t		*sp;
	struct multi_sts_entry_fx00 *stsmfx;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle, hindex, handle_count, i;
	uint16_t que;
	struct req_que *req;
	__le32 *handle_ptr;

	stsmfx = (struct multi_sts_entry_fx00 *) pkt;

	handle_count = stsmfx->handle_count;

	if (handle_count > MAX_HANDLE_COUNT) {
		ql_dbg(ql_dbg_io, vha, 0x3035,
		    "Invalid handle count (0x%x).\n", handle_count);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}

	handle_ptr = &stsmfx->handles[0];

	for (i = 0; i < handle_count; i++) {
		hindex = le32_to_cpu(*handle_ptr);
		handle = LSW(hindex);
		que = MSW(hindex);
		req = ha->req_q_map[que];

		/* Validate handle. */
		if (handle < req->num_outstanding_cmds)
			sp = req->outstanding_cmds[handle];
		else
			sp = NULL;

		if (sp == NULL) {
			ql_dbg(ql_dbg_io, vha, 0x3044,
			    "Invalid status handle (0x%x).\n", handle);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			return;
		}
		qla2x00_process_completed_request(vha, req, handle);
		handle_ptr++;
	}
}

/**
 * qlafx00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 * @estatus: firmware error status
 * @etype: entry type
 */
static void
qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
	struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = 0;
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x507f,
	    "type of error status in response: 0x%x\n", estatus);

	req = ha->req_q_map[que];

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		sp->done(ha, sp, res);
		return;
	}

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
}

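/*
 * Response entries are copied out of the I/O-mapped ring with
 * memcpy_fromio() into rsp->rsp_pkt before being parsed, so the decode
 * logic below only ever touches a coherent local snapshot of the entry.
 */
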
/**
 * qlafx00_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
static void
qlafx00_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_fx00 *pkt;
	response_t *lptr;
	uint16_t lreq_q_in = 0;
	uint16_t lreq_q_out = 0;

	lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
	lreq_q_out = rsp->ring_index;

	while (lreq_q_in != lreq_q_out) {
		lptr = rsp->ring_ptr;
		memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
		    sizeof(rsp->rsp_pkt));
		pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;

		rsp->ring_index++;
		lreq_q_out++;
		if (rsp->ring_index == rsp->length) {
			lreq_q_out = 0;
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0 &&
		    pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
			qlafx00_error_entry(vha, rsp,
			    (struct sts_entry_fx00 *)pkt, pkt->entry_status,
			    pkt->entry_type);
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE_FX00:
			qlafx00_status_entry(vha, rsp, pkt);
			break;

		case STATUS_CONT_TYPE_FX00:
			qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;

		case MULTI_STATUS_TYPE_FX00:
			qlafx00_multistatus_entry(vha, rsp, pkt);
			break;

		case ABORT_IOCB_TYPE_FX00:
			qlafx00_abort_iocb_entry(vha, rsp->req,
			    (struct abort_iocb_entry_fx00 *)pkt);
			break;

		case IOCTL_IOSB_TYPE_FX00:
			qlafx00_ioctl_iosb_entry(vha, rsp->req,
			    (struct ioctl_iocb_entry_fx00 *)pkt);
			break;

		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5081,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
	}

	/* Adjust ring index */
	WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}

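/*
 * Only as many AEN mailboxes are captured as the event consumes: one
 * dword for simple events, four for a port database update, and the
 * full set of eight for anything unrecognized, all forwarded to the
 * aenfx work handler.
 */
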
/**
 * qlafx00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 */
static void
qlafx00_async_event(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg;
	int data_size = 1;

	reg = &ha->iobase->ispfx00;
	/* Setup to process RIO completion. */
	switch (ha->aenmb[0]) {
	case QLAFX00_MBA_SYSTEM_ERR:		/* System Error */
		ql_log(ql_log_warn, vha, 0x5079,
		    "ISP System Error - mbx1=%x\n", ha->aenmb[0]);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case QLAFX00_MBA_SHUTDOWN_RQSTD:	/* Shutdown requested */
		ql_dbg(ql_dbg_async, vha, 0x5076,
		    "Asynchronous FW shutdown requested.\n");
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		break;

	case QLAFX00_MBA_PORT_UPDATE:		/* Port database update */
		ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1);
		ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2);
		ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3);
		ql_dbg(ql_dbg_async, vha, 0x5077,
		    "Asynchronous port Update received "
		    "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
		data_size = 4;
		break;

	case QLAFX00_MBA_TEMP_OVER:		/* Over temperature event */
		ql_log(ql_log_info, vha, 0x5085,
		    "Asynchronous over temperature event received "
		    "aenmb[0]: %x\n",
		    ha->aenmb[0]);
		break;

	case QLAFX00_MBA_TEMP_NORM:		/* Normal temperature event */
		ql_log(ql_log_info, vha, 0x5086,
		    "Asynchronous normal temperature event received "
		    "aenmb[0]: %x\n",
		    ha->aenmb[0]);
		break;

	case QLAFX00_MBA_TEMP_CRIT:		/* Critical temperature event */
		ql_log(ql_log_info, vha, 0x5083,
		    "Asynchronous critical temperature event received "
		    "aenmb[0]: %x\n",
		    ha->aenmb[0]);
		break;

	default:
		ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
		ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
		ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
		ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
		ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
		ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
		ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
		ql_dbg(ql_dbg_async, vha, 0x5078,
		    "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
		    ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
		data_size = 8;
		break;
	}

	qlafx00_post_aenfx_work(vha, ha->aenmb[0],
	    (uint32_t *)ha->aenmb, data_size);
}

/**
 * qlafx00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: first mailbox word, already read from the mailbox16 register
 */
static void
qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
{
	uint16_t	cnt;
	uint32_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;

	if (!ha->mcp32)
		ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out32[0] = mb0;
	wptr = (uint32_t __iomem *)&reg->mailbox17;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr);
		wptr++;
	}
}

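/*
 * The interrupt handler below demultiplexes one status register read
 * into up to three sources per iteration (mailbox completion,
 * asynchronous event, response queue), acknowledges exactly the bits it
 * serviced, and bounds itself to 50 passes so a stuck interrupt line
 * cannot spin this CPU forever.
 */
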
/**
 * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qlafx00_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_fx00 __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint32_t	mb[8];
	struct rsp_que *rsp;
	unsigned long	flags;
	uint32_t clr_intr = 0;
	uint32_t intr_stat = 0;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x507d,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->ispfx00;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; clr_intr = 0) {
		stat = QLAFX00_RD_INTR_REG(ha);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		intr_stat = stat & QLAFX00_HST_INT_STS_BITS;
		if (!intr_stat)
			break;

		if (stat & QLAFX00_INTR_MB_CMPLT) {
			mb[0] = RD_REG_WORD(&reg->mailbox16);
			qlafx00_mbx_completion(vha, mb[0]);
			status |= MBX_INTERRUPT;
			clr_intr |= QLAFX00_INTR_MB_CMPLT;
		}
		if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
			ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
			qlafx00_async_event(vha);
			clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
		}
		if (intr_stat & QLAFX00_INTR_RSP_CMPLT) {
			qlafx00_process_response_queue(vha, rsp);
			clr_intr |= QLAFX00_INTR_RSP_CMPLT;
		}

		QLAFX00_CLR_INTR_REG(ha, clr_intr);
		QLAFX00_RD_INTR_REG(ha);
	}

	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

/** QLAFX00 specific IOCB implementation functions */

static inline cont_a64_entry_t *
qlafx00_prep_cont_type1_iocb(struct req_que *req,
	cont_a64_entry_t *lcont_pkt)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	lcont_pkt->entry_type = CONTINUE_A64_TYPE_FX00;

	return cont_pkt;
}

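/*
 * The IOCB builders below fill a stack-local continuation entry first
 * and push it to the I/O-mapped ring with memcpy_toio() only once it is
 * full (or the scatterlist ends); a sketch of the recurring pattern
 * (illustrative only):
 *
 *	memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
 *	cont_pkt = qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
 *	... store up to five DSDs in lcont_pkt ...
 *	memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
 *	    REQUEST_ENTRY_SIZE);
 */
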
static inline void
qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
	uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
{
	uint16_t	avail_dsds;
	__le32 *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i, cont;
	struct req_que *req;
	cont_a64_entry_t lcont_pkt;
	cont_a64_entry_t *cont_pkt;

	vha = sp->fcport->vha;
	req = vha->req;

	cmd = GET_CMD_SP(sp);
	cont = 0;
	cont_pkt = NULL;

	/* Update entry type to indicate Command Type 3 IOCB */
	lcmd_pkt->entry_type = FX00_COMMAND_TYPE_7;

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		lcmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		lcmd_pkt->cntrl_flags = TMF_READ_DATA;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = (__le32 *)&lcmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
			cont_pkt =
			    qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
			cur_dsd = (__le32 *)lcont_pkt.dseg_0_address;
			avail_dsds = 5;
			cont = 1;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
		if (avail_dsds == 0 && cont == 1) {
			cont = 0;
			memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
			    REQUEST_ENTRY_SIZE);
		}
	}
	if (avail_dsds != 0 && cont == 1) {
		memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
		    REQUEST_ENTRY_SIZE);
	}
}

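/*
 * A command IOCB carries a single DSD; each group of up to five further
 * scatter/gather elements costs one continuation entry. That is the
 * count qla24xx_calc_iocbs() is assumed to produce when ring space is
 * reserved in qlafx00_start_scsi() below.
 */
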
/**
 * qlafx00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qlafx00_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long   flags;
	uint32_t	index;
	uint32_t	handle;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_7_fx00 *cmd_pkt;
	struct cmd_type_7_fx00 lcmd_pkt;
	struct scsi_lun llun;

	/* Setup device pointers. */
	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr;

	memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);

	lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
	lcmd_pkt.reserved_0 = 0;
	lcmd_pkt.port_path_ctrl = 0;
	lcmd_pkt.reserved_1 = 0;
	lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
	lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);

	int_to_scsilun(cmd->device->lun, &llun);
	host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun,
	    sizeof(lcmd_pkt.lun));

	/* Load SCSI command packet. */
	host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb));
	lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt);

	/* Set total data segment count. */
	lcmd_pkt.entry_count = (uint8_t)req_cnt;

	/* Specify response queue number where completion should happen */
	lcmd_pkt.entry_status = (uint8_t) rsp->id;

	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
	    (uint8_t *)cmd->cmnd, cmd->cmd_len);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
	    (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);

	memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

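/*
 * The task-management and abort builders below stage the IOCB in a
 * stack-local structure and copy it into the ring slot supplied by the
 * caller. Handles are packed with MAKE_HANDLE(req->id, sp->handle), so
 * the completion path can split them again, e.g. (illustrative only):
 *
 *	handle = LSW(hindex);	slot in req->outstanding_cmds
 *	que    = MSW(hindex);	index into ha->req_q_map
 */
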
void
qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct req_que *req = vha->req;
	struct tsk_mgmt_entry_fx00 tm_iocb;
	struct scsi_lun llun;

	memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
	tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
	tm_iocb.entry_count = 1;
	tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	tm_iocb.reserved_0 = 0;
	tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
	tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
	if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) {
		int_to_scsilun(fxio->u.tmf.lun, &llun);
		host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun,
		    sizeof(struct scsi_lun));
	}

	memcpy((void *)ptm_iocb, &tm_iocb,
	    sizeof(struct tsk_mgmt_entry_fx00));
	wmb();
}

void
qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct req_que *req = vha->req;
	struct abort_iocb_entry_fx00 abt_iocb;

	memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
	abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
	abt_iocb.entry_count = 1;
	abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	abt_iocb.abort_handle =
	    cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
	abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
	abt_iocb.req_que_no = cpu_to_le16(req->id);

	memcpy((void *)pabt_iocb, &abt_iocb,
	    sizeof(struct abort_iocb_entry_fx00));
	wmb();
}

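/*
 * fxdisc IOCBs are built from two possible sources: a driver-generated
 * SRB_FXIOCB_DCMD, which supplies at most one request and one response
 * DMA segment, or a bsg vendor command, whose request and reply
 * scatterlists are walked with the same one-plus-five DSD chaining used
 * for SCSI commands.
 */
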
void
qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	struct fc_bsg_job *bsg_job;
	struct fxdisc_entry_fx00 fx_iocb;
	uint8_t entry_cnt = 1;

	memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
	fx_iocb.entry_type = FX00_IOCB_TYPE;
	fx_iocb.handle = cpu_to_le32(sp->handle);
	fx_iocb.entry_count = entry_cnt;

	if (sp->type == SRB_FXIOCB_DCMD) {
		fx_iocb.func_num =
		    sp->u.iocb_cmd.u.fxiocb.req_func_type;
		fx_iocb.adapid = fxio->u.fxiocb.adapter_id;
		fx_iocb.adapid_hi = fxio->u.fxiocb.adapter_id_hi;
		fx_iocb.reserved_0 = fxio->u.fxiocb.reserved_0;
		fx_iocb.reserved_1 = fxio->u.fxiocb.reserved_1;
		fx_iocb.dataword_extra = fxio->u.fxiocb.req_data_extra;

		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
			fx_iocb.req_dsdcnt = cpu_to_le16(1);
			fx_iocb.req_xfrcnt =
			    cpu_to_le16(fxio->u.fxiocb.req_len);
			fx_iocb.dseg_rq_address[0] =
			    cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
			fx_iocb.dseg_rq_address[1] =
			    cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
			fx_iocb.dseg_rq_len =
			    cpu_to_le32(fxio->u.fxiocb.req_len);
		}

		if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
			fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
			fx_iocb.rsp_xfrcnt =
			    cpu_to_le16(fxio->u.fxiocb.rsp_len);
			fx_iocb.dseg_rsp_address[0] =
			    cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
			fx_iocb.dseg_rsp_address[1] =
			    cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
			fx_iocb.dseg_rsp_len =
			    cpu_to_le32(fxio->u.fxiocb.rsp_len);
		}

		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID)
			fx_iocb.dataword = fxio->u.fxiocb.req_data;

		fx_iocb.flags = fxio->u.fxiocb.flags;
	} else {
		struct scatterlist *sg;

		bsg_job = sp->u.bsg_job;
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

		fx_iocb.func_num = piocb_rqst->func_type;
		fx_iocb.adapid = piocb_rqst->adapid;
		fx_iocb.adapid_hi = piocb_rqst->adapid_hi;
		fx_iocb.reserved_0 = piocb_rqst->reserved_0;
		fx_iocb.reserved_1 = piocb_rqst->reserved_1;
		fx_iocb.dataword_extra = piocb_rqst->dataword_extra;
		fx_iocb.dataword = piocb_rqst->dataword;
		fx_iocb.req_xfrcnt = piocb_rqst->req_len;
		fx_iocb.rsp_xfrcnt = piocb_rqst->rsp_len;

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
			int avail_dsds, tot_dsds;
			cont_a64_entry_t lcont_pkt;
			cont_a64_entry_t *cont_pkt = NULL;
			__le32 *cur_dsd;
			int index = 0, cont = 0;

			fx_iocb.req_dsdcnt =
			    cpu_to_le16(bsg_job->request_payload.sg_cnt);
			tot_dsds =
			    bsg_job->request_payload.sg_cnt;
			cur_dsd = (__le32 *)&fx_iocb.dseg_rq_address[0];
			avail_dsds = 1;
			for_each_sg(bsg_job->request_payload.sg_list, sg,
			    tot_dsds, index) {
				dma_addr_t sle_dma;

				/* Allocate additional continuation packets? */
				if (avail_dsds == 0) {
					/*
					 * Five DSDs are available in the Cont.
					 * Type 1 IOCB.
					 */
					memset(&lcont_pkt, 0,
					    REQUEST_ENTRY_SIZE);
					cont_pkt =
					    qlafx00_prep_cont_type1_iocb(
						sp->fcport->vha->req,
						&lcont_pkt);
					cur_dsd = (__le32 *)
					    lcont_pkt.dseg_0_address;
					avail_dsds = 5;
					cont = 1;
					entry_cnt++;
				}

				sle_dma = sg_dma_address(sg);
				*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
				avail_dsds--;

				if (avail_dsds == 0 && cont == 1) {
					cont = 0;
					memcpy_toio(
					    (void __iomem *)cont_pkt,
					    &lcont_pkt, REQUEST_ENTRY_SIZE);
					ql_dump_buffer(
					    ql_dbg_user + ql_dbg_verbose,
					    sp->fcport->vha, 0x3042,
					    (uint8_t *)&lcont_pkt,
					    REQUEST_ENTRY_SIZE);
				}
			}
			if (avail_dsds != 0 && cont == 1) {
				memcpy_toio((void __iomem *)cont_pkt,
				    &lcont_pkt, REQUEST_ENTRY_SIZE);
				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
				    sp->fcport->vha, 0x3043,
				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
			}
		}

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
			int avail_dsds, tot_dsds;
			cont_a64_entry_t lcont_pkt;
			cont_a64_entry_t *cont_pkt = NULL;
			__le32 *cur_dsd;
			int index = 0, cont = 0;

			fx_iocb.rsp_dsdcnt =
			    cpu_to_le16(bsg_job->reply_payload.sg_cnt);
			tot_dsds = bsg_job->reply_payload.sg_cnt;
			cur_dsd = (__le32 *)&fx_iocb.dseg_rsp_address[0];
			avail_dsds = 1;

			for_each_sg(bsg_job->reply_payload.sg_list, sg,
			    tot_dsds, index) {
				dma_addr_t sle_dma;

				/* Allocate additional continuation packets? */
				if (avail_dsds == 0) {
					/*
					 * Five DSDs are available in the Cont.
					 * Type 1 IOCB.
					 */
					memset(&lcont_pkt, 0,
					    REQUEST_ENTRY_SIZE);
					cont_pkt =
					    qlafx00_prep_cont_type1_iocb(
						sp->fcport->vha->req,
						&lcont_pkt);
					cur_dsd = (__le32 *)
					    lcont_pkt.dseg_0_address;
					avail_dsds = 5;
					cont = 1;
					entry_cnt++;
				}

				sle_dma = sg_dma_address(sg);
				*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
				avail_dsds--;

				if (avail_dsds == 0 && cont == 1) {
					cont = 0;
					memcpy_toio((void __iomem *)cont_pkt,
					    &lcont_pkt,
					    REQUEST_ENTRY_SIZE);
					ql_dump_buffer(
					    ql_dbg_user + ql_dbg_verbose,
					    sp->fcport->vha, 0x3045,
					    (uint8_t *)&lcont_pkt,
					    REQUEST_ENTRY_SIZE);
				}
			}
			if (avail_dsds != 0 && cont == 1) {
				memcpy_toio((void __iomem *)cont_pkt,
				    &lcont_pkt, REQUEST_ENTRY_SIZE);
				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
				    sp->fcport->vha, 0x3046,
				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
			}
		}

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID)
			fx_iocb.dataword = piocb_rqst->dataword;
		fx_iocb.flags = piocb_rqst->flags;
		fx_iocb.entry_count = entry_cnt;
	}

	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
	    sp->fcport->vha, 0x3047,
	    (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));

	memcpy_toio((void __iomem *)pfxiocb, &fx_iocb,
	    sizeof(struct fxdisc_entry_fx00));
	wmb();
}