1 // SPDX-License-Identifier: GPL-2.0-only
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
7 #include "qla_target.h"
9 #include <linux/delay.h>
10 #include <linux/gfp.h>
13 #define IS_PPCARCH true
15 #define IS_PPCARCH false
/*
 * Table mapping mailbox command opcodes to short human-readable names.
 * NOTE(review): this listing embeds original line numbers and has dropped
 * lines (numbering jumps 18->22) -- the struct members and the array
 * name/terminator are not visible here; verify against the full source.
 */
18 static struct mb_cmd_name {
22 {MBC_GET_PORT_DATABASE, "GPDB"},
23 {MBC_GET_ID_LIST, "GIDList"},
24 {MBC_GET_LINK_PRIV_STATS, "Stats"},
25 {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
/*
 * mb_to_str() - translate a mailbox command opcode into a printable name
 * by linear search of the mb_str table above.
 * NOTE(review): body lines are missing from this listing (numbering jumps
 * 28->31->33); the match/return logic is not visible here.
 */
28 static const char *mb_to_str(uint16_t cmd)
31 struct mb_cmd_name *e;
33 for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
/*
 * Table of mailbox commands that are permitted while the ISP is still
 * executing ROM code / an ISP abort is pending (consulted by is_rom_cmd()).
 * NOTE(review): listing has dropped lines (e.g. 41->45, 65->67); struct
 * members and the array terminator are not visible here.
 */
41 static struct rom_cmd {
45 { MBC_EXECUTE_FIRMWARE },
46 { MBC_READ_RAM_WORD },
47 { MBC_MAILBOX_REGISTER_TEST },
48 { MBC_VERIFY_CHECKSUM },
49 { MBC_GET_FIRMWARE_VERSION },
50 { MBC_LOAD_RISC_RAM },
51 { MBC_DUMP_RISC_RAM },
52 { MBC_LOAD_RISC_RAM_EXTENDED },
53 { MBC_DUMP_RISC_RAM_EXTENDED },
54 { MBC_WRITE_RAM_WORD_EXTENDED },
55 { MBC_READ_RAM_EXTENDED },
56 { MBC_GET_RESOURCE_COUNTS },
57 { MBC_SET_FIRMWARE_OPTION },
58 { MBC_MID_INITIALIZE_FIRMWARE },
59 { MBC_GET_FIRMWARE_STATE },
60 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
61 { MBC_GET_RETRY_COUNT },
62 { MBC_TRACE_CONTROL },
63 { MBC_INITIALIZE_MULTIQ },
64 { MBC_IOCB_COMMAND_A64 },
65 { MBC_GET_ADAPTER_LOOP_ID },
67 { MBC_SET_RNID_PARAMS },
68 { MBC_GET_RNID_PARAMS },
69 { MBC_GET_SET_ZIO_THRESHOLD },
/*
 * is_rom_cmd() - return nonzero if @cmd is in the rom_cmds table, i.e. a
 * mailbox command allowed while an ISP abort is in progress.
 * NOTE(review): the loop body and return statements are missing from this
 * listing (numbering jumps 72->77).
 */
72 static int is_rom_cmd(uint16_t cmd)
77 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
87 * qla2x00_mailbox_command
88 * Issue mailbox command and waits for completion.
91 * ha = adapter block pointer.
92 * mcp = driver internal mbx struct pointer.
95 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
98 * 0 : QLA_SUCCESS = cmd performed successfully
99 * 1 : QLA_FUNCTION_FAILED (error encountered)
100 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
/*
 * qla2x00_mailbox_command() - issue one mailbox command and wait for its
 * completion, either interrupt-driven or by polling.
 *
 * NOTE(review): this listing embeds original line numbers, has dropped many
 * lines (embedded numbering is non-contiguous), and "&reg" has been mangled
 * to the "(R)" glyph by an HTML-entity round trip.  Several conditionals'
 * else-arms/braces are therefore invisible here.  Comments below describe
 * only what the visible lines show; verify everything against the complete
 * source before structural edits.
 */
106 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
109 unsigned long flags = 0;
111 uint8_t abort_active, eeh_delay;
113 uint16_t command = 0;
115 __le16 __iomem *optr;
118 unsigned long wait_time;
119 struct qla_hw_data *ha = vha->hw;
120 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
124 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
/* Fail fast when the PCI channel or the device is in a fatal state. */
126 if (ha->pdev->error_state == pci_channel_io_perm_failure) {
127 ql_log(ql_log_warn, vha, 0x1001,
128 "PCI channel failed permanently, exiting.\n");
129 return QLA_FUNCTION_TIMEOUT;
132 if (vha->device_flags & DFLG_DEV_FAILED) {
133 ql_log(ql_log_warn, vha, 0x1002,
134 "Device in failed state, exiting.\n");
135 return QLA_FUNCTION_TIMEOUT;
138 /* if PCI error, then avoid mbx processing.*/
139 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
140 test_bit(UNLOADING, &base_vha->dpc_flags)) {
141 ql_log(ql_log_warn, vha, 0xd04e,
142 "PCI error, exiting.\n");
143 return QLA_FUNCTION_TIMEOUT;
147 io_lock_on = base_vha->flags.init_done;
150 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
/* Snapshot reset generation so a chip reset during our wait is detectable. */
151 chip_reset = ha->chip_reset;
153 if (ha->flags.pci_channel_io_perm_failure) {
154 ql_log(ql_log_warn, vha, 0x1003,
155 "Perm failure on EEH timeout MBX, exiting.\n");
156 return QLA_FUNCTION_TIMEOUT;
159 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
160 /* Setting Link-Down error */
161 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
162 ql_log(ql_log_warn, vha, 0x1004,
163 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
164 return QLA_FUNCTION_TIMEOUT;
167 /* check if ISP abort is active and return cmd with timeout */
168 if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
169 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
170 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
171 !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) {
172 ql_log(ql_log_info, vha, 0x1005,
173 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
175 return QLA_FUNCTION_TIMEOUT;
178 atomic_inc(&ha->num_pend_mbx_stage1);
180 * Wait for active mailbox commands to finish by waiting at most tov
181 * seconds. This is to serialize actual issuing of mailbox cmds during
182 * non ISP abort time.
184 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
185 /* Timeout occurred. Return error. */
186 ql_log(ql_log_warn, vha, 0xd035,
187 "Cmd access timeout, cmd=0x%x, Exiting.\n",
190 atomic_dec(&ha->num_pend_mbx_stage1);
191 return QLA_FUNCTION_TIMEOUT;
193 atomic_dec(&ha->num_pend_mbx_stage1);
194 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
195 ha->flags.eeh_busy) {
196 ql_log(ql_log_warn, vha, 0xd035,
197 "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
198 ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
204 /* Save mailbox command for debug */
207 ql_dbg(ql_dbg_mbx, vha, 0x1006,
208 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
/* Mark the mailbox busy under hardware_lock and pick the register bank. */
210 spin_lock_irqsave(&ha->hardware_lock, flags);
212 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
213 ha->flags.mbox_busy) {
215 spin_unlock_irqrestore(&ha->hardware_lock, flags);
218 ha->flags.mbox_busy = 1;
220 /* Load mailbox registers. */
222 optr = &reg->isp82.mailbox_in[0];
223 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
224 optr = &reg->isp24.mailbox0;
226 optr = MAILBOX_REG(ha, &reg->isp, 0);
229 command = mcp->mb[0];
230 mboxes = mcp->out_mb;
232 ql_dbg(ql_dbg_mbx, vha, 0x1111,
233 "Mailbox registers (OUT):\n");
/* Write each outbound mailbox word selected by the out_mb bitmask. */
234 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
235 if (IS_QLA2200(ha) && cnt == 8)
236 optr = MAILBOX_REG(ha, &reg->isp, 8);
237 if (mboxes & BIT_0) {
238 ql_dbg(ql_dbg_mbx, vha, 0x1112,
239 "mbox[%d]<-0x%04x\n", cnt, *iptr);
240 wrt_reg_word(optr, *iptr);
248 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
249 "I/O Address = %p.\n", optr);
251 /* Issue set host interrupt command to send cmd out. */
252 ha->flags.mbox_int = 0;
253 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
255 /* Unlock mbx registers and wait for interrupt */
256 ql_dbg(ql_dbg_mbx, vha, 0x100f,
257 "Going to unlock irq & waiting for interrupts. "
258 "jiffies=%lx.\n", jiffies);
260 /* Wait for mbx cmd completion until timeout */
261 atomic_inc(&ha->num_pend_mbx_stage2);
/* Interrupt-driven path: ring the host-interrupt doorbell, then sleep. */
262 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
263 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
266 wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
267 else if (IS_FWI2_CAPABLE(ha))
268 wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
270 wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
271 spin_unlock_irqrestore(&ha->hardware_lock, flags);
274 atomic_inc(&ha->num_pend_mbx_stage3);
275 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
277 if (chip_reset != ha->chip_reset) {
278 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
280 spin_lock_irqsave(&ha->hardware_lock, flags);
281 ha->flags.mbox_busy = 0;
282 spin_unlock_irqrestore(&ha->hardware_lock,
284 atomic_dec(&ha->num_pend_mbx_stage2);
285 atomic_dec(&ha->num_pend_mbx_stage3);
289 ql_dbg(ql_dbg_mbx, vha, 0x117a,
290 "cmd=%x Timeout.\n", command);
291 spin_lock_irqsave(&ha->hardware_lock, flags);
292 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
293 spin_unlock_irqrestore(&ha->hardware_lock, flags);
295 } else if (ha->flags.purge_mbox ||
296 chip_reset != ha->chip_reset) {
297 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
299 spin_lock_irqsave(&ha->hardware_lock, flags);
300 ha->flags.mbox_busy = 0;
301 spin_unlock_irqrestore(&ha->hardware_lock, flags);
302 atomic_dec(&ha->num_pend_mbx_stage2);
303 atomic_dec(&ha->num_pend_mbx_stage3);
307 atomic_dec(&ha->num_pend_mbx_stage3);
309 if (time_after(jiffies, wait_time + 5 * HZ))
310 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
311 command, jiffies_to_msecs(jiffies - wait_time));
/* Polling path: ring the doorbell and spin on mbox_int with qla2x00_poll(). */
313 ql_dbg(ql_dbg_mbx, vha, 0x1011,
314 "Cmd=%x Polling Mode.\n", command);
316 if (IS_P3P_TYPE(ha)) {
317 if (rd_reg_dword(&reg->isp82.hint) &
318 HINT_MBX_INT_PENDING) {
319 ha->flags.mbox_busy = 0;
320 spin_unlock_irqrestore(&ha->hardware_lock,
322 atomic_dec(&ha->num_pend_mbx_stage2);
323 ql_dbg(ql_dbg_mbx, vha, 0x1012,
324 "Pending mailbox timeout, exiting.\n");
326 rval = QLA_FUNCTION_TIMEOUT;
329 wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
330 } else if (IS_FWI2_CAPABLE(ha))
331 wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
333 wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
334 spin_unlock_irqrestore(&ha->hardware_lock, flags);
336 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
337 while (!ha->flags.mbox_int) {
338 if (ha->flags.purge_mbox ||
339 chip_reset != ha->chip_reset) {
340 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
342 spin_lock_irqsave(&ha->hardware_lock, flags);
343 ha->flags.mbox_busy = 0;
344 spin_unlock_irqrestore(&ha->hardware_lock,
346 atomic_dec(&ha->num_pend_mbx_stage2);
351 if (time_after(jiffies, wait_time))
354 /* Check for pending interrupts. */
355 qla2x00_poll(ha->rsp_q_map[0]);
357 if (!ha->flags.mbox_int &&
359 command == MBC_LOAD_RISC_RAM_EXTENDED))
362 ql_dbg(ql_dbg_mbx, vha, 0x1013,
364 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
366 atomic_dec(&ha->num_pend_mbx_stage2);
368 /* Check whether we timed out */
/* Completion path: copy the inbound mailbox words back into mcp->mb[]. */
369 if (ha->flags.mbox_int) {
372 ql_dbg(ql_dbg_mbx, vha, 0x1014,
373 "Cmd=%x completed.\n", command);
375 /* Got interrupt. Clear the flag. */
376 ha->flags.mbox_int = 0;
377 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
379 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
380 spin_lock_irqsave(&ha->hardware_lock, flags);
381 ha->flags.mbox_busy = 0;
382 spin_unlock_irqrestore(&ha->hardware_lock, flags);
384 /* Setting Link-Down error */
385 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
387 rval = QLA_FUNCTION_FAILED;
388 ql_log(ql_log_warn, vha, 0xd048,
389 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
393 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
394 ql_dbg(ql_dbg_mbx, vha, 0x11ff,
395 "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
396 MBS_COMMAND_COMPLETE);
397 rval = QLA_FUNCTION_FAILED;
400 /* Load return mailbox registers. */
402 iptr = (uint16_t *)&ha->mailbox_out[0];
405 ql_dbg(ql_dbg_mbx, vha, 0x1113,
406 "Mailbox registers (IN):\n");
407 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
408 if (mboxes & BIT_0) {
410 ql_dbg(ql_dbg_mbx, vha, 0x1114,
411 "mbox[%d]->0x%04x\n", cnt, *iptr2);
/* Timeout path: dump mailbox/ictrl state, maybe capture a firmware dump. */
421 uint32_t ictrl, host_status, hccr;
424 if (IS_FWI2_CAPABLE(ha)) {
425 mb[0] = rd_reg_word(&reg->isp24.mailbox0);
426 mb[1] = rd_reg_word(&reg->isp24.mailbox1);
427 mb[2] = rd_reg_word(&reg->isp24.mailbox2);
428 mb[3] = rd_reg_word(&reg->isp24.mailbox3);
429 mb[7] = rd_reg_word(&reg->isp24.mailbox7);
430 ictrl = rd_reg_dword(&reg->isp24.ictrl);
431 host_status = rd_reg_dword(&reg->isp24.host_status);
432 hccr = rd_reg_dword(&reg->isp24.hccr);
434 ql_log(ql_log_warn, vha, 0xd04c,
435 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
436 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
437 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
438 mb[7], host_status, hccr);
442 mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
443 ictrl = rd_reg_word(&reg->isp.ictrl);
444 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
445 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
446 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
449 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
451 /* Capture FW dump only, if PCI device active */
452 if (!pci_channel_offline(vha->hw->pdev)) {
453 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
454 if (w == 0xffff || ictrl == 0xffffffff ||
455 (chip_reset != ha->chip_reset)) {
456 /* This is special case if there is unload
457 * of driver happening and if PCI device go
458 * into bad state due to PCI error condition
459 * then only PCI ERR flag would be set.
460 * we will do premature exit for above case.
462 spin_lock_irqsave(&ha->hardware_lock, flags);
463 ha->flags.mbox_busy = 0;
464 spin_unlock_irqrestore(&ha->hardware_lock,
466 rval = QLA_FUNCTION_TIMEOUT;
470 /* Attempt to capture firmware dump for further
471 * analysis of the current firmware state. we do not
472 * need to do this if we are intentionally generating
475 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
476 qla2xxx_dump_fw(vha);
477 rval = QLA_FUNCTION_TIMEOUT;
/* Release the mailbox busy flag under hardware_lock. */
480 spin_lock_irqsave(&ha->hardware_lock, flags);
481 ha->flags.mbox_busy = 0;
482 spin_unlock_irqrestore(&ha->hardware_lock, flags);
487 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
488 ql_dbg(ql_dbg_mbx, vha, 0x101a,
489 "Checking for additional resp interrupt.\n");
491 /* polling mode for non isp_abort commands. */
492 qla2x00_poll(ha->rsp_q_map[0]);
/* On timeout (other than a deliberate GEN_SYSTEM_ERROR), schedule or run
 * an ISP abort -- via DPC when not in the DPC thread, directly otherwise. */
495 if (rval == QLA_FUNCTION_TIMEOUT &&
496 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
497 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
498 ha->flags.eeh_busy) {
499 /* not in dpc. schedule it for dpc to take over. */
500 ql_dbg(ql_dbg_mbx, vha, 0x101b,
501 "Timeout, schedule isp_abort_needed.\n");
503 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
504 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
505 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
506 if (IS_QLA82XX(ha)) {
507 ql_dbg(ql_dbg_mbx, vha, 0x112a,
508 "disabling pause transmit on port "
511 QLA82XX_CRB_NIU + 0x98,
512 CRB_NIU_XG_PAUSE_CTL_P0|
513 CRB_NIU_XG_PAUSE_CTL_P1);
515 ql_log(ql_log_info, base_vha, 0x101c,
516 "Mailbox cmd timeout occurred, cmd=0x%x, "
517 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
518 "abort.\n", command, mcp->mb[0],
521 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
522 qla2xxx_wake_dpc(vha);
524 } else if (current == ha->dpc_thread) {
525 /* call abort directly since we are in the DPC thread */
526 ql_dbg(ql_dbg_mbx, vha, 0x101d,
527 "Timeout, calling abort_isp.\n");
529 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
530 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
531 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
532 if (IS_QLA82XX(ha)) {
533 ql_dbg(ql_dbg_mbx, vha, 0x112b,
534 "disabling pause transmit on port "
537 QLA82XX_CRB_NIU + 0x98,
538 CRB_NIU_XG_PAUSE_CTL_P0|
539 CRB_NIU_XG_PAUSE_CTL_P1);
541 ql_log(ql_log_info, base_vha, 0x101e,
542 "Mailbox cmd timeout occurred, cmd=0x%x, "
543 "mb[0]=0x%x. Scheduling ISP abort ",
544 command, mcp->mb[0]);
546 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
547 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
548 /* Allow next mbx cmd to come in. */
549 complete(&ha->mbx_cmd_comp);
550 if (ha->isp_ops->abort_isp(vha) &&
551 !ha->flags.eeh_busy) {
552 /* Failed. retry later. */
553 set_bit(ISP_ABORT_NEEDED,
556 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
557 ql_dbg(ql_dbg_mbx, vha, 0x101f,
558 "Finished abort_isp.\n");
565 /* Allow next mbx cmd to come in. */
566 complete(&ha->mbx_cmd_comp);
569 if (rval == QLA_ABORTED) {
570 ql_log(ql_log_info, vha, 0xd035,
571 "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
574 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
575 pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
576 dev_name(&ha->pdev->dev), 0x1020+0x800,
580 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
581 if (mboxes & BIT_0) {
582 printk(" mb[%u]=%x", i, mcp->mb[i]);
585 pr_warn(" cmd=%x ****\n", command);
587 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
588 ql_dbg(ql_dbg_mbx, vha, 0x1198,
589 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
590 rd_reg_dword(&reg->isp24.host_status),
591 rd_reg_dword(&reg->isp24.ictrl),
592 rd_reg_dword(&reg->isp24.istatus));
594 ql_dbg(ql_dbg_mbx, vha, 0x1206,
595 "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
596 rd_reg_word(&reg->isp.ctrl_status),
597 rd_reg_word(&reg->isp.ictrl),
598 rd_reg_word(&reg->isp.istatus));
601 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
/* If the caller hit an EEH/PCI error, hold here until slot reset so the
 * caller's DMA buffers are not unmapped mid-recovery. */
605 while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) {
607 * The caller of this mailbox encounter pci error.
608 * Hold the thread until PCIE link reset complete to make
609 * sure caller does not unmap dma while recovery is
/*
 * qla2x00_load_ram() - load @risc_code_size words of firmware from the DMA
 * buffer at @req_dma into RISC RAM at @risc_addr, using the extended
 * LOAD_RISC_RAM command when the address needs >16 bits or the HBA is
 * FWI2-capable.  Returns the qla2x00_mailbox_command() status.
 * NOTE(review): listing has dropped lines (e.g. 620->623, 661->664);
 * declarations of rval/mc and the returns/braces are not visible here.
 */
619 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
620 uint32_t risc_code_size)
623 struct qla_hw_data *ha = vha->hw;
625 mbx_cmd_t *mcp = &mc;
627 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
628 "Entered %s.\n", __func__);
630 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
631 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
632 mcp->mb[8] = MSW(risc_addr);
633 mcp->out_mb = MBX_8|MBX_0;
635 mcp->mb[0] = MBC_LOAD_RISC_RAM;
/* Low address word plus the 64-bit DMA address split across mb[2,3,6,7]. */
638 mcp->mb[1] = LSW(risc_addr);
639 mcp->mb[2] = MSW(req_dma);
640 mcp->mb[3] = LSW(req_dma);
641 mcp->mb[6] = MSW(MSD(req_dma));
642 mcp->mb[7] = LSW(MSD(req_dma));
643 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
644 if (IS_FWI2_CAPABLE(ha)) {
645 mcp->mb[4] = MSW(risc_code_size);
646 mcp->mb[5] = LSW(risc_code_size);
647 mcp->out_mb |= MBX_5|MBX_4;
649 mcp->mb[4] = LSW(risc_code_size);
650 mcp->out_mb |= MBX_4;
653 mcp->in_mb = MBX_1|MBX_0;
654 mcp->tov = MBX_TOV_SECONDS;
656 rval = qla2x00_mailbox_command(vha, mcp);
658 if (rval != QLA_SUCCESS) {
659 ql_dbg(ql_dbg_mbx, vha, 0x1023,
660 "Failed=%x mb[0]=%x mb[1]=%x.\n",
661 rval, mcp->mb[0], mcp->mb[1]);
664 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
665 "Done %s.\n", __func__);
671 #define NVME_ENABLE_FLAG BIT_3
672 #define EDIF_HW_SUPPORT BIT_10
676 * Start adapter firmware.
679 * ha = adapter block pointer.
680 * TARGET_QUEUE_LOCK must be released.
681 * ADAPTER_STATE_LOCK must be released.
684 * qla2x00 local function return status code.
/*
 * qla2x00_execute_fw() - issue MBC_EXECUTE_FIRMWARE to start the adapter
 * firmware at @risc_addr, negotiating optional features (long-range
 * transceiver distance, NVMe, minimum link speed, extended logins,
 * exchange offload) on capable HBAs, then record the capabilities the
 * firmware reports back (ability mask, link speeds, eDIF HW support).
 * NOTE(review): listing has dropped lines (e.g. 709->715, 723->725,
 * 735->739, 772->778); the retry loop, several assignments and all
 * returns/braces are not visible here.  Verify against the full source.
 */
690 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
693 struct qla_hw_data *ha = vha->hw;
695 mbx_cmd_t *mcp = &mc;
697 #define EXE_FW_FORCE_SEMAPHORE BIT_7
700 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
701 "Entered %s.\n", __func__);
704 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
707 if (IS_FWI2_CAPABLE(ha)) {
708 mcp->mb[1] = MSW(risc_addr);
709 mcp->mb[2] = LSW(risc_addr);
/* Feature negotiation for FWI2-capable HBAs. */
715 if (ha->flags.lr_detected) {
717 if (IS_BPM_RANGE_CAPABLE(ha))
719 ha->lr_distance << LR_DIST_FW_POS;
722 if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
723 mcp->mb[4] |= NVME_ENABLE_FLAG;
725 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
726 struct nvram_81xx *nv = ha->nvram;
727 /* set minimum speed if specified in nvram */
728 if (nv->min_supported_speed >= 2 &&
729 nv->min_supported_speed <= 5) {
731 mcp->mb[11] |= nv->min_supported_speed & 0xF;
732 mcp->out_mb |= MBX_11;
734 vha->min_supported_speed =
735 nv->min_supported_speed;
739 mcp->mb[11] |= BIT_4;
742 if (ha->flags.exlogins_enabled)
743 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
745 if (ha->flags.exchoffld_enabled)
746 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
749 mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;
751 mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
752 mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1;
754 mcp->mb[1] = LSW(risc_addr);
755 mcp->out_mb |= MBX_1;
756 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
758 mcp->out_mb |= MBX_2;
762 mcp->tov = MBX_TOV_SECONDS;
764 rval = qla2x00_mailbox_command(vha, mcp);
766 if (rval != QLA_SUCCESS) {
/* ISP28xx semaphore-stuck case (mb[1]==0x27): retry with force-semaphore. */
767 if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
768 mcp->mb[1] == 0x27 && retry) {
771 ql_dbg(ql_dbg_async, vha, 0x1026,
772 "Exe FW: force semaphore.\n");
778 ql_dbg(ql_dbg_async, vha, 0x509d,
779 "Exe FW retry: mb[0]=%x retry[%d]\n", mcp->mb[0], retry);
782 ql_dbg(ql_dbg_mbx, vha, 0x1026,
783 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
788 if (!IS_FWI2_CAPABLE(ha))
/* Record what the firmware reported back. */
791 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
792 ql_dbg(ql_dbg_mbx, vha, 0x119a,
793 "fw_ability_mask=%x.\n", ha->fw_ability_mask);
794 ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
795 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
796 ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
797 ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
798 ha->max_supported_speed == 0 ? "16Gps" :
799 ha->max_supported_speed == 1 ? "32Gps" :
800 ha->max_supported_speed == 2 ? "64Gps" : "unknown");
801 if (vha->min_supported_speed) {
802 ha->min_supported_speed = mcp->mb[5] &
803 (BIT_0 | BIT_1 | BIT_2);
804 ql_dbg(ql_dbg_mbx, vha, 0x119c,
805 "min_supported_speed=%s.\n",
806 ha->min_supported_speed == 6 ? "64Gps" :
807 ha->min_supported_speed == 5 ? "32Gps" :
808 ha->min_supported_speed == 4 ? "16Gps" :
809 ha->min_supported_speed == 3 ? "8Gps" :
810 ha->min_supported_speed == 2 ? "4Gps" : "unknown");
814 if (IS_QLA28XX(ha) && (mcp->mb[5] & EDIF_HW_SUPPORT)) {
815 ha->flags.edif_hw = 1;
816 ql_log(ql_log_info, vha, 0xffff,
817 "%s: edif HW\n", __func__);
821 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
822 "Done %s.\n", __func__);
828 * qla_get_exlogin_status
829 * Get extended login status
830 * uses the memory offload control/status Mailbox
833 * ha: adapter state pointer.
834 * fwopt: firmware options
837 * qla2x00 local function status
842 #define FETCH_XLOGINS_STAT 0x8
/*
 * qla_get_exlogin_status() - query the firmware's extended-login status via
 * MBC_GET_MEM_OFFLOAD_CNTRL_STAT/FETCH_XLOGINS_STAT.  On success returns
 * the required buffer size through @buf_sz (mb[4]) and the extended login
 * count through @ex_logins_cnt (mb[10]).
 * NOTE(review): listing has dropped lines (e.g. 845->849, 859->862); the
 * rval/mc declarations and returns/braces are not visible here.
 */
844 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
845 uint16_t *ex_logins_cnt)
849 mbx_cmd_t *mcp = &mc;
851 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
852 "Entered %s\n", __func__);
854 memset(mcp->mb, 0 , sizeof(mcp->mb));
855 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
856 mcp->mb[1] = FETCH_XLOGINS_STAT;
857 mcp->out_mb = MBX_1|MBX_0;
858 mcp->in_mb = MBX_10|MBX_4|MBX_0;
859 mcp->tov = MBX_TOV_SECONDS;
862 rval = qla2x00_mailbox_command(vha, mcp);
863 if (rval != QLA_SUCCESS) {
864 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
866 *buf_sz = mcp->mb[4];
867 *ex_logins_cnt = mcp->mb[10];
869 ql_log(ql_log_info, vha, 0x1190,
870 "buffer size 0x%x, exchange login count=%d\n",
871 mcp->mb[4], mcp->mb[10]);
873 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
874 "Done %s.\n", __func__);
881 * qla_set_exlogin_mem_cfg
882 * set extended login memory configuration
883 * Mbx needs to be issues before init_cb is set
886 * ha: adapter state pointer.
887 * buffer: buffer pointer
888 * phys_addr: physical address of buffer
889 * size: size of buffer
890 * TARGET_QUEUE_LOCK must be released
891 * ADAPTER_STATE_LOCK must be released
894 * qla2x00 local function status code.
899 #define CONFIG_XLOGINS_MEM 0x9
/*
 * qla_set_exlogin_mem_cfg() - hand the firmware the host-resident
 * extended-login buffer: 64-bit DMA address @phys_addr in mb[2,3,6,7] and
 * size ha->exlogin_size in mb[8,9], via MBC_GET_MEM_OFFLOAD_CNTRL_STAT/
 * CONFIG_XLOGINS_MEM.  Returns the qla2x00_mailbox_command() status.
 * NOTE(review): listing has dropped lines (e.g. 901->905, 922->924); the
 * rval/mc declarations and returns/braces are not visible here.
 */
901 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
905 mbx_cmd_t *mcp = &mc;
906 struct qla_hw_data *ha = vha->hw;
908 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
909 "Entered %s.\n", __func__);
911 memset(mcp->mb, 0 , sizeof(mcp->mb));
912 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
913 mcp->mb[1] = CONFIG_XLOGINS_MEM;
914 mcp->mb[2] = MSW(phys_addr);
915 mcp->mb[3] = LSW(phys_addr);
916 mcp->mb[6] = MSW(MSD(phys_addr));
917 mcp->mb[7] = LSW(MSD(phys_addr));
918 mcp->mb[8] = MSW(ha->exlogin_size);
919 mcp->mb[9] = LSW(ha->exlogin_size);
920 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
921 mcp->in_mb = MBX_11|MBX_0;
922 mcp->tov = MBX_TOV_SECONDS;
924 rval = qla2x00_mailbox_command(vha, mcp);
925 if (rval != QLA_SUCCESS) {
926 ql_dbg(ql_dbg_mbx, vha, 0x111b,
927 "EXlogin Failed=%x. MB0=%x MB11=%x\n",
928 rval, mcp->mb[0], mcp->mb[11]);
930 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
931 "Done %s.\n", __func__);
938 * qla_get_exchoffld_status
939 * Get exchange offload status
940 * uses the memory offload control/status Mailbox
943 * ha: adapter state pointer.
944 * fwopt: firmware options
947 * qla2x00 local function status
952 #define FETCH_XCHOFFLD_STAT 0x2
/*
 * qla_get_exchoffld_status() - query the firmware's exchange-offload status
 * via MBC_GET_MEM_OFFLOAD_CNTRL_STAT/FETCH_XCHOFFLD_STAT.  On success
 * returns the required buffer size through @buf_sz (mb[4]) and the offload
 * count through @ex_logins_cnt (mb[10]).  Mirrors qla_get_exlogin_status().
 * NOTE(review): listing has dropped lines (e.g. 955->959, 969->972); the
 * rval/mc declarations and returns/braces are not visible here.
 */
954 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
955 uint16_t *ex_logins_cnt)
959 mbx_cmd_t *mcp = &mc;
961 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
962 "Entered %s\n", __func__);
964 memset(mcp->mb, 0 , sizeof(mcp->mb));
965 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
966 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
967 mcp->out_mb = MBX_1|MBX_0;
968 mcp->in_mb = MBX_10|MBX_4|MBX_0;
969 mcp->tov = MBX_TOV_SECONDS;
972 rval = qla2x00_mailbox_command(vha, mcp);
973 if (rval != QLA_SUCCESS) {
974 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
976 *buf_sz = mcp->mb[4];
977 *ex_logins_cnt = mcp->mb[10];
979 ql_log(ql_log_info, vha, 0x118e,
980 "buffer size 0x%x, exchange offload count=%d\n",
981 mcp->mb[4], mcp->mb[10]);
983 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
984 "Done %s.\n", __func__);
991 * qla_set_exchoffld_mem_cfg
992 * Set exchange offload memory configuration
993 * Mbx needs to be issues before init_cb is set
996 * ha: adapter state pointer.
997 * buffer: buffer pointer
998 * phys_addr: physical address of buffer
999 * size: size of buffer
1000 * TARGET_QUEUE_LOCK must be released
1001 * ADAPTER_STATE_LOCK must be released
1004 * qla2x00 local function status code.
1009 #define CONFIG_XCHOFFLD_MEM 0x3
/*
 * qla_set_exchoffld_mem_cfg() - hand the firmware the host-resident
 * exchange-offload buffer: 64-bit DMA address ha->exchoffld_buf_dma in
 * mb[2,3,6,7] and size ha->exchoffld_size in mb[8,9], via
 * MBC_GET_MEM_OFFLOAD_CNTRL_STAT/CONFIG_XCHOFFLD_MEM.  Mirrors
 * qla_set_exlogin_mem_cfg().
 * NOTE(review): listing has dropped lines (e.g. 1011->1015, 1032->1034);
 * the rval/mc declarations and returns/braces are not visible here.
 */
1011 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
1015 mbx_cmd_t *mcp = &mc;
1016 struct qla_hw_data *ha = vha->hw;
1018 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
1019 "Entered %s.\n", __func__);
1021 memset(mcp->mb, 0 , sizeof(mcp->mb));
1022 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
1023 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
1024 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
1025 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
1026 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
1027 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
1028 mcp->mb[8] = MSW(ha->exchoffld_size);
1029 mcp->mb[9] = LSW(ha->exchoffld_size);
1030 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1031 mcp->in_mb = MBX_11|MBX_0;
1032 mcp->tov = MBX_TOV_SECONDS;
1034 rval = qla2x00_mailbox_command(vha, mcp);
1035 if (rval != QLA_SUCCESS) {
1037 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
1039 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
1040 "Done %s.\n", __func__);
1047 * qla2x00_get_fw_version
1048 * Get firmware version.
1051 * ha: adapter state pointer.
1052 * major: pointer for major number.
1053 * minor: pointer for minor number.
1054 * subminor: pointer for subminor number.
1057 * qla2x00 local function return status code.
/*
 * qla2x00_get_fw_version() - issue MBC_GET_FIRMWARE_VERSION and cache the
 * results in ha/vha: firmware version triple and attributes, memory size,
 * MPI/PHY/serdes/PEP versions on capable chips, shared-RAM/DDR windows,
 * secure-flash support, and derived feature flags (NVMe, NVMe2, eDIF, SCM).
 * NOTE(review): listing has dropped lines throughout (e.g. 1063->1067,
 * 1137->1139, 1201->1206); loop/brace structure and some assignments are
 * not visible here.  Verify against the full source before editing.
 */
1063 qla2x00_get_fw_version(scsi_qla_host_t *vha)
1067 mbx_cmd_t *mcp = &mc;
1068 struct qla_hw_data *ha = vha->hw;
1070 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
1071 "Entered %s.\n", __func__);
1073 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
1074 mcp->out_mb = MBX_0;
/* Widen the inbound mailbox mask per chip family capabilities. */
1075 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1076 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
1077 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
1078 if (IS_FWI2_CAPABLE(ha))
1079 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
1080 if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1082 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
1083 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;
1086 mcp->tov = MBX_TOV_SECONDS;
1087 rval = qla2x00_mailbox_command(vha, mcp);
1088 if (rval != QLA_SUCCESS)
1091 /* Return mailbox data. */
1092 ha->fw_major_version = mcp->mb[1];
1093 ha->fw_minor_version = mcp->mb[2];
1094 ha->fw_subminor_version = mcp->mb[3];
1095 ha->fw_attributes = mcp->mb[6];
1096 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
1097 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
1099 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1101 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1102 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1103 ha->mpi_version[1] = mcp->mb[11] >> 8;
1104 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1105 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1106 ha->phy_version[0] = mcp->mb[8] & 0xff;
1107 ha->phy_version[1] = mcp->mb[9] >> 8;
1108 ha->phy_version[2] = mcp->mb[9] & 0xff;
1111 if (IS_FWI2_CAPABLE(ha)) {
1112 ha->fw_attributes_h = mcp->mb[15];
1113 ha->fw_attributes_ext[0] = mcp->mb[16];
1114 ha->fw_attributes_ext[1] = mcp->mb[17];
1115 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1116 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1117 __func__, mcp->mb[15], mcp->mb[6]);
1118 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1119 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1120 __func__, mcp->mb[17], mcp->mb[16]);
1122 if (ha->fw_attributes_h & 0x4)
1123 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1124 "%s: Firmware supports Extended Login 0x%x\n",
1125 __func__, ha->fw_attributes_h);
1127 if (ha->fw_attributes_h & 0x8)
1128 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1129 "%s: Firmware supports Exchange Offload 0x%x\n",
1130 __func__, ha->fw_attributes_h);
1133 * FW supports nvme and driver load parameter requested nvme.
1134 * BIT 26 of fw_attributes indicates NVMe support.
1136 if ((ha->fw_attributes_h &
1137 (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
1139 if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
1140 vha->flags.nvme_first_burst = 1;
1142 vha->flags.nvme_enabled = 1;
1143 ql_log(ql_log_info, vha, 0xd302,
1144 "%s: FC-NVMe is Enabled (0x%x)\n",
1145 __func__, ha->fw_attributes_h);
1148 /* BIT_13 of Extended FW Attributes informs about NVMe2 support */
1149 if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
1150 ql_log(ql_log_info, vha, 0xd302,
1151 "Firmware supports NVMe2 0x%x\n",
1152 ha->fw_attributes_ext[0]);
1153 vha->flags.nvme2_enabled = 1;
1156 if (IS_QLA28XX(ha) && ha->flags.edif_hw && ql2xsecenable &&
1157 (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_EDIF)) {
1158 ha->flags.edif_enabled = 1;
1159 ql_log(ql_log_info, vha, 0xffff,
1160 "%s: edif is enabled\n", __func__);
1164 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1165 ha->serdes_version[0] = mcp->mb[7] & 0xff;
1166 ha->serdes_version[1] = mcp->mb[8] >> 8;
1167 ha->serdes_version[2] = mcp->mb[8] & 0xff;
1168 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1169 ha->mpi_version[1] = mcp->mb[11] >> 8;
1170 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1171 ha->pep_version[0] = mcp->mb[13] & 0xff;
1172 ha->pep_version[1] = mcp->mb[14] >> 8;
1173 ha->pep_version[2] = mcp->mb[14] & 0xff;
1174 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1175 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1176 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1177 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1178 if (IS_QLA28XX(ha)) {
1179 if (mcp->mb[16] & BIT_10)
1180 ha->flags.secure_fw = 1;
1182 ql_log(ql_log_info, vha, 0xffff,
1183 "Secure Flash Update in FW: %s\n",
1184 (ha->flags.secure_fw) ? "Supported" :
1188 if (ha->flags.scm_supported_a &&
1189 (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
1190 ha->flags.scm_supported_f = 1;
1191 ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
1193 ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
1194 (ha->flags.scm_supported_f) ? "Supported" :
1197 if (vha->flags.nvme2_enabled) {
1198 /* set BIT_15 of special feature control block for SLER */
1199 ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
1200 /* set BIT_14 of special feature control block for PI CTRL*/
1201 ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
1206 if (rval != QLA_SUCCESS) {
1208 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1211 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1212 "Done %s.\n", __func__);
1218 * qla2x00_get_fw_options
1219 * Get firmware options.
1222 * ha = adapter block pointer.
1223 * fwopt = pointer for firmware options.
1226 * qla2x00 local function return status code.
/*
 * qla2x00_get_fw_options() - issue MBC_GET_FIRMWARE_OPTION and, on success,
 * copy the four returned mailbox words (mb[0..3]) into @fwopts[0..3].
 * NOTE(review): listing has dropped lines (e.g. 1232->1236, 1244->1246);
 * the rval/mc declarations and returns/braces are not visible here.
 */
1232 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1236 mbx_cmd_t *mcp = &mc;
1238 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1239 "Entered %s.\n", __func__);
1241 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1242 mcp->out_mb = MBX_0;
1243 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1244 mcp->tov = MBX_TOV_SECONDS;
1246 rval = qla2x00_mailbox_command(vha, mcp);
1248 if (rval != QLA_SUCCESS) {
1250 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1252 fwopts[0] = mcp->mb[0];
1253 fwopts[1] = mcp->mb[1];
1254 fwopts[2] = mcp->mb[2];
1255 fwopts[3] = mcp->mb[3];
1257 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1258 "Done %s.\n", __func__);
1266 * qla2x00_set_fw_options
1267 * Set firmware options.
1270 * ha = adapter block pointer.
1271 * fwopt = pointer for firmware options.
1274 * qla2x00 local function return status code.
/*
 * qla2x00_set_fw_options() - issue MBC_SET_FIRMWARE_OPTION with
 * @fwopts[1..3] (plus mb[10..12] on non-FWI2 HBAs, mb[10] only on FWI2),
 * and report the firmware's status word back through @fwopts[0].
 * NOTE(review): listing has dropped lines (e.g. 1284->1286, 1305->1307);
 * declarations, the else keyword of the FWI2 branch, and returns/braces
 * are not visible here.
 */
1280 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1284 mbx_cmd_t *mcp = &mc;
1286 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1287 "Entered %s.\n", __func__);
1289 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1290 mcp->mb[1] = fwopts[1];
1291 mcp->mb[2] = fwopts[2];
1292 mcp->mb[3] = fwopts[3];
1293 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1295 if (IS_FWI2_CAPABLE(vha->hw)) {
1296 mcp->in_mb |= MBX_1;
1297 mcp->mb[10] = fwopts[10];
1298 mcp->out_mb |= MBX_10;
1300 mcp->mb[10] = fwopts[10];
1301 mcp->mb[11] = fwopts[11];
1302 mcp->mb[12] = 0; /* Undocumented, but used */
1303 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1305 mcp->tov = MBX_TOV_SECONDS;
1307 rval = qla2x00_mailbox_command(vha, mcp);
1309 fwopts[0] = mcp->mb[0];
1311 if (rval != QLA_SUCCESS) {
1313 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1314 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1317 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1318 "Done %s.\n", __func__);
1325 * qla2x00_mbx_reg_test
1326 * Mailbox register wrap test.
1329 * ha = adapter block pointer.
1330 * TARGET_QUEUE_LOCK must be released.
1331 * ADAPTER_STATE_LOCK must be released.
1334 * qla2x00 local function return status code.
1340 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1344 mbx_cmd_t *mcp = &mc;
1346 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1347 "Entered %s.\n", __func__);
1349 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1350 mcp->mb[1] = 0xAAAA;
1351 mcp->mb[2] = 0x5555;
1352 mcp->mb[3] = 0xAA55;
1353 mcp->mb[4] = 0x55AA;
1354 mcp->mb[5] = 0xA5A5;
1355 mcp->mb[6] = 0x5A5A;
1356 mcp->mb[7] = 0x2525;
1357 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1358 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1359 mcp->tov = MBX_TOV_SECONDS;
1361 rval = qla2x00_mailbox_command(vha, mcp);
1363 if (rval == QLA_SUCCESS) {
1364 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1365 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1366 rval = QLA_FUNCTION_FAILED;
1367 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1368 mcp->mb[7] != 0x2525)
1369 rval = QLA_FUNCTION_FAILED;
1372 if (rval != QLA_SUCCESS) {
1374 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1378 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1379 "Done %s.\n", __func__);
1386 * qla2x00_verify_checksum
1387 * Verify firmware checksum.
1390 * ha = adapter block pointer.
1391 * TARGET_QUEUE_LOCK must be released.
1392 * ADAPTER_STATE_LOCK must be released.
1395 * qla2x00 local function return status code.
1401 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1405 mbx_cmd_t *mcp = &mc;
1407 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1408 "Entered %s.\n", __func__);
1410 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1411 mcp->out_mb = MBX_0;
1413 if (IS_FWI2_CAPABLE(vha->hw)) {
1414 mcp->mb[1] = MSW(risc_addr);
1415 mcp->mb[2] = LSW(risc_addr);
1416 mcp->out_mb |= MBX_2|MBX_1;
1417 mcp->in_mb |= MBX_2|MBX_1;
1419 mcp->mb[1] = LSW(risc_addr);
1420 mcp->out_mb |= MBX_1;
1421 mcp->in_mb |= MBX_1;
1424 mcp->tov = MBX_TOV_SECONDS;
1426 rval = qla2x00_mailbox_command(vha, mcp);
1428 if (rval != QLA_SUCCESS) {
1429 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1430 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1431 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1433 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1434 "Done %s.\n", __func__);
1441 * qla2x00_issue_iocb
1442 * Issue IOCB using mailbox command
1445 * ha = adapter state pointer.
1446 * buffer = buffer pointer.
1447 * phys_addr = physical address of buffer.
1448 * size = size of buffer.
1449 * TARGET_QUEUE_LOCK must be released.
1450 * ADAPTER_STATE_LOCK must be released.
1453 * qla2x00 local function return status code.
1459 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1460 dma_addr_t phys_addr, size_t size, uint32_t tov)
1464 mbx_cmd_t *mcp = &mc;
1466 if (!vha->hw->flags.fw_started)
1467 return QLA_INVALID_COMMAND;
1469 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1470 "Entered %s.\n", __func__);
1472 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1474 mcp->mb[2] = MSW(LSD(phys_addr));
1475 mcp->mb[3] = LSW(LSD(phys_addr));
1476 mcp->mb[6] = MSW(MSD(phys_addr));
1477 mcp->mb[7] = LSW(MSD(phys_addr));
1478 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1479 mcp->in_mb = MBX_1|MBX_0;
1482 rval = qla2x00_mailbox_command(vha, mcp);
1484 if (rval != QLA_SUCCESS) {
1486 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1488 sts_entry_t *sts_entry = buffer;
1490 /* Mask reserved bits. */
1491 sts_entry->entry_status &=
1492 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1493 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1494 "Done %s (status=%x).\n", __func__,
1495 sts_entry->entry_status);
1502 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1505 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1510 * qla2x00_abort_command
1511 * Abort command aborts a specified IOCB.
1514 * ha = adapter block pointer.
1515 * sp = SB structure pointer.
1518 * qla2x00 local function return status code.
1524 qla2x00_abort_command(srb_t *sp)
1526 unsigned long flags = 0;
1528 uint32_t handle = 0;
1530 mbx_cmd_t *mcp = &mc;
1531 fc_port_t *fcport = sp->fcport;
1532 scsi_qla_host_t *vha = fcport->vha;
1533 struct qla_hw_data *ha = vha->hw;
1534 struct req_que *req;
1535 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1537 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1538 "Entered %s.\n", __func__);
1541 req = sp->qpair->req;
1545 spin_lock_irqsave(&ha->hardware_lock, flags);
1546 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1547 if (req->outstanding_cmds[handle] == sp)
1550 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1552 if (handle == req->num_outstanding_cmds) {
1553 /* command not found */
1554 return QLA_FUNCTION_FAILED;
1557 mcp->mb[0] = MBC_ABORT_COMMAND;
1558 if (HAS_EXTENDED_IDS(ha))
1559 mcp->mb[1] = fcport->loop_id;
1561 mcp->mb[1] = fcport->loop_id << 8;
1562 mcp->mb[2] = (uint16_t)handle;
1563 mcp->mb[3] = (uint16_t)(handle >> 16);
1564 mcp->mb[6] = (uint16_t)cmd->device->lun;
1565 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1567 mcp->tov = MBX_TOV_SECONDS;
1569 rval = qla2x00_mailbox_command(vha, mcp);
1571 if (rval != QLA_SUCCESS) {
1572 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1574 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1575 "Done %s.\n", __func__);
1582 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1586 mbx_cmd_t *mcp = &mc;
1587 scsi_qla_host_t *vha;
1591 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1592 "Entered %s.\n", __func__);
1594 mcp->mb[0] = MBC_ABORT_TARGET;
1595 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1596 if (HAS_EXTENDED_IDS(vha->hw)) {
1597 mcp->mb[1] = fcport->loop_id;
1599 mcp->out_mb |= MBX_10;
1601 mcp->mb[1] = fcport->loop_id << 8;
1603 mcp->mb[2] = vha->hw->loop_reset_delay;
1604 mcp->mb[9] = vha->vp_idx;
1607 mcp->tov = MBX_TOV_SECONDS;
1609 rval = qla2x00_mailbox_command(vha, mcp);
1610 if (rval != QLA_SUCCESS) {
1611 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1612 "Failed=%x.\n", rval);
1615 /* Issue marker IOCB. */
1616 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
1618 if (rval2 != QLA_SUCCESS) {
1619 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1620 "Failed to issue marker IOCB (%x).\n", rval2);
1622 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1623 "Done %s.\n", __func__);
1630 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1634 mbx_cmd_t *mcp = &mc;
1635 scsi_qla_host_t *vha;
1639 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1640 "Entered %s.\n", __func__);
1642 mcp->mb[0] = MBC_LUN_RESET;
1643 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1644 if (HAS_EXTENDED_IDS(vha->hw))
1645 mcp->mb[1] = fcport->loop_id;
1647 mcp->mb[1] = fcport->loop_id << 8;
1648 mcp->mb[2] = (u32)l;
1650 mcp->mb[9] = vha->vp_idx;
1653 mcp->tov = MBX_TOV_SECONDS;
1655 rval = qla2x00_mailbox_command(vha, mcp);
1656 if (rval != QLA_SUCCESS) {
1657 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1660 /* Issue marker IOCB. */
1661 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
1663 if (rval2 != QLA_SUCCESS) {
1664 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1665 "Failed to issue marker IOCB (%x).\n", rval2);
1667 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1668 "Done %s.\n", __func__);
1675 * qla2x00_get_adapter_id
1676 * Get adapter ID and topology.
1679 * ha = adapter block pointer.
1680 * id = pointer for loop ID.
1681 * al_pa = pointer for AL_PA.
1682 * area = pointer for area.
1683 * domain = pointer for domain.
1684 * top = pointer for topology.
1685 * TARGET_QUEUE_LOCK must be released.
1686 * ADAPTER_STATE_LOCK must be released.
1689 * qla2x00 local function return status code.
1695 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1696 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1700 mbx_cmd_t *mcp = &mc;
1702 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1703 "Entered %s.\n", __func__);
1705 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1706 mcp->mb[9] = vha->vp_idx;
1707 mcp->out_mb = MBX_9|MBX_0;
1708 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1709 if (IS_CNA_CAPABLE(vha->hw))
1710 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1711 if (IS_FWI2_CAPABLE(vha->hw))
1712 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1713 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
1714 mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23;
1716 mcp->tov = MBX_TOV_SECONDS;
1718 rval = qla2x00_mailbox_command(vha, mcp);
1719 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1720 rval = QLA_COMMAND_ERROR;
1721 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1722 rval = QLA_INVALID_COMMAND;
1726 *al_pa = LSB(mcp->mb[2]);
1727 *area = MSB(mcp->mb[2]);
1728 *domain = LSB(mcp->mb[3]);
1730 *sw_cap = mcp->mb[7];
1732 if (rval != QLA_SUCCESS) {
1734 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1736 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1737 "Done %s.\n", __func__);
1739 if (IS_CNA_CAPABLE(vha->hw)) {
1740 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1741 vha->fcoe_fcf_idx = mcp->mb[10];
1742 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1743 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1744 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1745 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1746 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1747 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1749 /* If FA-WWN supported */
1750 if (IS_FAWWN_CAPABLE(vha->hw)) {
1751 if (mcp->mb[7] & BIT_14) {
1752 vha->port_name[0] = MSB(mcp->mb[16]);
1753 vha->port_name[1] = LSB(mcp->mb[16]);
1754 vha->port_name[2] = MSB(mcp->mb[17]);
1755 vha->port_name[3] = LSB(mcp->mb[17]);
1756 vha->port_name[4] = MSB(mcp->mb[18]);
1757 vha->port_name[5] = LSB(mcp->mb[18]);
1758 vha->port_name[6] = MSB(mcp->mb[19]);
1759 vha->port_name[7] = LSB(mcp->mb[19]);
1760 fc_host_port_name(vha->host) =
1761 wwn_to_u64(vha->port_name);
1762 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1763 "FA-WWN acquired %016llx\n",
1764 wwn_to_u64(vha->port_name));
1768 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
1769 vha->bbcr = mcp->mb[15];
1770 if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) {
1771 ql_log(ql_log_info, vha, 0x11a4,
1772 "SCM: EDC ELS completed, flags 0x%x\n",
1775 if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) {
1776 vha->hw->flags.scm_enabled = 1;
1777 vha->scm_fabric_connection_flags |=
1778 SCM_FLAG_RDF_COMPLETED;
1779 ql_log(ql_log_info, vha, 0x11a5,
1780 "SCM: RDF ELS completed, flags 0x%x\n",
1790 * qla2x00_get_retry_cnt
1791 * Get current firmware login retry count and delay.
1794 * ha = adapter block pointer.
1795 * retry_cnt = pointer to login retry count.
1796 * tov = pointer to login timeout value.
1799 * qla2x00 local function return status code.
1805 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1811 mbx_cmd_t *mcp = &mc;
1813 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1814 "Entered %s.\n", __func__);
1816 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1817 mcp->out_mb = MBX_0;
1818 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1819 mcp->tov = MBX_TOV_SECONDS;
1821 rval = qla2x00_mailbox_command(vha, mcp);
1823 if (rval != QLA_SUCCESS) {
1825 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1826 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1828 /* Convert returned data and check our values. */
1829 *r_a_tov = mcp->mb[3] / 2;
1830 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1831 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1832 /* Update to the larger values */
1833 *retry_cnt = (uint8_t)mcp->mb[1];
1837 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1838 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1845 * qla2x00_init_firmware
1846 * Initialize adapter firmware.
1849 * ha = adapter block pointer.
1850 * dptr = Initialization control block pointer.
1851 * size = size of initialization control block.
1852 * TARGET_QUEUE_LOCK must be released.
1853 * ADAPTER_STATE_LOCK must be released.
1856 * qla2x00 local function return status code.
1862 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1866 mbx_cmd_t *mcp = &mc;
1867 struct qla_hw_data *ha = vha->hw;
1869 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1870 "Entered %s.\n", __func__);
1872 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1873 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1874 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1876 if (ha->flags.npiv_supported)
1877 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1879 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1882 mcp->mb[2] = MSW(ha->init_cb_dma);
1883 mcp->mb[3] = LSW(ha->init_cb_dma);
1884 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1885 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1886 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1887 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1889 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1890 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1891 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1892 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1893 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1894 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1897 if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
1898 mcp->mb[1] |= BIT_1;
1899 mcp->mb[16] = MSW(ha->sf_init_cb_dma);
1900 mcp->mb[17] = LSW(ha->sf_init_cb_dma);
1901 mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma));
1902 mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma));
1903 mcp->mb[15] = sizeof(*ha->sf_init_cb);
1904 mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15;
1907 /* 1 and 2 should normally be captured. */
1908 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1909 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
1910 /* mb3 is additional info about the installed SFP. */
1911 mcp->in_mb |= MBX_3;
1912 mcp->buf_size = size;
1913 mcp->flags = MBX_DMA_OUT;
1914 mcp->tov = MBX_TOV_SECONDS;
1915 rval = qla2x00_mailbox_command(vha, mcp);
1917 if (rval != QLA_SUCCESS) {
1919 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1920 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
1921 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1923 ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
1924 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1925 0x0104d, ha->init_cb, sizeof(*ha->init_cb));
1927 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1928 ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
1929 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1930 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
1933 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1934 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1935 ql_dbg(ql_dbg_mbx, vha, 0x119d,
1936 "Invalid SFP/Validation Failed\n");
1938 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1939 "Done %s.\n", __func__);
1947 * qla2x00_get_port_database
1948 * Issue normal/enhanced get port database mailbox command
1949 * and copy device name as necessary.
1952 * ha = adapter state pointer.
1953 * dev = structure pointer.
1954 * opt = enhanced cmd option byte.
1957 * qla2x00 local function return status code.
1963 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1967 mbx_cmd_t *mcp = &mc;
1968 port_database_t *pd;
1969 struct port_database_24xx *pd24;
1971 struct qla_hw_data *ha = vha->hw;
1973 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1974 "Entered %s.\n", __func__);
1977 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1979 ql_log(ql_log_warn, vha, 0x1050,
1980 "Failed to allocate port database structure.\n");
1982 return QLA_MEMORY_ALLOC_FAILED;
1985 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1986 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1987 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1988 mcp->mb[2] = MSW(pd_dma);
1989 mcp->mb[3] = LSW(pd_dma);
1990 mcp->mb[6] = MSW(MSD(pd_dma));
1991 mcp->mb[7] = LSW(MSD(pd_dma));
1992 mcp->mb[9] = vha->vp_idx;
1993 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1995 if (IS_FWI2_CAPABLE(ha)) {
1996 mcp->mb[1] = fcport->loop_id;
1998 mcp->out_mb |= MBX_10|MBX_1;
1999 mcp->in_mb |= MBX_1;
2000 } else if (HAS_EXTENDED_IDS(ha)) {
2001 mcp->mb[1] = fcport->loop_id;
2003 mcp->out_mb |= MBX_10|MBX_1;
2005 mcp->mb[1] = fcport->loop_id << 8 | opt;
2006 mcp->out_mb |= MBX_1;
2008 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
2009 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
2010 mcp->flags = MBX_DMA_IN;
2011 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2012 rval = qla2x00_mailbox_command(vha, mcp);
2013 if (rval != QLA_SUCCESS)
2016 if (IS_FWI2_CAPABLE(ha)) {
2018 u8 current_login_state, last_login_state;
2020 pd24 = (struct port_database_24xx *) pd;
2022 /* Check for logged in state. */
2023 if (NVME_TARGET(ha, fcport)) {
2024 current_login_state = pd24->current_login_state >> 4;
2025 last_login_state = pd24->last_login_state >> 4;
2027 current_login_state = pd24->current_login_state & 0xf;
2028 last_login_state = pd24->last_login_state & 0xf;
2030 fcport->current_login_state = pd24->current_login_state;
2031 fcport->last_login_state = pd24->last_login_state;
2033 /* Check for logged in state. */
2034 if (current_login_state != PDS_PRLI_COMPLETE &&
2035 last_login_state != PDS_PRLI_COMPLETE) {
2036 ql_dbg(ql_dbg_mbx, vha, 0x119a,
2037 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
2038 current_login_state, last_login_state,
2040 rval = QLA_FUNCTION_FAILED;
2046 if (fcport->loop_id == FC_NO_LOOP_ID ||
2047 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2048 memcmp(fcport->port_name, pd24->port_name, 8))) {
2049 /* We lost the device mid way. */
2050 rval = QLA_NOT_LOGGED_IN;
2054 /* Names are little-endian. */
2055 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
2056 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
2058 /* Get port_id of device. */
2059 fcport->d_id.b.domain = pd24->port_id[0];
2060 fcport->d_id.b.area = pd24->port_id[1];
2061 fcport->d_id.b.al_pa = pd24->port_id[2];
2062 fcport->d_id.b.rsvd_1 = 0;
2064 /* If not target must be initiator or unknown type. */
2065 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
2066 fcport->port_type = FCT_INITIATOR;
2068 fcport->port_type = FCT_TARGET;
2070 /* Passback COS information. */
2071 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
2072 FC_COS_CLASS2 : FC_COS_CLASS3;
2074 if (pd24->prli_svc_param_word_3[0] & BIT_7)
2075 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
2079 /* Check for logged in state. */
2080 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
2081 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
2082 ql_dbg(ql_dbg_mbx, vha, 0x100a,
2083 "Unable to verify login-state (%x/%x) - "
2084 "portid=%02x%02x%02x.\n", pd->master_state,
2085 pd->slave_state, fcport->d_id.b.domain,
2086 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2087 rval = QLA_FUNCTION_FAILED;
2091 if (fcport->loop_id == FC_NO_LOOP_ID ||
2092 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2093 memcmp(fcport->port_name, pd->port_name, 8))) {
2094 /* We lost the device mid way. */
2095 rval = QLA_NOT_LOGGED_IN;
2099 /* Names are little-endian. */
2100 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
2101 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
2103 /* Get port_id of device. */
2104 fcport->d_id.b.domain = pd->port_id[0];
2105 fcport->d_id.b.area = pd->port_id[3];
2106 fcport->d_id.b.al_pa = pd->port_id[2];
2107 fcport->d_id.b.rsvd_1 = 0;
2109 /* If not target must be initiator or unknown type. */
2110 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
2111 fcport->port_type = FCT_INITIATOR;
2113 fcport->port_type = FCT_TARGET;
2115 /* Passback COS information. */
2116 fcport->supported_classes = (pd->options & BIT_4) ?
2117 FC_COS_CLASS2 : FC_COS_CLASS3;
2121 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
2124 if (rval != QLA_SUCCESS) {
2125 ql_dbg(ql_dbg_mbx, vha, 0x1052,
2126 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
2127 mcp->mb[0], mcp->mb[1]);
2129 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
2130 "Done %s.\n", __func__);
2137 qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle,
2138 struct port_database_24xx *pdb)
2141 mbx_cmd_t *mcp = &mc;
2145 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115,
2146 "Entered %s.\n", __func__);
2148 memset(pdb, 0, sizeof(*pdb));
2150 pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb,
2151 sizeof(*pdb), DMA_FROM_DEVICE);
2153 ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n");
2154 return QLA_MEMORY_ALLOC_FAILED;
2157 mcp->mb[0] = MBC_GET_PORT_DATABASE;
2158 mcp->mb[1] = nport_handle;
2159 mcp->mb[2] = MSW(LSD(pdb_dma));
2160 mcp->mb[3] = LSW(LSD(pdb_dma));
2161 mcp->mb[6] = MSW(MSD(pdb_dma));
2162 mcp->mb[7] = LSW(MSD(pdb_dma));
2165 mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2166 mcp->in_mb = MBX_1|MBX_0;
2167 mcp->buf_size = sizeof(*pdb);
2168 mcp->flags = MBX_DMA_IN;
2169 mcp->tov = vha->hw->login_timeout * 2;
2170 rval = qla2x00_mailbox_command(vha, mcp);
2172 if (rval != QLA_SUCCESS) {
2173 ql_dbg(ql_dbg_mbx, vha, 0x111a,
2174 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2175 rval, mcp->mb[0], mcp->mb[1]);
2177 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b,
2178 "Done %s.\n", __func__);
2181 dma_unmap_single(&vha->hw->pdev->dev, pdb_dma,
2182 sizeof(*pdb), DMA_FROM_DEVICE);
2188 * qla2x00_get_firmware_state
2189 * Get adapter firmware state.
2192 * ha = adapter block pointer.
2193 * dptr = pointer for firmware state.
2194 * TARGET_QUEUE_LOCK must be released.
2195 * ADAPTER_STATE_LOCK must be released.
2198 * qla2x00 local function return status code.
2204 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
2208 mbx_cmd_t *mcp = &mc;
2209 struct qla_hw_data *ha = vha->hw;
2211 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
2212 "Entered %s.\n", __func__);
2214 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
2215 mcp->out_mb = MBX_0;
2216 if (IS_FWI2_CAPABLE(vha->hw))
2217 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2219 mcp->in_mb = MBX_1|MBX_0;
2220 mcp->tov = MBX_TOV_SECONDS;
2222 rval = qla2x00_mailbox_command(vha, mcp);
2224 /* Return firmware states. */
2225 states[0] = mcp->mb[1];
2226 if (IS_FWI2_CAPABLE(vha->hw)) {
2227 states[1] = mcp->mb[2];
2228 states[2] = mcp->mb[3]; /* SFP info */
2229 states[3] = mcp->mb[4];
2230 states[4] = mcp->mb[5];
2231 states[5] = mcp->mb[6]; /* DPORT status */
2234 if (rval != QLA_SUCCESS) {
2236 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
2238 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2239 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2240 ql_dbg(ql_dbg_mbx, vha, 0x119e,
2241 "Invalid SFP/Validation Failed\n");
2243 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2244 "Done %s.\n", __func__);
2251 * qla2x00_get_port_name
2252 * Issue get port name mailbox command.
2253 * Returned name is in big endian format.
2256 * ha = adapter block pointer.
2257 * loop_id = loop ID of device.
2258 * name = pointer for name.
2259 * TARGET_QUEUE_LOCK must be released.
2260 * ADAPTER_STATE_LOCK must be released.
2263 * qla2x00 local function return status code.
2269 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2274 mbx_cmd_t *mcp = &mc;
2276 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2277 "Entered %s.\n", __func__);
2279 mcp->mb[0] = MBC_GET_PORT_NAME;
2280 mcp->mb[9] = vha->vp_idx;
2281 mcp->out_mb = MBX_9|MBX_1|MBX_0;
2282 if (HAS_EXTENDED_IDS(vha->hw)) {
2283 mcp->mb[1] = loop_id;
2285 mcp->out_mb |= MBX_10;
2287 mcp->mb[1] = loop_id << 8 | opt;
2290 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2291 mcp->tov = MBX_TOV_SECONDS;
2293 rval = qla2x00_mailbox_command(vha, mcp);
2295 if (rval != QLA_SUCCESS) {
2297 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2300 /* This function returns name in big endian. */
2301 name[0] = MSB(mcp->mb[2]);
2302 name[1] = LSB(mcp->mb[2]);
2303 name[2] = MSB(mcp->mb[3]);
2304 name[3] = LSB(mcp->mb[3]);
2305 name[4] = MSB(mcp->mb[6]);
2306 name[5] = LSB(mcp->mb[6]);
2307 name[6] = MSB(mcp->mb[7]);
2308 name[7] = LSB(mcp->mb[7]);
2311 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2312 "Done %s.\n", __func__);
2319 * qla24xx_link_initialization
2320 * Issue link initialization mailbox command.
2323 * ha = adapter block pointer.
2324 * TARGET_QUEUE_LOCK must be released.
2325 * ADAPTER_STATE_LOCK must be released.
2328 * qla2x00 local function return status code.
2334 qla24xx_link_initialize(scsi_qla_host_t *vha)
2338 mbx_cmd_t *mcp = &mc;
2340 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2341 "Entered %s.\n", __func__);
2343 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2344 return QLA_FUNCTION_FAILED;
2346 mcp->mb[0] = MBC_LINK_INITIALIZATION;
2348 if (vha->hw->operating_mode == LOOP)
2349 mcp->mb[1] |= BIT_6;
2351 mcp->mb[1] |= BIT_5;
2354 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2356 mcp->tov = MBX_TOV_SECONDS;
2358 rval = qla2x00_mailbox_command(vha, mcp);
2360 if (rval != QLA_SUCCESS) {
2361 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2363 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2364 "Done %s.\n", __func__);
2372 * Issue LIP reset mailbox command.
2375 * ha = adapter block pointer.
2376 * TARGET_QUEUE_LOCK must be released.
2377 * ADAPTER_STATE_LOCK must be released.
2380 * qla2x00 local function return status code.
2386 qla2x00_lip_reset(scsi_qla_host_t *vha)
2390 mbx_cmd_t *mcp = &mc;
2392 ql_dbg(ql_dbg_disc, vha, 0x105a,
2393 "Entered %s.\n", __func__);
2395 if (IS_CNA_CAPABLE(vha->hw)) {
2396 /* Logout across all FCFs. */
2397 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2400 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2401 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2402 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2405 mcp->mb[3] = vha->hw->loop_reset_delay;
2406 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2408 mcp->mb[0] = MBC_LIP_RESET;
2409 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2410 if (HAS_EXTENDED_IDS(vha->hw)) {
2411 mcp->mb[1] = 0x00ff;
2413 mcp->out_mb |= MBX_10;
2415 mcp->mb[1] = 0xff00;
2417 mcp->mb[2] = vha->hw->loop_reset_delay;
2421 mcp->tov = MBX_TOV_SECONDS;
2423 rval = qla2x00_mailbox_command(vha, mcp);
2425 if (rval != QLA_SUCCESS) {
2427 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2430 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2431 "Done %s.\n", __func__);
2442 * ha = adapter block pointer.
2443 * sns = pointer for command.
2444 * cmd_size = command size.
2445 * buf_size = response/command size.
2446 * TARGET_QUEUE_LOCK must be released.
2447 * ADAPTER_STATE_LOCK must be released.
2450 * qla2x00 local function return status code.
2456 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2457 uint16_t cmd_size, size_t buf_size)
2461 mbx_cmd_t *mcp = &mc;
2463 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2464 "Entered %s.\n", __func__);
2466 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2467 "Retry cnt=%d ratov=%d total tov=%d.\n",
2468 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2470 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2471 mcp->mb[1] = cmd_size;
2472 mcp->mb[2] = MSW(sns_phys_address);
2473 mcp->mb[3] = LSW(sns_phys_address);
2474 mcp->mb[6] = MSW(MSD(sns_phys_address));
2475 mcp->mb[7] = LSW(MSD(sns_phys_address));
2476 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2477 mcp->in_mb = MBX_0|MBX_1;
2478 mcp->buf_size = buf_size;
2479 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2480 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2481 rval = qla2x00_mailbox_command(vha, mcp);
2483 if (rval != QLA_SUCCESS) {
2485 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2486 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2487 rval, mcp->mb[0], mcp->mb[1]);
2490 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2491 "Done %s.\n", __func__);
2498 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2499 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2503 struct logio_entry_24xx *lg;
2506 struct qla_hw_data *ha = vha->hw;
2507 struct req_que *req;
2509 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2510 "Entered %s.\n", __func__);
2512 if (vha->vp_idx && vha->qpair)
2513 req = vha->qpair->req;
2515 req = ha->req_q_map[0];
2517 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2519 ql_log(ql_log_warn, vha, 0x1062,
2520 "Failed to allocate login IOCB.\n");
2521 return QLA_MEMORY_ALLOC_FAILED;
2524 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2525 lg->entry_count = 1;
2526 lg->handle = make_handle(req->id, lg->handle);
2527 lg->nport_handle = cpu_to_le16(loop_id);
2528 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2530 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2532 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2533 lg->port_id[0] = al_pa;
2534 lg->port_id[1] = area;
2535 lg->port_id[2] = domain;
2536 lg->vp_index = vha->vp_idx;
2537 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2538 (ha->r_a_tov / 10 * 2) + 2);
2539 if (rval != QLA_SUCCESS) {
2540 ql_dbg(ql_dbg_mbx, vha, 0x1063,
2541 "Failed to issue login IOCB (%x).\n", rval);
2542 } else if (lg->entry_status != 0) {
2543 ql_dbg(ql_dbg_mbx, vha, 0x1064,
2544 "Failed to complete IOCB -- error status (%x).\n",
2546 rval = QLA_FUNCTION_FAILED;
2547 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2548 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2549 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2551 ql_dbg(ql_dbg_mbx, vha, 0x1065,
2552 "Failed to complete IOCB -- completion status (%x) "
2553 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2557 case LSC_SCODE_PORTID_USED:
2558 mb[0] = MBS_PORT_ID_USED;
2559 mb[1] = LSW(iop[1]);
2561 case LSC_SCODE_NPORT_USED:
2562 mb[0] = MBS_LOOP_ID_USED;
2564 case LSC_SCODE_NOLINK:
2565 case LSC_SCODE_NOIOCB:
2566 case LSC_SCODE_NOXCB:
2567 case LSC_SCODE_CMD_FAILED:
2568 case LSC_SCODE_NOFABRIC:
2569 case LSC_SCODE_FW_NOT_READY:
2570 case LSC_SCODE_NOT_LOGGED_IN:
2571 case LSC_SCODE_NOPCB:
2572 case LSC_SCODE_ELS_REJECT:
2573 case LSC_SCODE_CMD_PARAM_ERR:
2574 case LSC_SCODE_NONPORT:
2575 case LSC_SCODE_LOGGED_IN:
2576 case LSC_SCODE_NOFLOGI_ACC:
2578 mb[0] = MBS_COMMAND_ERROR;
2582 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2583 "Done %s.\n", __func__);
2585 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2587 mb[0] = MBS_COMMAND_COMPLETE;
2589 if (iop[0] & BIT_4) {
2595 /* Passback COS information. */
2597 if (lg->io_parameter[7] || lg->io_parameter[8])
2598 mb[10] |= BIT_0; /* Class 2. */
2599 if (lg->io_parameter[9] || lg->io_parameter[10])
2600 mb[10] |= BIT_1; /* Class 3. */
2601 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2602 mb[10] |= BIT_7; /* Confirmed Completion
2607 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2613 * qla2x00_login_fabric
2614 * Issue login fabric port mailbox command.
2617 * ha = adapter block pointer.
2618 * loop_id = device loop ID.
2619 * domain = device domain.
2620 * area = device area.
2621 * al_pa = device AL_PA.
2622 * status = pointer for return status.
2623 * opt = command options.
2624 * TARGET_QUEUE_LOCK must be released.
2625 * ADAPTER_STATE_LOCK must be released.
2628 * qla2x00 local function return status code.
/*
 * Issue the LOGIN FABRIC PORT mailbox command on the classic (pre-FWI2)
 * mailbox interface.  Selected completion mailbox registers are passed
 * back to the caller via @mb; returns the mailbox-command status.
 */
2634 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2635     uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2639 	mbx_cmd_t *mcp = &mc;
2640 	struct qla_hw_data *ha = vha->hw;
2642 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2643 	    "Entered %s.\n", __func__);
2645 	mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2646 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	/* Extended-ID firmware takes the loop ID in mb[1] and options in
	 * mb[10]; otherwise both are packed into mb[1]. */
2647 	if (HAS_EXTENDED_IDS(ha)) {
2648 		mcp->mb[1] = loop_id;
2650 		mcp->out_mb |= MBX_10;
2652 		mcp->mb[1] = (loop_id << 8) | opt;
2654 	mcp->mb[2] = domain;
2655 	mcp->mb[3] = area << 8 | al_pa;
2657 	mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
	/* Login can take a while; scale the timeout from the login timeout. */
2658 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2660 	rval = qla2x00_mailbox_command(vha, mcp);
2662 	/* Return mailbox statuses. */
2669 	/* COS retrieved from Get-Port-Database mailbox command. */
2673 	if (rval != QLA_SUCCESS) {
2674 		/* RLU tmp code: need to change main mailbox_command function to
2675 		 * return ok even when the mailbox completion value is not
2676 		 * SUCCESS. The caller needs to be responsible to interpret
2677 		 * the return values of this mailbox command if we're not
2678 		 * to change too much of the existing code.
		/* NOTE(review): these mb[0] values look like benign/expected
		 * login completion codes that are mapped away from a hard
		 * failure — confirm against the firmware interface spec. */
2680 		if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2681 		    mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2682 		    mcp->mb[0] == 0x4006)
2686 		ql_dbg(ql_dbg_mbx, vha, 0x1068,
2687 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2688 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2691 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2692 		    "Done %s.\n", __func__);
2699 * qla2x00_login_local_device
2700 * Issue login loop port mailbox command.
2703 * ha = adapter block pointer.
2704 * loop_id = device loop ID.
2705 * opt = command options.
2708 * Return status code.
/*
 * Issue a LOGIN LOOP PORT mailbox command for a local-loop device.
 * FWI2-capable ISPs are redirected to the 24xx fabric-login path.
 * Completion mailbox registers 0, 1, 6 and 7 are copied to @mb_ret.
 */
2715 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2716     uint16_t *mb_ret, uint8_t opt)
2720 	mbx_cmd_t *mcp = &mc;
2721 	struct qla_hw_data *ha = vha->hw;
2723 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2724 	    "Entered %s.\n", __func__);
	/* Newer firmware interface: use the IOCB-based fabric login. */
2726 	if (IS_FWI2_CAPABLE(ha))
2727 		return qla24xx_login_fabric(vha, fcport->loop_id,
2728 		    fcport->d_id.b.domain, fcport->d_id.b.area,
2729 		    fcport->d_id.b.al_pa, mb_ret, opt);
2731 	mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2732 	if (HAS_EXTENDED_IDS(ha))
2733 		mcp->mb[1] = fcport->loop_id;
2735 		mcp->mb[1] = fcport->loop_id << 8;
2737 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
2738 	mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2739 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2741 	rval = qla2x00_mailbox_command(vha, mcp);
2743 	/* Return mailbox statuses. */
2744 	if (mb_ret != NULL) {
2745 		mb_ret[0] = mcp->mb[0];
2746 		mb_ret[1] = mcp->mb[1];
2747 		mb_ret[6] = mcp->mb[6];
2748 		mb_ret[7] = mcp->mb[7];
2751 	if (rval != QLA_SUCCESS) {
2752 		/* AV tmp code: need to change main mailbox_command function to
2753 		 * return ok even when the mailbox completion value is not
2754 		 * SUCCESS. The caller needs to be responsible to interpret
2755 		 * the return values of this mailbox command if we're not
2756 		 * to change too much of the existing code.
2758 		if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2761 		ql_dbg(ql_dbg_mbx, vha, 0x106b,
2762 		    "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2763 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2766 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2767 		    "Done %s.\n", __func__);
/*
 * Perform an explicit fabric logout on FWI2-capable ISPs by building a
 * LOGINOUT_PORT IOCB (LOGO, implicit + free-loop-id flags) in a DMA-pool
 * buffer and issuing it with a timeout derived from R_A_TOV.
 */
2774 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2775     uint8_t area, uint8_t al_pa)
2778 	struct logio_entry_24xx *lg;
2780 	struct qla_hw_data *ha = vha->hw;
2781 	struct req_que *req;
2783 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2784 	    "Entered %s.\n", __func__);
	/* IOCB comes from the small shared DMA pool; zeroed on allocation. */
2786 	lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2788 		ql_log(ql_log_warn, vha, 0x106e,
2789 		    "Failed to allocate logout IOCB.\n");
2790 		return QLA_MEMORY_ALLOC_FAILED;
2794 	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2795 	lg->entry_count = 1;
2796 	lg->handle = make_handle(req->id, lg->handle);
2797 	lg->nport_handle = cpu_to_le16(loop_id);
2799 	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
	/* port_id is stored little-endian: al_pa, area, domain. */
2801 	lg->port_id[0] = al_pa;
2802 	lg->port_id[1] = area;
2803 	lg->port_id[2] = domain;
2804 	lg->vp_index = vha->vp_idx;
2805 	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2806 	    (ha->r_a_tov / 10 * 2) + 2);
2807 	if (rval != QLA_SUCCESS) {
2808 		ql_dbg(ql_dbg_mbx, vha, 0x106f,
2809 		    "Failed to issue logout IOCB (%x).\n", rval);
2810 	} else if (lg->entry_status != 0) {
2811 		ql_dbg(ql_dbg_mbx, vha, 0x1070,
2812 		    "Failed to complete IOCB -- error status (%x).\n",
2814 		rval = QLA_FUNCTION_FAILED;
2815 	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2816 		ql_dbg(ql_dbg_mbx, vha, 0x1071,
2817 		    "Failed to complete IOCB -- completion status (%x) "
2818 		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2819 		    le32_to_cpu(lg->io_parameter[0]),
2820 		    le32_to_cpu(lg->io_parameter[1]));
2823 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2824 		    "Done %s.\n", __func__);
2827 	dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2833 * qla2x00_fabric_logout
2834 * Issue logout fabric port mailbox command.
2837 * ha = adapter block pointer.
2838 * loop_id = device loop ID.
2839 * TARGET_QUEUE_LOCK must be released.
2840 * ADAPTER_STATE_LOCK must be released.
2843 * qla2x00 local function return status code.
/*
 * Issue the LOGOUT FABRIC PORT mailbox command on the classic mailbox
 * interface.  The @domain/@area/@al_pa arguments are unused here; only
 * the loop ID is passed to the firmware.
 */
2849 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2850     uint8_t area, uint8_t al_pa)
2854 	mbx_cmd_t *mcp = &mc;
2856 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2857 	    "Entered %s.\n", __func__);
2859 	mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2860 	mcp->out_mb = MBX_1|MBX_0;
	/* Same loop-ID packing convention as the login path. */
2861 	if (HAS_EXTENDED_IDS(vha->hw)) {
2862 		mcp->mb[1] = loop_id;
2864 		mcp->out_mb |= MBX_10;
2866 		mcp->mb[1] = loop_id << 8;
2869 	mcp->in_mb = MBX_1|MBX_0;
2870 	mcp->tov = MBX_TOV_SECONDS;
2872 	rval = qla2x00_mailbox_command(vha, mcp);
2874 	if (rval != QLA_SUCCESS) {
2876 		ql_dbg(ql_dbg_mbx, vha, 0x1074,
2877 		    "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2880 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2881 		    "Done %s.\n", __func__);
2888 * qla2x00_full_login_lip
2889 * Issue full login LIP mailbox command.
2892 * ha = adapter block pointer.
2893 * TARGET_QUEUE_LOCK must be released.
2894 * ADAPTER_STATE_LOCK must be released.
2897 * qla2x00 local function return status code.
/*
 * Issue the LIP FULL LOGIN mailbox command to reinitialize the loop.
 * On FWI2-capable parts mb[1] BIT_4 selects the newer LIP behavior.
 */
2903 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2907 	mbx_cmd_t *mcp = &mc;
2909 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2910 	    "Entered %s.\n", __func__);
2912 	mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2913 	mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
2916 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2918 	mcp->tov = MBX_TOV_SECONDS;
2920 	rval = qla2x00_mailbox_command(vha, mcp);
2922 	if (rval != QLA_SUCCESS) {
2924 		ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2927 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2928 		    "Done %s.\n", __func__);
2935 * qla2x00_get_id_list
2938 * ha = adapter block pointer.
2941 * qla2x00 local function return status code.
/*
 * Issue the GET ID LIST mailbox command, DMA'ing the port-ID list into
 * @id_list.  On success the number of entries returned by the firmware
 * (mb[1]) is stored through @entries.
 */
2947 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2952 	mbx_cmd_t *mcp = &mc;
2954 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2955 	    "Entered %s.\n", __func__);
2957 	if (id_list == NULL)
2958 		return QLA_FUNCTION_FAILED;
2960 	mcp->mb[0] = MBC_GET_ID_LIST;
2961 	mcp->out_mb = MBX_0;
	/* FWI2 and legacy firmware place the 64-bit DMA address in
	 * different mailbox registers. */
2962 	if (IS_FWI2_CAPABLE(vha->hw)) {
2963 		mcp->mb[2] = MSW(id_list_dma);
2964 		mcp->mb[3] = LSW(id_list_dma);
2965 		mcp->mb[6] = MSW(MSD(id_list_dma));
2966 		mcp->mb[7] = LSW(MSD(id_list_dma));
2968 		mcp->mb[9] = vha->vp_idx;
2969 		mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2971 		mcp->mb[1] = MSW(id_list_dma);
2972 		mcp->mb[2] = LSW(id_list_dma);
2973 		mcp->mb[3] = MSW(MSD(id_list_dma));
2974 		mcp->mb[6] = LSW(MSD(id_list_dma));
2975 		mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2977 	mcp->in_mb = MBX_1|MBX_0;
2978 	mcp->tov = MBX_TOV_SECONDS;
2980 	rval = qla2x00_mailbox_command(vha, mcp);
2982 	if (rval != QLA_SUCCESS) {
2984 		ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2986 		*entries = mcp->mb[1];
2987 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2988 		    "Done %s.\n", __func__);
2995 * qla2x00_get_resource_cnts
2996 * Get current firmware resource counts.
2999 * ha = adapter block pointer.
3002 * qla2x00 local function return status code.
/*
 * Query current/original firmware resource counts (exchange control
 * blocks, IOCBs, NPIV vports, FCFs) and cache them in qla_hw_data.
 */
3008 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
3010 	struct qla_hw_data *ha = vha->hw;
3013 	mbx_cmd_t *mcp = &mc;
3015 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
3016 	    "Entered %s.\n", __func__);
3018 	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
3019 	mcp->out_mb = MBX_0;
3020 	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	/* Only these ISP families report an FCF count in mb[12]. */
3021 	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
3022 	    IS_QLA27XX(ha) || IS_QLA28XX(ha))
3023 		mcp->in_mb |= MBX_12;
3024 	mcp->tov = MBX_TOV_SECONDS;
3026 	rval = qla2x00_mailbox_command(vha, mcp);
3028 	if (rval != QLA_SUCCESS) {
3030 		ql_dbg(ql_dbg_mbx, vha, 0x107d,
3031 		    "Failed mb[0]=%x.\n", mcp->mb[0]);
3033 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
3034 		    "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
3035 		    "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
3036 		    mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
3037 		    mcp->mb[11], mcp->mb[12]);
	/* Cache the returned counts for later capacity decisions. */
3039 	ha->orig_fw_tgt_xcb_count = mcp->mb[1];
3040 	ha->cur_fw_tgt_xcb_count = mcp->mb[2];
3041 	ha->cur_fw_xcb_count = mcp->mb[3];
3042 	ha->orig_fw_xcb_count = mcp->mb[6];
3043 	ha->cur_fw_iocb_count = mcp->mb[7];
3044 	ha->orig_fw_iocb_count = mcp->mb[10];
3045 	if (ha->flags.npiv_supported)
3046 		ha->max_npiv_vports = mcp->mb[11];
3047 	if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
3048 		ha->fw_max_fcf_count = mcp->mb[12];
3055 * qla2x00_get_fcal_position_map
3056 * Get FCAL (LILP) position map using mailbox command
3059 * ha = adapter state pointer.
3060 * pos_map = buffer pointer (can be NULL).
3063 * qla2x00 local function return status code.
/*
 * Retrieve the FC-AL (LILP) position map into a temporary DMA-pool
 * buffer and, if @pos_map is non-NULL, copy FCAL_MAP_SIZE bytes out.
 */
3069 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
3073 	mbx_cmd_t *mcp = &mc;
3075 	dma_addr_t pmap_dma;
3076 	struct qla_hw_data *ha = vha->hw;
3078 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
3079 	    "Entered %s.\n", __func__);
3081 	pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
3083 		ql_log(ql_log_warn, vha, 0x1080,
3084 		    "Memory alloc failed.\n");
3085 		return QLA_MEMORY_ALLOC_FAILED;
3088 	mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
3089 	mcp->mb[2] = MSW(pmap_dma);
3090 	mcp->mb[3] = LSW(pmap_dma);
3091 	mcp->mb[6] = MSW(MSD(pmap_dma));
3092 	mcp->mb[7] = LSW(MSD(pmap_dma));
3093 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3094 	mcp->in_mb = MBX_1|MBX_0;
3095 	mcp->buf_size = FCAL_MAP_SIZE;
	/* MBX_DMA_IN tells the mailbox layer firmware writes the buffer. */
3096 	mcp->flags = MBX_DMA_IN;
3097 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
3098 	rval = qla2x00_mailbox_command(vha, mcp);
3100 	if (rval == QLA_SUCCESS) {
3101 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
3102 		    "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
3103 		    mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
3104 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
3108 			memcpy(pos_map, pmap, FCAL_MAP_SIZE);
3110 	dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
3112 	if (rval != QLA_SUCCESS) {
3113 		ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
3115 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
3116 		    "Done %s.\n", __func__);
3123 * qla2x00_get_link_status
3126 * ha = adapter block pointer.
3127 * loop_id = device loop ID.
3128 * ret_buf = pointer to link status return buffer.
3132 * BIT_0 = mem alloc error.
3133 * BIT_1 = mailbox error.
/*
 * Issue the GET LINK STATUS mailbox command for @loop_id, DMA'ing the
 * statistics into @stats, then byte-swap the le32 firmware data in
 * place (only the fields up to link_up_cnt).
 */
3136 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
3137     struct link_statistics *stats, dma_addr_t stats_dma)
3141 	mbx_cmd_t *mcp = &mc;
3142 	uint32_t *iter = (uint32_t *)stats;
	/* Only swap the leading classic-stats portion of the structure. */
3143 	ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
3144 	struct qla_hw_data *ha = vha->hw;
3146 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
3147 	    "Entered %s.\n", __func__);
3149 	mcp->mb[0] = MBC_GET_LINK_STATUS;
3150 	mcp->mb[2] = MSW(LSD(stats_dma));
3151 	mcp->mb[3] = LSW(LSD(stats_dma));
3152 	mcp->mb[6] = MSW(MSD(stats_dma));
3153 	mcp->mb[7] = LSW(MSD(stats_dma));
3154 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	/* Loop-ID placement varies by firmware generation. */
3156 	if (IS_FWI2_CAPABLE(ha)) {
3157 		mcp->mb[1] = loop_id;
3160 		mcp->out_mb |= MBX_10|MBX_4|MBX_1;
3161 		mcp->in_mb |= MBX_1;
3162 	} else if (HAS_EXTENDED_IDS(ha)) {
3163 		mcp->mb[1] = loop_id;
3165 		mcp->out_mb |= MBX_10|MBX_1;
3167 		mcp->mb[1] = loop_id << 8;
3168 		mcp->out_mb |= MBX_1;
3170 	mcp->tov = MBX_TOV_SECONDS;
3171 	mcp->flags = IOCTL_CMD;
3172 	rval = qla2x00_mailbox_command(vha, mcp);
3174 	if (rval == QLA_SUCCESS) {
3175 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3176 			ql_dbg(ql_dbg_mbx, vha, 0x1085,
3177 			    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3178 			rval = QLA_FUNCTION_FAILED;
3180 			/* Re-endianize - firmware data is le32. */
3181 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
3182 			    "Done %s.\n", __func__);
3183 			for ( ; dwords--; iter++)
3188 		ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
/*
 * Fetch private link statistics on 24xx-class ISPs via the async
 * mailbox path (qla24xx_send_mb_cmd), then byte-swap the entire
 * le32 statistics buffer in place.
 */
3195 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
3196     dma_addr_t stats_dma, uint16_t options)
3200 	mbx_cmd_t *mcp = &mc;
3201 	uint32_t *iter = (uint32_t *)stats;
3202 	ushort dwords = sizeof(*stats)/sizeof(*iter);
3204 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
3205 	    "Entered %s.\n", __func__);
3207 	memset(&mc, 0, sizeof(mc));
3208 	mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
3209 	mc.mb[2] = MSW(LSD(stats_dma));
3210 	mc.mb[3] = LSW(LSD(stats_dma));
3211 	mc.mb[6] = MSW(MSD(stats_dma));
3212 	mc.mb[7] = LSW(MSD(stats_dma));
3214 	mc.mb[9] = vha->vp_idx;
3215 	mc.mb[10] = options;
	/* Sent through the DPC/sp machinery rather than the direct
	 * polled mailbox path; mcp aliases mc for result inspection. */
3217 	rval = qla24xx_send_mb_cmd(vha, &mc);
3219 	if (rval == QLA_SUCCESS) {
3220 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3221 			ql_dbg(ql_dbg_mbx, vha, 0x1089,
3222 			    "Failed mb[0]=%x.\n", mcp->mb[0]);
3223 			rval = QLA_FUNCTION_FAILED;
3225 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
3226 			    "Done %s.\n", __func__);
3227 			/* Re-endianize - firmware data is le32. */
3228 			for ( ; dwords--; iter++)
3233 		ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
/*
 * Abort an outstanding command on 24xx-class ISPs: locate @sp's handle
 * in the queue-pair's outstanding array, build an ABORT IOCB in a
 * DMA-pool buffer, issue it, and interpret the reused nport_handle
 * field as the completion status.
 */
3240 qla24xx_abort_command(srb_t *sp)
3243 	unsigned long flags = 0;
3245 	struct abort_entry_24xx *abt;
3248 	fc_port_t *fcport = sp->fcport;
3249 	struct scsi_qla_host *vha = fcport->vha;
3250 	struct qla_hw_data *ha = vha->hw;
3251 	struct req_que *req;
3252 	struct qla_qpair *qpair = sp->qpair;
3254 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3255 	    "Entered %s.\n", __func__);
3258 		req = sp->qpair->req;
3260 		return QLA_ERR_NO_QPAIR;
	/* Preferred path: asynchronous abort when enabled. */
3262 	if (ql2xasynctmfenable)
3263 		return qla24xx_async_abort_command(sp);
	/* Find the firmware handle of the command to abort; scanning the
	 * outstanding array must be done under the qpair lock. */
3265 	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3266 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3267 		if (req->outstanding_cmds[handle] == sp)
3270 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3271 	if (handle == req->num_outstanding_cmds) {
3272 		/* Command not found. */
3273 		return QLA_ERR_NOT_FOUND;
3276 	abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3278 		ql_log(ql_log_warn, vha, 0x108d,
3279 		    "Failed to allocate abort IOCB.\n");
3280 		return QLA_MEMORY_ALLOC_FAILED;
3283 	abt->entry_type = ABORT_IOCB_TYPE;
3284 	abt->entry_count = 1;
3285 	abt->handle = make_handle(req->id, abt->handle);
3286 	abt->nport_handle = cpu_to_le16(fcport->loop_id);
3287 	abt->handle_to_abort = make_handle(req->id, handle);
3288 	abt->port_id[0] = fcport->d_id.b.al_pa;
3289 	abt->port_id[1] = fcport->d_id.b.area;
3290 	abt->port_id[2] = fcport->d_id.b.domain;
3291 	abt->vp_index = fcport->vha->vp_idx;
3293 	abt->req_que_no = cpu_to_le16(req->id);
3294 	/* Need to pass original sp */
3295 	qla_nvme_abort_set_option(abt, sp);
3297 	rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3298 	if (rval != QLA_SUCCESS) {
3299 		ql_dbg(ql_dbg_mbx, vha, 0x108e,
3300 		    "Failed to issue IOCB (%x).\n", rval);
3301 	} else if (abt->entry_status != 0) {
3302 		ql_dbg(ql_dbg_mbx, vha, 0x108f,
3303 		    "Failed to complete IOCB -- error status (%x).\n",
3305 		rval = QLA_FUNCTION_FAILED;
	/* On completion the firmware overwrites nport_handle with the
	 * completion status; 0 means success. */
3306 	} else if (abt->nport_handle != cpu_to_le16(0)) {
3307 		ql_dbg(ql_dbg_mbx, vha, 0x1090,
3308 		    "Failed to complete IOCB -- completion status (%x).\n",
3309 		    le16_to_cpu(abt->nport_handle));
3310 		if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
3311 			rval = QLA_FUNCTION_PARAMETER_ERROR;
3313 			rval = QLA_FUNCTION_FAILED;
3315 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3316 		    "Done %s.\n", __func__);
3318 	if (rval == QLA_SUCCESS)
3319 		qla_nvme_abort_process_comp_status(abt, sp);
3321 	qla_wait_nvme_release_cmd_kref(sp);
3323 	dma_pool_free(ha->s_dma_pool, abt, abt_dma);
/* Request/response overlay for task-management IOCBs: the same DMA
 * buffer carries the TMF request (tsk) out and the status IOCB (sts)
 * back.  NOTE(review): the members appear to be wrapped in a union
 * (referenced below as tsk->p.tsk / tsk->p.sts) — confirm against the
 * full definition. */
3328 struct tsk_mgmt_cmd {
3330 	struct tsk_mgmt_entry tsk;
3331 	struct sts_entry_24xx sts;
/*
 * Common worker for 24xx task-management functions (target reset / LUN
 * reset).  Builds a TSK_MGMT IOCB in a DMA-pool buffer, issues it,
 * validates entry/completion/SCSI status of the returned status IOCB,
 * and finally issues a marker IOCB to resynchronize the firmware.
 * @type selects the TMF (e.g. TCF_LUN_RESET, TCF_TARGET_RESET);
 * @l is the LUN (used only for LUN reset).
 */
3336 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3337     uint64_t l, int tag)
3340 	struct tsk_mgmt_cmd *tsk;
3341 	struct sts_entry_24xx *sts;
3343 	scsi_qla_host_t *vha;
3344 	struct qla_hw_data *ha;
3345 	struct req_que *req;
3346 	struct qla_qpair *qpair;
3352 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3353 	    "Entered %s.\n", __func__);
	/* vports issue through their own queue pair when available. */
3355 	if (vha->vp_idx && vha->qpair) {
3361 	tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3363 		ql_log(ql_log_warn, vha, 0x1093,
3364 		    "Failed to allocate task management IOCB.\n");
3365 		return QLA_MEMORY_ALLOC_FAILED;
3368 	tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3369 	tsk->p.tsk.entry_count = 1;
3370 	tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle);
3371 	tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
	/* TMF timeout scaled from R_A_TOV (stored in 100ms units). */
3372 	tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3373 	tsk->p.tsk.control_flags = cpu_to_le32(type);
3374 	tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3375 	tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3376 	tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3377 	tsk->p.tsk.vp_index = fcport->vha->vp_idx;
3378 	if (type == TCF_LUN_RESET) {
3379 		int_to_scsilun(l, &tsk->p.tsk.lun);
		/* LUN field must be in FCP (big-endian) byte order. */
3380 		host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3381 		    sizeof(tsk->p.tsk.lun));
3385 	rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3386 	if (rval != QLA_SUCCESS) {
3387 		ql_dbg(ql_dbg_mbx, vha, 0x1094,
3388 		    "Failed to issue %s reset IOCB (%x).\n", name, rval);
3389 	} else if (sts->entry_status != 0) {
3390 		ql_dbg(ql_dbg_mbx, vha, 0x1095,
3391 		    "Failed to complete IOCB -- error status (%x).\n",
3393 		rval = QLA_FUNCTION_FAILED;
3394 	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3395 		ql_dbg(ql_dbg_mbx, vha, 0x1096,
3396 		    "Failed to complete IOCB -- completion status (%x).\n",
3397 		    le16_to_cpu(sts->comp_status));
3398 		rval = QLA_FUNCTION_FAILED;
3399 	} else if (le16_to_cpu(sts->scsi_status) &
3400 	    SS_RESPONSE_INFO_LEN_VALID) {
3401 		if (le32_to_cpu(sts->rsp_data_len) < 4) {
3402 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3403 			    "Ignoring inconsistent data length -- not enough "
3404 			    "response info (%d).\n",
3405 			    le32_to_cpu(sts->rsp_data_len));
		/* data[3] is the FCP response code; non-zero = TMF failed. */
3406 		} else if (sts->data[3]) {
3407 			ql_dbg(ql_dbg_mbx, vha, 0x1098,
3408 			    "Failed to complete IOCB -- response (%x).\n",
3410 			rval = QLA_FUNCTION_FAILED;
3414 	/* Issue marker IOCB. */
3415 	rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
3416 	    type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
3417 	if (rval2 != QLA_SUCCESS) {
3418 		ql_dbg(ql_dbg_mbx, vha, 0x1099,
3419 		    "Failed to issue marker IOCB (%x).\n", rval2);
3421 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3422 		    "Done %s.\n", __func__);
3425 	dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
/* Target reset entry point: prefer the async TMF path when enabled,
 * otherwise fall back to the synchronous IOCB worker. */
3431 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3433 	struct qla_hw_data *ha = fcport->vha->hw;
3435 	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3436 		return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3438 	return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
/* LUN reset entry point: mirrors qla24xx_abort_target() but issues
 * TCF_LUN_RESET against LUN @l. */
3442 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3444 	struct qla_hw_data *ha = fcport->vha->hw;
3446 	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3447 		return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3449 	return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
/*
 * Deliberately trigger a firmware system error (for diagnostics).
 * Only supported on 23xx and FWI2-capable ISPs.
 */
3453 qla2x00_system_error(scsi_qla_host_t *vha)
3457 	mbx_cmd_t *mcp = &mc;
3458 	struct qla_hw_data *ha = vha->hw;
3460 	if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3461 		return QLA_FUNCTION_FAILED;
3463 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3464 	    "Entered %s.\n", __func__);
3466 	mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3467 	mcp->out_mb = MBX_0;
3471 	rval = qla2x00_mailbox_command(vha, mcp);
3473 	if (rval != QLA_SUCCESS) {
3474 		ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3476 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3477 		    "Done %s.\n", __func__);
/*
 * Write one SERDES register word at @addr via the WRITE SERDES mailbox
 * command.  Supported on 25xx/2031/27xx/28xx only; 2031 parts accept
 * only the low data byte.
 */
3484 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3488 	mbx_cmd_t *mcp = &mc;
3490 	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3491 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3492 		return QLA_FUNCTION_FAILED;
3494 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3495 	    "Entered %s.\n", __func__);
3497 	mcp->mb[0] = MBC_WRITE_SERDES;
3499 	if (IS_QLA2031(vha->hw))
3500 		mcp->mb[2] = data & 0xff;
3505 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3507 	mcp->tov = MBX_TOV_SECONDS;
3509 	rval = qla2x00_mailbox_command(vha, mcp);
3511 	if (rval != QLA_SUCCESS) {
3512 		ql_dbg(ql_dbg_mbx, vha, 0x1183,
3513 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3515 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3516 		    "Done %s.\n", __func__);
/*
 * Read one SERDES register word at @addr via the READ SERDES mailbox
 * command into *@data.  Same chip gating as the write path; 2031
 * parts return only the low byte.
 */
3523 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3527 	mbx_cmd_t *mcp = &mc;
3529 	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3530 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3531 		return QLA_FUNCTION_FAILED;
3533 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3534 	    "Entered %s.\n", __func__);
3536 	mcp->mb[0] = MBC_READ_SERDES;
3539 	mcp->out_mb = MBX_3|MBX_1|MBX_0;
3540 	mcp->in_mb = MBX_1|MBX_0;
3541 	mcp->tov = MBX_TOV_SECONDS;
3543 	rval = qla2x00_mailbox_command(vha, mcp);
3545 	if (IS_QLA2031(vha->hw))
3546 		*data = mcp->mb[1] & 0xff;
3550 	if (rval != QLA_SUCCESS) {
3551 		ql_dbg(ql_dbg_mbx, vha, 0x1186,
3552 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3554 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3555 		    "Done %s.\n", __func__);
/*
 * 8044-specific SERDES write: 32-bit @addr/@data are split LSW/MSW
 * across mb[3..6] of the SET/GET ETH SERDES REG mailbox command.
 */
3562 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3566 	mbx_cmd_t *mcp = &mc;
3568 	if (!IS_QLA8044(vha->hw))
3569 		return QLA_FUNCTION_FAILED;
3571 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3572 	    "Entered %s.\n", __func__);
3574 	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3575 	mcp->mb[1] = HCS_WRITE_SERDES;
3576 	mcp->mb[3] = LSW(addr);
3577 	mcp->mb[4] = MSW(addr);
3578 	mcp->mb[5] = LSW(data);
3579 	mcp->mb[6] = MSW(data);
3580 	mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3582 	mcp->tov = MBX_TOV_SECONDS;
3584 	rval = qla2x00_mailbox_command(vha, mcp);
3586 	if (rval != QLA_SUCCESS) {
3587 		ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3588 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3590 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3591 		    "Done %s.\n", __func__);
/*
 * 8044-specific SERDES read: returns the 32-bit register value
 * reassembled from mb[2] (MSW) and mb[1] (LSW) into *@data.
 */
3598 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3602 	mbx_cmd_t *mcp = &mc;
3604 	if (!IS_QLA8044(vha->hw))
3605 		return QLA_FUNCTION_FAILED;
3607 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3608 	    "Entered %s.\n", __func__);
3610 	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3611 	mcp->mb[1] = HCS_READ_SERDES;
3612 	mcp->mb[3] = LSW(addr);
3613 	mcp->mb[4] = MSW(addr);
3614 	mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3615 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
3616 	mcp->tov = MBX_TOV_SECONDS;
3618 	rval = qla2x00_mailbox_command(vha, mcp);
	/* NOTE(review): *data is written even on failure — callers
	 * should check the return value before trusting it. */
3620 	*data = mcp->mb[2] << 16 | mcp->mb[1];
3622 	if (rval != QLA_SUCCESS) {
3623 		ql_dbg(ql_dbg_mbx, vha, 0x118a,
3624 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3626 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3627 		    "Done %s.\n", __func__);
3634 * qla2x00_set_serdes_params() -
3636 * @sw_em_1g: serial link options
3637 * @sw_em_2g: serial link options
3638 * @sw_em_4g: serial link options
/*
 * Program per-speed SERDES transmit parameters (1G/2G/4G) via the
 * SERDES PARAMS mailbox command; BIT_15 marks each value as valid.
 */
3643 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3644     uint16_t sw_em_2g, uint16_t sw_em_4g)
3648 	mbx_cmd_t *mcp = &mc;
3650 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3651 	    "Entered %s.\n", __func__);
3653 	mcp->mb[0] = MBC_SERDES_PARAMS;
3655 	mcp->mb[2] = sw_em_1g | BIT_15;
3656 	mcp->mb[3] = sw_em_2g | BIT_15;
3657 	mcp->mb[4] = sw_em_4g | BIT_15;
3658 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3660 	mcp->tov = MBX_TOV_SECONDS;
3662 	rval = qla2x00_mailbox_command(vha, mcp);
3664 	if (rval != QLA_SUCCESS) {
3666 		ql_dbg(ql_dbg_mbx, vha, 0x109f,
3667 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3670 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3671 		    "Done %s.\n", __func__);
/*
 * Ask FWI2-capable firmware to stop executing (MBC_STOP_FIRMWARE).
 * MBS_INVALID_COMMAND is mapped to QLA_INVALID_COMMAND so callers can
 * distinguish "not supported" from a genuine failure.
 */
3678 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3682 	mbx_cmd_t *mcp = &mc;
3684 	if (!IS_FWI2_CAPABLE(vha->hw))
3685 		return QLA_FUNCTION_FAILED;
3687 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3688 	    "Entered %s.\n", __func__);
3690 	mcp->mb[0] = MBC_STOP_FIRMWARE;
3692 	mcp->out_mb = MBX_1|MBX_0;
3696 	rval = qla2x00_mailbox_command(vha, mcp);
3698 	if (rval != QLA_SUCCESS) {
3699 		ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3700 		if (mcp->mb[0] == MBS_INVALID_COMMAND)
3701 			rval = QLA_INVALID_COMMAND;
3703 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3704 		    "Done %s.\n", __func__);
/*
 * Enable Extended Firmware Trace (EFT): point the firmware at the
 * pre-allocated @eft_dma buffer of @buffers trace buffers via the
 * TRACE CONTROL mailbox command.  Bails out early if the PCI channel
 * is offline (EEH) to avoid touching dead hardware.
 */
3711 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3716 	mbx_cmd_t *mcp = &mc;
3718 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3719 	    "Entered %s.\n", __func__);
3721 	if (!IS_FWI2_CAPABLE(vha->hw))
3722 		return QLA_FUNCTION_FAILED;
3724 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3725 		return QLA_FUNCTION_FAILED;
3727 	mcp->mb[0] = MBC_TRACE_CONTROL;
3728 	mcp->mb[1] = TC_EFT_ENABLE;
3729 	mcp->mb[2] = LSW(eft_dma);
3730 	mcp->mb[3] = MSW(eft_dma);
3731 	mcp->mb[4] = LSW(MSD(eft_dma));
3732 	mcp->mb[5] = MSW(MSD(eft_dma));
3733 	mcp->mb[6] = buffers;
3734 	mcp->mb[7] = TC_AEN_DISABLE;
3735 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3736 	mcp->in_mb = MBX_1|MBX_0;
3737 	mcp->tov = MBX_TOV_SECONDS;
3739 	rval = qla2x00_mailbox_command(vha, mcp);
3740 	if (rval != QLA_SUCCESS) {
3741 		ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3742 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3743 		    rval, mcp->mb[0], mcp->mb[1]);
3745 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3746 		    "Done %s.\n", __func__);
/*
 * Disable Extended Firmware Trace via the TRACE CONTROL mailbox
 * command.  Skipped when the PCI channel is offline.
 */
3753 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3757 	mbx_cmd_t *mcp = &mc;
3759 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3760 	    "Entered %s.\n", __func__);
3762 	if (!IS_FWI2_CAPABLE(vha->hw))
3763 		return QLA_FUNCTION_FAILED;
3765 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3766 		return QLA_FUNCTION_FAILED;
3768 	mcp->mb[0] = MBC_TRACE_CONTROL;
3769 	mcp->mb[1] = TC_EFT_DISABLE;
3770 	mcp->out_mb = MBX_1|MBX_0;
3771 	mcp->in_mb = MBX_1|MBX_0;
3772 	mcp->tov = MBX_TOV_SECONDS;
3774 	rval = qla2x00_mailbox_command(vha, mcp);
3775 	if (rval != QLA_SUCCESS) {
3776 		ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3777 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3778 		    rval, mcp->mb[0], mcp->mb[1]);
3780 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3781 		    "Done %s.\n", __func__);
/*
 * Enable Fibre Channel Event (FCE) tracing: hand the firmware the
 * @fce_dma buffer and default RX/TX trace sizes.  On success the
 * first eight completion mailbox registers are copied out to @mb
 * (needed later to re-enable tracing after an ISP reset).
 */
3788 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3789     uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3793 	mbx_cmd_t *mcp = &mc;
3795 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3796 	    "Entered %s.\n", __func__);
	/* FCE is only implemented on these ISP families. */
3798 	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3799 	    !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
3800 	    !IS_QLA28XX(vha->hw))
3801 		return QLA_FUNCTION_FAILED;
3803 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3804 		return QLA_FUNCTION_FAILED;
3806 	mcp->mb[0] = MBC_TRACE_CONTROL;
3807 	mcp->mb[1] = TC_FCE_ENABLE;
3808 	mcp->mb[2] = LSW(fce_dma);
3809 	mcp->mb[3] = MSW(fce_dma);
3810 	mcp->mb[4] = LSW(MSD(fce_dma));
3811 	mcp->mb[5] = MSW(MSD(fce_dma));
3812 	mcp->mb[6] = buffers;
3813 	mcp->mb[7] = TC_AEN_DISABLE;
3815 	mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3816 	mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3817 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3819 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3820 	mcp->tov = MBX_TOV_SECONDS;
3822 	rval = qla2x00_mailbox_command(vha, mcp);
3823 	if (rval != QLA_SUCCESS) {
3824 		ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3825 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3826 		    rval, mcp->mb[0], mcp->mb[1]);
3828 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3829 		    "Done %s.\n", __func__);
	/* Preserve the completion registers for the caller. */
3832 		memcpy(mb, mcp->mb, 8 * sizeof(*mb));
/*
 * Disable FCE tracing and return the firmware's final 64-bit write
 * (*@wr) and read (*@rd) trace pointers, reassembled from four 16-bit
 * completion mailbox registers each.
 */
3841 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3845 	mbx_cmd_t *mcp = &mc;
3847 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3848 	    "Entered %s.\n", __func__);
3850 	if (!IS_FWI2_CAPABLE(vha->hw))
3851 		return QLA_FUNCTION_FAILED;
3853 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3854 		return QLA_FUNCTION_FAILED;
3856 	mcp->mb[0] = MBC_TRACE_CONTROL;
3857 	mcp->mb[1] = TC_FCE_DISABLE;
3858 	mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3859 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
3860 	mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3862 	mcp->tov = MBX_TOV_SECONDS;
3864 	rval = qla2x00_mailbox_command(vha, mcp);
3865 	if (rval != QLA_SUCCESS) {
3866 		ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3867 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3868 		    rval, mcp->mb[0], mcp->mb[1]);
3870 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3871 		    "Done %s.\n", __func__);
		/* Rebuild 64-bit pointers from mb[5..2] and mb[9..6]. */
3874 		*wr = (uint64_t) mcp->mb[5] << 48 |
3875 		    (uint64_t) mcp->mb[4] << 32 |
3876 		    (uint64_t) mcp->mb[3] << 16 |
3877 		    (uint64_t) mcp->mb[2];
3879 		*rd = (uint64_t) mcp->mb[9] << 48 |
3880 		    (uint64_t) mcp->mb[8] << 32 |
3881 		    (uint64_t) mcp->mb[7] << 16 |
3882 		    (uint64_t) mcp->mb[6];
/*
 * Query the iIDMA port speed for @loop_id via MBC_PORT_PARAMS with
 * zeroed set-fields (read-only query); result is returned in
 * *@port_speed, with raw mailbox status optionally copied to @mb.
 */
3889 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3890     uint16_t *port_speed, uint16_t *mb)
3894 	mbx_cmd_t *mcp = &mc;
3896 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3897 	    "Entered %s.\n", __func__);
3899 	if (!IS_IIDMA_CAPABLE(vha->hw))
3900 		return QLA_FUNCTION_FAILED;
3902 	mcp->mb[0] = MBC_PORT_PARAMS;
3903 	mcp->mb[1] = loop_id;
	/* mb[2]/mb[3] zero => query only, do not change the speed. */
3904 	mcp->mb[2] = mcp->mb[3] = 0;
3905 	mcp->mb[9] = vha->vp_idx;
3906 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3907 	mcp->in_mb = MBX_3|MBX_1|MBX_0;
3908 	mcp->tov = MBX_TOV_SECONDS;
3910 	rval = qla2x00_mailbox_command(vha, mcp);
3912 	/* Return mailbox statuses. */
3919 	if (rval != QLA_SUCCESS) {
3920 		ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3922 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3923 		    "Done %s.\n", __func__);
3925 		*port_speed = mcp->mb[3];
/*
 * Set the iIDMA port speed for @loop_id via MBC_PORT_PARAMS; only the
 * low 6 bits of @port_speed are meaningful.  Raw mailbox status is
 * optionally copied to @mb.
 */
3932 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3933     uint16_t port_speed, uint16_t *mb)
3937 	mbx_cmd_t *mcp = &mc;
3939 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3940 	    "Entered %s.\n", __func__);
3942 	if (!IS_IIDMA_CAPABLE(vha->hw))
3943 		return QLA_FUNCTION_FAILED;
3945 	mcp->mb[0] = MBC_PORT_PARAMS;
3946 	mcp->mb[1] = loop_id;
	/* Speed field is 6 bits wide. */
3948 	mcp->mb[3] = port_speed & 0x3F;
3949 	mcp->mb[9] = vha->vp_idx;
3950 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3951 	mcp->in_mb = MBX_3|MBX_1|MBX_0;
3952 	mcp->tov = MBX_TOV_SECONDS;
3954 	rval = qla2x00_mailbox_command(vha, mcp);
3956 	/* Return mailbox statuses. */
3963 	if (rval != QLA_SUCCESS) {
3964 		ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3965 		    "Failed=%x.\n", rval);
3967 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3968 		    "Done %s.\n", __func__);
3975 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3976 struct vp_rpt_id_entry_24xx *rptid_entry)
3978 struct qla_hw_data *ha = vha->hw;
3979 scsi_qla_host_t *vp = NULL;
3980 unsigned long flags;
3983 struct fc_port *fcport;
3985 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3986 "Entered %s.\n", __func__);
3988 if (rptid_entry->entry_status != 0)
3991 id.b.domain = rptid_entry->port_id[2];
3992 id.b.area = rptid_entry->port_id[1];
3993 id.b.al_pa = rptid_entry->port_id[0];
3995 ha->flags.n2n_ae = 0;
3997 if (rptid_entry->format == 0) {
3999 ql_dbg(ql_dbg_async, vha, 0x10b7,
4000 "Format 0 : Number of VPs setup %d, number of "
4001 "VPs acquired %d.\n", rptid_entry->vp_setup,
4002 rptid_entry->vp_acquired);
4003 ql_dbg(ql_dbg_async, vha, 0x10b8,
4004 "Primary port id %02x%02x%02x.\n",
4005 rptid_entry->port_id[2], rptid_entry->port_id[1],
4006 rptid_entry->port_id[0]);
4007 ha->current_topology = ISP_CFG_NL;
4008 qlt_update_host_map(vha, id);
4010 } else if (rptid_entry->format == 1) {
4012 ql_dbg(ql_dbg_async, vha, 0x10b9,
4013 "Format 1: VP[%d] enabled - status %d - with "
4014 "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
4015 rptid_entry->vp_status,
4016 rptid_entry->port_id[2], rptid_entry->port_id[1],
4017 rptid_entry->port_id[0]);
4018 ql_dbg(ql_dbg_async, vha, 0x5075,
4019 "Format 1: Remote WWPN %8phC.\n",
4020 rptid_entry->u.f1.port_name);
4022 ql_dbg(ql_dbg_async, vha, 0x5075,
4023 "Format 1: WWPN %8phC.\n",
4026 switch (rptid_entry->u.f1.flags & TOPO_MASK) {
4028 ha->current_topology = ISP_CFG_N;
4029 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4030 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4031 fcport->scan_state = QLA_FCPORT_SCAN;
4032 fcport->n2n_flag = 0;
4035 if (wwn_to_u64(vha->port_name) >
4036 wwn_to_u64(rptid_entry->u.f1.port_name)) {
4038 vha->d_id.b.al_pa = 1;
4039 ha->flags.n2n_bigger = 1;
4042 ql_dbg(ql_dbg_async, vha, 0x5075,
4043 "Format 1: assign local id %x remote id %x\n",
4044 vha->d_id.b24, id.b24);
4046 ql_dbg(ql_dbg_async, vha, 0x5075,
4047 "Format 1: Remote login - Waiting for WWPN %8phC.\n",
4048 rptid_entry->u.f1.port_name);
4049 ha->flags.n2n_bigger = 0;
4052 fcport = qla2x00_find_fcport_by_wwpn(vha,
4053 rptid_entry->u.f1.port_name, 1);
4054 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4058 fcport->plogi_nack_done_deadline = jiffies + HZ;
4059 fcport->dm_login_expire = jiffies +
4060 QLA_N2N_WAIT_TIME * HZ;
4061 fcport->scan_state = QLA_FCPORT_FOUND;
4062 fcport->n2n_flag = 1;
4063 fcport->keep_nport_handle = 1;
4064 fcport->login_retry = vha->hw->login_retry_count;
4065 fcport->fc4_type = FS_FC4TYPE_FCP;
4066 if (vha->flags.nvme_enabled)
4067 fcport->fc4_type |= FS_FC4TYPE_NVME;
4069 if (wwn_to_u64(vha->port_name) >
4070 wwn_to_u64(fcport->port_name)) {
4074 switch (fcport->disc_state) {
4076 set_bit(RELOGIN_NEEDED,
4079 case DSC_DELETE_PEND:
4082 qlt_schedule_sess_for_deletion(fcport);
4086 qla24xx_post_newsess_work(vha, &id,
4087 rptid_entry->u.f1.port_name,
4088 rptid_entry->u.f1.node_name,
4093 /* if our portname is higher then initiate N2N login */
4095 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
4098 ha->current_topology = ISP_CFG_FL;
4101 ha->current_topology = ISP_CFG_F;
4107 ha->flags.gpsc_supported = 1;
4108 ha->current_topology = ISP_CFG_F;
4109 /* buffer to buffer credit flag */
4110 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
4112 if (rptid_entry->vp_idx == 0) {
4113 if (rptid_entry->vp_status == VP_STAT_COMPL) {
4114 /* FA-WWN is only for physical port */
4115 if (qla_ini_mode_enabled(vha) &&
4116 ha->flags.fawwpn_enabled &&
4117 (rptid_entry->u.f1.flags &
4119 memcpy(vha->port_name,
4120 rptid_entry->u.f1.port_name,
4124 qlt_update_host_map(vha, id);
4127 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
4128 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
4130 if (rptid_entry->vp_status != VP_STAT_COMPL &&
4131 rptid_entry->vp_status != VP_STAT_ID_CHG) {
4132 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
4133 "Could not acquire ID for VP[%d].\n",
4134 rptid_entry->vp_idx);
4139 spin_lock_irqsave(&ha->vport_slock, flags);
4140 list_for_each_entry(vp, &ha->vp_list, list) {
4141 if (rptid_entry->vp_idx == vp->vp_idx) {
4146 spin_unlock_irqrestore(&ha->vport_slock, flags);
4151 qlt_update_host_map(vp, id);
4154 * Cannot configure here as we are still sitting on the
4155 * response queue. Handle it in dpc context.
4157 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
4158 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
4159 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
4161 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
4162 qla2xxx_wake_dpc(vha);
4163 } else if (rptid_entry->format == 2) {
4164 ql_dbg(ql_dbg_async, vha, 0x505f,
4165 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
4166 rptid_entry->port_id[2], rptid_entry->port_id[1],
4167 rptid_entry->port_id[0]);
4169 ql_dbg(ql_dbg_async, vha, 0x5075,
4170 "N2N: Remote WWPN %8phC.\n",
4171 rptid_entry->u.f2.port_name);
4173 /* N2N. direct connect */
4174 ha->current_topology = ISP_CFG_N;
4175 ha->flags.rida_fmt2 = 1;
4176 vha->d_id.b.domain = rptid_entry->port_id[2];
4177 vha->d_id.b.area = rptid_entry->port_id[1];
4178 vha->d_id.b.al_pa = rptid_entry->port_id[0];
4180 ha->flags.n2n_ae = 1;
4181 spin_lock_irqsave(&ha->vport_slock, flags);
4182 qlt_update_vp_map(vha, SET_AL_PA);
4183 spin_unlock_irqrestore(&ha->vport_slock, flags);
4185 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4186 fcport->scan_state = QLA_FCPORT_SCAN;
4187 fcport->n2n_flag = 0;
4190 fcport = qla2x00_find_fcport_by_wwpn(vha,
4191 rptid_entry->u.f2.port_name, 1);
4194 fcport->login_retry = vha->hw->login_retry_count;
4195 fcport->plogi_nack_done_deadline = jiffies + HZ;
4196 fcport->scan_state = QLA_FCPORT_FOUND;
4197 fcport->keep_nport_handle = 1;
4198 fcport->n2n_flag = 1;
4199 fcport->d_id.b.domain =
4200 rptid_entry->u.f2.remote_nport_id[2];
4201 fcport->d_id.b.area =
4202 rptid_entry->u.f2.remote_nport_id[1];
4203 fcport->d_id.b.al_pa =
4204 rptid_entry->u.f2.remote_nport_id[0];
4207 * For the case where remote port sending PRLO, FW
4208 * sends up RIDA Format 2 as an indication of session
4209 * loss. In other word, FW state change from PRLI
4210 * complete back to PLOGI complete. Delete the
4211 * session and let relogin drive the reconnect.
4213 if (atomic_read(&fcport->state) == FCS_ONLINE)
4214 qlt_schedule_sess_for_deletion(fcport);
4220 * qla24xx_modify_vp_config
4221 * Change VP configuration for vha
4224 * vha = adapter block pointer.
4227 * qla2xxx local function return status code.
4233 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
4236 struct vp_config_entry_24xx *vpmod;
4237 dma_addr_t vpmod_dma;
4238 struct qla_hw_data *ha = vha->hw;
4239 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4241 /* This can be called by the parent */
4243 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4244 "Entered %s.\n", __func__);
4246 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4248 ql_log(ql_log_warn, vha, 0x10bc,
4249 "Failed to allocate modify VP IOCB.\n");
4250 return QLA_MEMORY_ALLOC_FAILED;
4253 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4254 vpmod->entry_count = 1;
4255 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4256 vpmod->vp_count = 1;
4257 vpmod->vp_index1 = vha->vp_idx;
4258 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
4260 qlt_modify_vp_config(vha, vpmod);
4262 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4263 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4264 vpmod->entry_count = 1;
4266 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
4267 if (rval != QLA_SUCCESS) {
4268 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4269 "Failed to issue VP config IOCB (%x).\n", rval);
4270 } else if (vpmod->comp_status != 0) {
4271 ql_dbg(ql_dbg_mbx, vha, 0x10be,
4272 "Failed to complete IOCB -- error status (%x).\n",
4273 vpmod->comp_status);
4274 rval = QLA_FUNCTION_FAILED;
4275 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4276 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4277 "Failed to complete IOCB -- completion status (%x).\n",
4278 le16_to_cpu(vpmod->comp_status));
4279 rval = QLA_FUNCTION_FAILED;
4282 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4283 "Done %s.\n", __func__);
4284 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
4286 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
/*
 * qla2x00_send_change_request
 *	Register for (or disable) RSCN change notifications from the
 *	fabric controller via the SEND CHANGE REQUEST mailbox command.
 *
 * Input:
 *	vha = adapter block pointer
 *	format = registration format:
 *		1 - Fabric detected registration
 *		2 - N_port detected registration
 *		3 - Full registration
 *		FF - clear registration
 *	vp_idx = Virtual port index
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * NOTE(review): the tail of this function (mb[0] error mapping, return)
 * appears elided in this copy of the file.
 */
4313 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4318 mbx_cmd_t *mcp = &mc;
4320 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4321 "Entered %s.\n", __func__);
4323 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4324 mcp->mb[1] = format;
4325 mcp->mb[9] = vp_idx;
4326 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4327 mcp->in_mb = MBX_0|MBX_1;
4328 mcp->tov = MBX_TOV_SECONDS;
4330 rval = qla2x00_mailbox_command(vha, mcp);
 /* Even on transport success, mb[0] must say COMMAND COMPLETE. */
4332 if (rval == QLA_SUCCESS) {
4333 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
/*
 * qla2x00_dump_ram
 *	Dump RISC RAM starting at 'addr' into the host buffer at req_dma.
 *	Uses the EXTENDED form of the mailbox command when the address
 *	needs more than 16 bits or the ISP is FWI2-capable; otherwise the
 *	legacy DUMP RISC RAM command.
 *
 * NOTE(review): some lines (declarations, mb[10]/size setup, return)
 * appear elided in this copy of the file.
 */
4343 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4348 mbx_cmd_t *mcp = &mc;
4350 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4351 "Entered %s.\n", __func__);
 /* Extended command carries the high word of the RISC address in mb[8]. */
4353 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4354 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4355 mcp->mb[8] = MSW(addr);
4357 mcp->out_mb = MBX_10|MBX_8|MBX_0;
4359 mcp->mb[0] = MBC_DUMP_RISC_RAM;
4360 mcp->out_mb = MBX_0;
 /* Common part: low address word plus 64-bit DMA destination. */
4362 mcp->mb[1] = LSW(addr);
4363 mcp->mb[2] = MSW(req_dma);
4364 mcp->mb[3] = LSW(req_dma);
4365 mcp->mb[6] = MSW(MSD(req_dma));
4366 mcp->mb[7] = LSW(MSD(req_dma));
4367 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
 /* FWI2 parts take a 32-bit transfer size (mb[5:4]); legacy only 16 bits. */
4368 if (IS_FWI2_CAPABLE(vha->hw)) {
4369 mcp->mb[4] = MSW(size);
4370 mcp->mb[5] = LSW(size);
4371 mcp->out_mb |= MBX_5|MBX_4;
4373 mcp->mb[4] = LSW(size);
4374 mcp->out_mb |= MBX_4;
4378 mcp->tov = MBX_TOV_SECONDS;
4380 rval = qla2x00_mailbox_command(vha, mcp);
4382 if (rval != QLA_SUCCESS) {
4383 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4384 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4386 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4387 "Done %s.\n", __func__);
4392 /* 84XX Support **************************************************************/
/*
 * Request/response buffer for the CS84xx Verify Chip IOCB.
 * NOTE(review): later code accesses these as mn->p.req / mn->p.rsp, so the
 * two members are evidently overlaid in a union 'p' — the union wrapper
 * lines appear elided in this copy of the file.
 */
4394 struct cs84xx_mgmt_cmd {
4396 struct verify_chip_entry_84xx req;
4397 struct verify_chip_rsp_84xx rsp;
/*
 * qla84xx_verify_chip
 *	Issue the 84xx Verify Chip IOCB to validate/update the CS84xx
 *	firmware. On a firmware-update failure, retries once with
 *	VCO_DONT_UPDATE_FW set (verify only, no update).
 *
 * Input:
 *	vha = adapter block pointer.
 *	status = two-element array; [0] receives the completion status,
 *		 [1] the failure code when [0] == CS_VCS_CHIP_FAILURE.
 *
 * Returns:
 *	qla2xxx local function return status code.
 *
 * NOTE(review): the retry loop construct, NULL check on the allocation,
 * and final return appear elided in this copy of the file.
 */
4402 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4405 struct cs84xx_mgmt_cmd *mn;
4408 unsigned long flags;
4409 struct qla_hw_data *ha = vha->hw;
4411 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4412 "Entered %s.\n", __func__);
4414 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4416 return QLA_MEMORY_ALLOC_FAILED;
 /* Force an update when one is pending on the CS84xx. */
4420 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4421 /* Diagnostic firmware? */
4422 /* options |= MENLO_DIAG_FW; */
4423 /* We update the firmware with only one data sequence. */
4424 options |= VCO_END_OF_DATA;
4428 memset(mn, 0, sizeof(*mn));
4429 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4430 mn->p.req.entry_count = 1;
4431 mn->p.req.options = cpu_to_le16(options);
4433 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4434 "Dump of Verify Request.\n");
4435 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
 /* Verify can take a long time: 120 s IOCB timeout. */
4438 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4439 if (rval != QLA_SUCCESS) {
4440 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4441 "Failed to issue verify IOCB (%x).\n", rval);
4445 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4446 "Dump of Verify Response.\n");
4447 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4450 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4451 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4452 le16_to_cpu(mn->p.rsp.failure_code) : 0;
4453 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4454 "cs=%x fc=%x.\n", status[0], status[1]);
4456 if (status[0] != CS_COMPLETE) {
4457 rval = QLA_FUNCTION_FAILED;
 /* First failure with update enabled: retry as verify-only. */
4458 if (!(options & VCO_DONT_UPDATE_FW)) {
4459 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4460 "Firmware update failed. Retrying "
4461 "without update firmware.\n");
4462 options |= VCO_DONT_UPDATE_FW;
4463 options &= ~VCO_FORCE_UPDATE;
4467 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4468 "Firmware updated to %x.\n",
4469 le32_to_cpu(mn->p.rsp.fw_ver));
4471 /* NOTE: we only update OP firmware. */
 /* op_fw_version is shared state; protect with access_lock. */
4472 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4473 ha->cs84xx->op_fw_version =
4474 le32_to_cpu(mn->p.rsp.fw_ver);
4475 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4481 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4483 if (rval != QLA_SUCCESS) {
4484 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4485 "Failed=%x.\n", rval);
4487 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4488 "Done %s.\n", __func__);
/*
 * qla25xx_init_req_que
 *	Initialize a multiqueue request queue in firmware via the
 *	INITIALIZE MULTIQ mailbox command; resets the hardware queue
 *	in/out pointers unless BIT_0 (queue update) is set in options.
 *
 * Input:
 *	vha = adapter block pointer.
 *	req = request queue to initialize (dma, length, id, options, ...).
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * NOTE(review): declarations and a few setup lines (e.g. mb[8]/in_ptr)
 * appear elided in this copy of the file.
 */
4495 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4498 unsigned long flags;
4500 mbx_cmd_t *mcp = &mc;
4501 struct qla_hw_data *ha = vha->hw;
 /* Nothing to do before firmware is running. */
4503 if (!ha->flags.fw_started)
4506 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4507 "Entered %s.\n", __func__);
 /* BIT_13: enable shadow registers where supported. */
4509 if (IS_SHADOW_REG_CAPABLE(ha))
4510 req->options |= BIT_13;
4512 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4513 mcp->mb[1] = req->options;
4514 mcp->mb[2] = MSW(LSD(req->dma));
4515 mcp->mb[3] = LSW(LSD(req->dma));
4516 mcp->mb[6] = MSW(MSD(req->dma));
4517 mcp->mb[7] = LSW(MSD(req->dma));
4518 mcp->mb[5] = req->length;
4520 mcp->mb[10] = req->rsp->id;
4521 mcp->mb[12] = req->qos;
4522 mcp->mb[11] = req->vp_idx;
4523 mcp->mb[13] = req->rid;
4524 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4527 mcp->mb[4] = req->id;
4528 /* que in ptr index */
4530 /* que out ptr index */
4531 mcp->mb[9] = *req->out_ptr = 0;
4532 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4533 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4535 mcp->flags = MBX_DMA_OUT;
4536 mcp->tov = MBX_TOV_SECONDS * 2;
4538 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4540 mcp->in_mb |= MBX_1;
4541 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4542 mcp->out_mb |= MBX_15;
4543 /* debug q create issue in SR-IOV */
4544 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
 /* Zero the hardware queue pointers for a fresh (non-update) create. */
4547 spin_lock_irqsave(&ha->hardware_lock, flags);
4548 if (!(req->options & BIT_0)) {
4549 wrt_reg_dword(req->req_q_in, 0);
4550 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4551 wrt_reg_dword(req->req_q_out, 0);
4553 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4555 rval = qla2x00_mailbox_command(vha, mcp);
4556 if (rval != QLA_SUCCESS) {
4557 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4558 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4560 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4561 "Done %s.\n", __func__);
/*
 * qla25xx_init_rsp_que
 *	Initialize a multiqueue response queue in firmware via the
 *	INITIALIZE MULTIQ mailbox command; mirror of qla25xx_init_req_que
 *	for the response side (MSI-X entry in mb[14], rsp_q_out/in reset).
 *
 * Input:
 *	vha = adapter block pointer.
 *	rsp = response queue to initialize.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * NOTE(review): declarations and some lines (e.g. the out-pointer setup)
 * appear elided in this copy of the file.
 */
4568 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4571 unsigned long flags;
4573 mbx_cmd_t *mcp = &mc;
4574 struct qla_hw_data *ha = vha->hw;
 /* Nothing to do before firmware is running. */
4576 if (!ha->flags.fw_started)
4579 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4580 "Entered %s.\n", __func__);
 /* BIT_13: enable shadow registers where supported. */
4582 if (IS_SHADOW_REG_CAPABLE(ha))
4583 rsp->options |= BIT_13;
4585 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4586 mcp->mb[1] = rsp->options;
4587 mcp->mb[2] = MSW(LSD(rsp->dma));
4588 mcp->mb[3] = LSW(LSD(rsp->dma));
4589 mcp->mb[6] = MSW(MSD(rsp->dma));
4590 mcp->mb[7] = LSW(MSD(rsp->dma));
4591 mcp->mb[5] = rsp->length;
4592 mcp->mb[14] = rsp->msix->entry;
4593 mcp->mb[13] = rsp->rid;
4594 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4597 mcp->mb[4] = rsp->id;
4598 /* que in ptr index */
4599 mcp->mb[8] = *rsp->in_ptr = 0;
4600 /* que out ptr index */
4602 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4603 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4605 mcp->flags = MBX_DMA_OUT;
4606 mcp->tov = MBX_TOV_SECONDS * 2;
4608 if (IS_QLA81XX(ha)) {
4609 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4610 mcp->in_mb |= MBX_1;
4611 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4612 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4613 mcp->in_mb |= MBX_1;
4614 /* debug q create issue in SR-IOV */
4615 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
 /* Zero the hardware queue pointers for a fresh (non-update) create. */
4618 spin_lock_irqsave(&ha->hardware_lock, flags);
4619 if (!(rsp->options & BIT_0)) {
4620 wrt_reg_dword(rsp->rsp_q_out, 0);
4621 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4622 wrt_reg_dword(rsp->rsp_q_in, 0);
4625 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4627 rval = qla2x00_mailbox_command(vha, mcp);
4628 if (rval != QLA_SUCCESS) {
4629 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4630 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4632 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4633 "Done %s.\n", __func__);
/*
 * qla81xx_idc_ack
 *	Acknowledge an Inter-Driver Communication (IDC) request by
 *	echoing the received registers back via the IDC ACK mailbox
 *	command (mb[1..7] <- mb[], QLA_IDC_ACK_REGS words).
 *
 * NOTE(review): declarations, in_mb setup and the return appear elided
 * in this copy of the file.
 */
4640 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4644 mbx_cmd_t *mcp = &mc;
4646 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4647 "Entered %s.\n", __func__);
4649 mcp->mb[0] = MBC_IDC_ACK;
4650 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4651 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4653 mcp->tov = MBX_TOV_SECONDS;
4655 rval = qla2x00_mailbox_command(vha, mcp);
4657 if (rval != QLA_SUCCESS) {
4658 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4659 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4661 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4662 "Done %s.\n", __func__);
/*
 * qla81xx_fac_get_sector_size
 *	Query the flash sector size through the Flash Access Control
 *	(FAC) mailbox command; result returned in mb[1].
 *	Only supported on 81xx/83xx/27xx/28xx parts.
 *
 * NOTE(review): declarations and the return appear elided in this copy.
 */
4669 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4673 mbx_cmd_t *mcp = &mc;
4675 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4676 "Entered %s.\n", __func__);
4678 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4679 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4680 return QLA_FUNCTION_FAILED;
4682 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4683 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4684 mcp->out_mb = MBX_1|MBX_0;
4685 mcp->in_mb = MBX_1|MBX_0;
4686 mcp->tov = MBX_TOV_SECONDS;
4688 rval = qla2x00_mailbox_command(vha, mcp);
4690 if (rval != QLA_SUCCESS) {
4691 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4692 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4693 rval, mcp->mb[0], mcp->mb[1]);
4695 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4696 "Done %s.\n", __func__);
 /* Sector size only valid on success (this is the success branch). */
4697 *sector_size = mcp->mb[1];
/*
 * qla81xx_fac_do_write_enable
 *	Enable (enable != 0) or protect flash writes through the Flash
 *	Access Control (FAC) mailbox command.
 *	Only supported on 81xx/83xx/27xx/28xx parts.
 *
 * NOTE(review): declarations and the return appear elided in this copy.
 */
4704 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4708 mbx_cmd_t *mcp = &mc;
4710 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4711 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4712 return QLA_FUNCTION_FAILED;
4714 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4715 "Entered %s.\n", __func__);
4717 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4718 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4719 FAC_OPT_CMD_WRITE_PROTECT;
4720 mcp->out_mb = MBX_1|MBX_0;
4721 mcp->in_mb = MBX_1|MBX_0;
4722 mcp->tov = MBX_TOV_SECONDS;
4724 rval = qla2x00_mailbox_command(vha, mcp);
4726 if (rval != QLA_SUCCESS) {
4727 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4728 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4729 rval, mcp->mb[0], mcp->mb[1]);
4731 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4732 "Done %s.\n", __func__);
/*
 * qla81xx_fac_erase_sector
 *	Erase the flash sector range [start, finish] through the Flash
 *	Access Control (FAC) mailbox command. Each 32-bit address is
 *	split across two 16-bit mailbox registers (LSW/MSW).
 *	Only supported on 81xx/83xx/27xx/28xx parts.
 *
 * NOTE(review): declarations and the return appear elided in this copy.
 */
4739 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4743 mbx_cmd_t *mcp = &mc;
4745 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4746 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4747 return QLA_FUNCTION_FAILED;
4749 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4750 "Entered %s.\n", __func__);
4752 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4753 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4754 mcp->mb[2] = LSW(start);
4755 mcp->mb[3] = MSW(start);
4756 mcp->mb[4] = LSW(finish);
4757 mcp->mb[5] = MSW(finish);
4758 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4759 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4760 mcp->tov = MBX_TOV_SECONDS;
4762 rval = qla2x00_mailbox_command(vha, mcp);
4764 if (rval != QLA_SUCCESS) {
4765 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4766 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4767 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4769 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4770 "Done %s.\n", __func__);
/*
 * qla81xx_fac_semaphore_access
 *	Lock (lock != 0) or unlock the flash-access semaphore through the
 *	Flash Access Control (FAC) mailbox command.
 *	Only supported on 81xx/83xx/27xx/28xx parts; on other parts the
 *	initial QLA_SUCCESS is apparently returned unchanged.
 *
 * NOTE(review): the body of the ISP-type guard and the return appear
 * elided in this copy of the file. Debug message IDs 0x10e2-0x10e4 are
 * shared with qla81xx_fac_erase_sector.
 */
4777 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock)
4779 int rval = QLA_SUCCESS;
4781 mbx_cmd_t *mcp = &mc;
4782 struct qla_hw_data *ha = vha->hw;
4784 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
4785 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4788 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4789 "Entered %s.\n", __func__);
4791 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4792 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE :
4793 FAC_OPT_CMD_UNLOCK_SEMAPHORE);
4794 mcp->out_mb = MBX_1|MBX_0;
4795 mcp->in_mb = MBX_1|MBX_0;
4796 mcp->tov = MBX_TOV_SECONDS;
4798 rval = qla2x00_mailbox_command(vha, mcp);
4800 if (rval != QLA_SUCCESS) {
4801 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4802 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4803 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4805 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4806 "Done %s.\n", __func__);
/*
 * qla81xx_restart_mpi_firmware
 *	Request a restart of the MPI (management) firmware via the
 *	RESTART MPI FW mailbox command.
 *
 * NOTE(review): declarations and the return appear elided in this copy.
 */
4813 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4817 mbx_cmd_t *mcp = &mc;
4819 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4820 "Entered %s.\n", __func__);
4822 mcp->mb[0] = MBC_RESTART_MPI_FW;
4823 mcp->out_mb = MBX_0;
4824 mcp->in_mb = MBX_0|MBX_1;
4825 mcp->tov = MBX_TOV_SECONDS;
4827 rval = qla2x00_mailbox_command(vha, mcp);
4829 if (rval != QLA_SUCCESS) {
4830 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4831 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4832 rval, mcp->mb[0], mcp->mb[1]);
4834 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4835 "Done %s.\n", __func__);
/*
 * qla82xx_set_driver_version
 *	Report the driver version string to P3P-type (82xx) firmware via
 *	SET RNID PARAMS / RNID_TYPE_SET_VERSION, packing the string two
 *	bytes per mailbox register into mb[4..15] and zero-padding the
 *	remainder.
 *
 * NOTE(review): declarations and the zero-pad assignment inside the
 * second loop appear elided in this copy of the file. The __le16 cast
 * plus le16_to_cpup treats the string bytes as little-endian pairs.
 */
4842 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4846 mbx_cmd_t *mcp = &mc;
4850 struct qla_hw_data *ha = vha->hw;
4852 if (!IS_P3P_TYPE(ha))
4853 return QLA_FUNCTION_FAILED;
4855 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4856 "Entered %s.\n", __func__);
4858 str = (__force __le16 *)version;
4859 len = strlen(version);
4861 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4862 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4863 mcp->out_mb = MBX_1|MBX_0;
 /* Two version-string bytes per 16-bit mailbox register. */
4864 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4865 mcp->mb[i] = le16_to_cpup(str);
4866 mcp->out_mb |= 1<<i;
 /* Pad the remaining registers (mark them as outgoing as well). */
4868 for (; i < 16; i++) {
4870 mcp->out_mb |= 1<<i;
4872 mcp->in_mb = MBX_1|MBX_0;
4873 mcp->tov = MBX_TOV_SECONDS;
4875 rval = qla2x00_mailbox_command(vha, mcp);
4877 if (rval != QLA_SUCCESS) {
4878 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4879 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4881 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4882 "Done %s.\n", __func__);
/*
 * qla25xx_set_driver_version
 *	Report the driver version string to FWI2 firmware via
 *	SET RNID PARAMS / RNID_TYPE_SET_VERSION using a DMA buffer
 *	(header "\x7\x3\x11\x0" followed by the truncated version text).
 *	Not applicable to 24xx-type, 81xx, or non-FWI2 parts.
 *
 * NOTE(review): declarations (str/str_dma/dwlen/len), the NULL check
 * on the pool allocation, and the return appear elided in this copy.
 */
4889 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4893 mbx_cmd_t *mcp = &mc;
4898 struct qla_hw_data *ha = vha->hw;
4900 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4902 return QLA_FUNCTION_FAILED;
4904 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4905 "Entered %s.\n", __func__);
4907 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4909 ql_log(ql_log_warn, vha, 0x117f,
4910 "Failed to allocate driver version param.\n");
4911 return QLA_MEMORY_ALLOC_FAILED;
 /* 4-byte RNID version header, then the NUL-padded version text. */
4914 memcpy(str, "\x7\x3\x11\x0", 4);
4916 len = dwlen * 4 - 4;
4917 memset(str + 4, 0, len);
 /* Truncate the version string to the buffer's payload size. */
4918 if (len > strlen(version))
4919 len = strlen(version);
4920 memcpy(str + 4, version, len);
4922 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4923 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4924 mcp->mb[2] = MSW(LSD(str_dma));
4925 mcp->mb[3] = LSW(LSD(str_dma));
4926 mcp->mb[6] = MSW(MSD(str_dma));
4927 mcp->mb[7] = LSW(MSD(str_dma));
4928 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4929 mcp->in_mb = MBX_1|MBX_0;
4930 mcp->tov = MBX_TOV_SECONDS;
4932 rval = qla2x00_mailbox_command(vha, mcp);
4934 if (rval != QLA_SUCCESS) {
4935 ql_dbg(ql_dbg_mbx, vha, 0x1180,
4936 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4938 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4939 "Done %s.\n", __func__);
4942 dma_pool_free(ha->s_dma_pool, str, str_dma);
/*
 * qla24xx_get_port_login_templ
 *	Fetch the port-login (PLOGI) payload template from firmware via
 *	GET RNID PARAMS / RNID_TYPE_PORT_LOGIN into the caller's DMA
 *	buffer, then byte-swap the returned dwords in place.
 *
 * Input:
 *	buf_dma/buf = DMA address and CPU pointer of the same buffer.
 *	bufsiz = buffer size in bytes (firmware is given bufsiz/4 dwords).
 *
 * NOTE(review): declarations (rval/mc/bp/i) and the return appear
 * elided in this copy of the file.
 */
4948 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4949 void *buf, uint16_t bufsiz)
4953 mbx_cmd_t *mcp = &mc;
4956 if (!IS_FWI2_CAPABLE(vha->hw))
4957 return QLA_FUNCTION_FAILED;
4959 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4960 "Entered %s.\n", __func__);
4962 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4963 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4964 mcp->mb[2] = MSW(buf_dma);
4965 mcp->mb[3] = LSW(buf_dma);
4966 mcp->mb[6] = MSW(MSD(buf_dma));
4967 mcp->mb[7] = LSW(MSD(buf_dma));
4968 mcp->mb[8] = bufsiz/4;
4969 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4970 mcp->in_mb = MBX_1|MBX_0;
4971 mcp->tov = MBX_TOV_SECONDS;
4973 rval = qla2x00_mailbox_command(vha, mcp);
4975 if (rval != QLA_SUCCESS) {
4976 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4977 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4979 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4980 "Done %s.\n", __func__);
 /* Success path: convert the template from LE to CPU order in place.
  * Note the loop covers (bufsiz-4)/4 dwords, i.e. skips the last one. */
4981 bp = (uint32_t *) buf;
4982 for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4983 *bp = le32_to_cpu((__force __le32)*bp);
/* Max number of PUREX ELS opcodes advertised to firmware below. */
4989 #define PUREX_CMD_COUNT 4
/*
 * qla25xx_set_els_cmds_supported
 *	Tell firmware which ELS commands the driver wants delivered to it
 *	as PUREX IOCBs (RDP, FPIN for SCM, AUTH for EDIF — each gated on
 *	the corresponding module option / feature flag). The opcodes are
 *	encoded as a bitmap (one bit per opcode value) in a DMA buffer
 *	passed to SET RNID PARAMS / RNID_TYPE_ELS_CMD.
 *
 * NOTE(review): declarations, the NULL check on the allocation,
 * the active_cnt++ lines, and the return appear elided in this copy.
 */
4991 qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
4995 mbx_cmd_t *mcp = &mc;
4996 uint8_t *els_cmd_map;
4997 uint8_t active_cnt = 0;
4998 dma_addr_t els_cmd_map_dma;
4999 uint8_t cmd_opcode[PUREX_CMD_COUNT];
5000 uint8_t i, index, purex_bit;
5001 struct qla_hw_data *ha = vha->hw;
5003 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) &&
5004 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5007 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197,
5008 "Entered %s.\n", __func__);
5010 els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
5011 &els_cmd_map_dma, GFP_KERNEL);
5013 ql_log(ql_log_warn, vha, 0x7101,
5014 "Failed to allocate RDP els command param.\n");
5015 return QLA_MEMORY_ALLOC_FAILED;
5018 /* List of Purex ELS */
5019 if (ql2xrdpenable) {
5020 cmd_opcode[active_cnt] = ELS_RDP;
5023 if (ha->flags.scm_supported_f) {
5024 cmd_opcode[active_cnt] = ELS_FPIN;
5027 if (ha->flags.edif_enabled) {
5028 cmd_opcode[active_cnt] = ELS_AUTH_ELS;
 /* Set one bit per opcode value in the bitmap (byte index / bit). */
5032 for (i = 0; i < active_cnt; i++) {
5033 index = cmd_opcode[i] / 8;
5034 purex_bit = cmd_opcode[i] % 8;
5035 els_cmd_map[index] |= 1 << purex_bit;
5038 mcp->mb[0] = MBC_SET_RNID_PARAMS;
5039 mcp->mb[1] = RNID_TYPE_ELS_CMD << 8;
5040 mcp->mb[2] = MSW(LSD(els_cmd_map_dma));
5041 mcp->mb[3] = LSW(LSD(els_cmd_map_dma));
5042 mcp->mb[6] = MSW(MSD(els_cmd_map_dma));
5043 mcp->mb[7] = LSW(MSD(els_cmd_map_dma));
5044 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5045 mcp->in_mb = MBX_1|MBX_0;
5046 mcp->tov = MBX_TOV_SECONDS;
5047 mcp->flags = MBX_DMA_OUT;
5048 mcp->buf_size = ELS_CMD_MAP_SIZE;
5049 rval = qla2x00_mailbox_command(vha, mcp);
5051 if (rval != QLA_SUCCESS) {
5052 ql_dbg(ql_dbg_mbx, vha, 0x118d,
5053 "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]);
5055 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
5056 "Done %s.\n", __func__);
5059 dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
5060 els_cmd_map, els_cmd_map_dma);
/*
 * qla2x00_read_asic_temperature
 *	Read the ASIC temperature via GET RNID PARAMS /
 *	RNID_TYPE_ASIC_TEMP; the value comes back in mb[1].
 *
 * NOTE(review): declarations, the *temp assignment, and the return
 * appear elided in this copy of the file. Debug IDs 0x1159/0x115a/0x115b
 * are shared with qla24xx_get_port_login_templ.
 */
5066 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
5070 mbx_cmd_t *mcp = &mc;
5072 if (!IS_FWI2_CAPABLE(vha->hw))
5073 return QLA_FUNCTION_FAILED;
5075 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
5076 "Entered %s.\n", __func__);
5078 mcp->mb[0] = MBC_GET_RNID_PARAMS;
5079 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
5080 mcp->out_mb = MBX_1|MBX_0;
5081 mcp->in_mb = MBX_1|MBX_0;
5082 mcp->tov = MBX_TOV_SECONDS;
5084 rval = qla2x00_mailbox_command(vha, mcp);
5087 if (rval != QLA_SUCCESS) {
5088 ql_dbg(ql_dbg_mbx, vha, 0x115a,
5089 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
5091 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
5092 "Done %s.\n", __func__);
/*
 * qla2x00_read_sfp
 *	Read SFP transceiver data into the caller's DMA buffer via the
 *	READ SFP mailbox command.
 *
 * Input:
 *	dev/off/len/opt = SFP device address, offset, byte count, options
 *		(presumably loaded into mb[1]/mb[8..10] on the elided
 *		lines — confirm against the full source).
 *
 * Returns QLA_INTERFACE_ERROR when firmware reports the module absent
 * (MBS_COMMAND_ERROR with mb[1] == 0x22).
 *
 * NOTE(review): declarations and several mb[] setup lines appear elided
 * in this copy of the file.
 */
5099 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5100 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5104 mbx_cmd_t *mcp = &mc;
5105 struct qla_hw_data *ha = vha->hw;
5107 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
5108 "Entered %s.\n", __func__);
5110 if (!IS_FWI2_CAPABLE(ha))
5111 return QLA_FUNCTION_FAILED;
5116 mcp->mb[0] = MBC_READ_SFP;
5118 mcp->mb[2] = MSW(LSD(sfp_dma));
5119 mcp->mb[3] = LSW(LSD(sfp_dma));
5120 mcp->mb[6] = MSW(MSD(sfp_dma));
5121 mcp->mb[7] = LSW(MSD(sfp_dma));
5125 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5126 mcp->in_mb = MBX_1|MBX_0;
5127 mcp->tov = MBX_TOV_SECONDS;
5129 rval = qla2x00_mailbox_command(vha, mcp);
5134 if (rval != QLA_SUCCESS) {
5135 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
5136 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5137 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
5138 /* sfp is not there */
5139 rval = QLA_INTERFACE_ERROR;
5142 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
5143 "Done %s.\n", __func__);
/*
 * qla2x00_write_sfp
 *	Write data from the caller's DMA buffer to the SFP transceiver
 *	via the WRITE SFP mailbox command. Mirror of qla2x00_read_sfp.
 *
 * NOTE(review): declarations and several mb[] setup lines (dev/off/len/
 * opt into mb[1]/mb[8..10], presumably) appear elided in this copy of
 * the file — confirm against the full source.
 */
5150 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5151 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5155 mbx_cmd_t *mcp = &mc;
5156 struct qla_hw_data *ha = vha->hw;
5158 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
5159 "Entered %s.\n", __func__);
5161 if (!IS_FWI2_CAPABLE(ha))
5162 return QLA_FUNCTION_FAILED;
5170 mcp->mb[0] = MBC_WRITE_SFP;
5172 mcp->mb[2] = MSW(LSD(sfp_dma));
5173 mcp->mb[3] = LSW(LSD(sfp_dma));
5174 mcp->mb[6] = MSW(MSD(sfp_dma));
5175 mcp->mb[7] = LSW(MSD(sfp_dma));
5179 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5180 mcp->in_mb = MBX_1|MBX_0;
5181 mcp->tov = MBX_TOV_SECONDS;
5183 rval = qla2x00_mailbox_command(vha, mcp);
5185 if (rval != QLA_SUCCESS) {
5186 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
5187 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5189 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
5190 "Done %s.\n", __func__);
/*
 * qla2x00_get_xgmac_stats
 *	Retrieve XGMAC (10G MAC) statistics from CNA-capable parts into
 *	the caller's DMA buffer; firmware is given the size in dwords
 *	(bytes >> 2) and reports back the dword count actually written
 *	in mb[2], returned to the caller as bytes (<< 2).
 *
 * NOTE(review): declarations and the return appear elided in this copy.
 */
5197 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
5198 uint16_t size_in_bytes, uint16_t *actual_size)
5202 mbx_cmd_t *mcp = &mc;
5204 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
5205 "Entered %s.\n", __func__);
5207 if (!IS_CNA_CAPABLE(vha->hw))
5208 return QLA_FUNCTION_FAILED;
5210 mcp->mb[0] = MBC_GET_XGMAC_STATS;
5211 mcp->mb[2] = MSW(stats_dma);
5212 mcp->mb[3] = LSW(stats_dma);
5213 mcp->mb[6] = MSW(MSD(stats_dma));
5214 mcp->mb[7] = LSW(MSD(stats_dma));
5215 mcp->mb[8] = size_in_bytes >> 2;
5216 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
5217 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5218 mcp->tov = MBX_TOV_SECONDS;
5220 rval = qla2x00_mailbox_command(vha, mcp);
5222 if (rval != QLA_SUCCESS) {
5223 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
5224 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5225 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5227 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
5228 "Done %s.\n", __func__);
 /* Success path: firmware's dword count converted back to bytes. */
5231 *actual_size = mcp->mb[2] << 2;
/*
 * qla2x00_get_dcbx_params
 *	Retrieve DCBX parameter TLVs from CNA-capable parts into the
 *	caller's DMA buffer via the GET DCBX PARAMS mailbox command.
 *
 * NOTE(review): declarations, the mb[1]/mb[8] setup lines, and the
 * return appear elided in this copy of the file.
 */
5238 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
5243 mbx_cmd_t *mcp = &mc;
5245 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
5246 "Entered %s.\n", __func__);
5248 if (!IS_CNA_CAPABLE(vha->hw))
5249 return QLA_FUNCTION_FAILED;
5251 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
5253 mcp->mb[2] = MSW(tlv_dma);
5254 mcp->mb[3] = LSW(tlv_dma);
5255 mcp->mb[6] = MSW(MSD(tlv_dma));
5256 mcp->mb[7] = LSW(MSD(tlv_dma));
5258 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5259 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5260 mcp->tov = MBX_TOV_SECONDS;
5262 rval = qla2x00_mailbox_command(vha, mcp);
5264 if (rval != QLA_SUCCESS) {
5265 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
5266 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5267 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5269 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
5270 "Done %s.\n", __func__);
/*
 * qla2x00_read_ram_word
 *	Read a single 32-bit word of RISC RAM at risc_addr via the
 *	READ RAM EXTENDED mailbox command; result assembled from
 *	mb[3] (high) and mb[2] (low).
 *
 * NOTE(review): declarations and the return appear elided in this copy.
 */
5277 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
5281 mbx_cmd_t *mcp = &mc;
5283 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
5284 "Entered %s.\n", __func__);
5286 if (!IS_FWI2_CAPABLE(vha->hw))
5287 return QLA_FUNCTION_FAILED;
 /* 32-bit address split: low word in mb[1], high word in mb[8]. */
5289 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
5290 mcp->mb[1] = LSW(risc_addr);
5291 mcp->mb[8] = MSW(risc_addr);
5292 mcp->out_mb = MBX_8|MBX_1|MBX_0;
5293 mcp->in_mb = MBX_3|MBX_2|MBX_0;
5294 mcp->tov = MBX_TOV_SECONDS;
5296 rval = qla2x00_mailbox_command(vha, mcp);
5297 if (rval != QLA_SUCCESS) {
5298 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
5299 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5301 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
5302 "Done %s.\n", __func__);
 /* Success path: combine the two returned 16-bit halves. */
5303 *data = mcp->mb[3] << 16 | mcp->mb[2];
/*
 * qla2x00_loopback_test
 *	Run the DIAGNOSTIC LOOP BACK mailbox command using the caller's
 *	pre-mapped send/receive DMA buffers and parameters from mreq,
 *	then copy the raw mailbox results (64 bytes) back to mresp.
 *
 * NOTE(review): declarations and the return appear elided in this copy.
 */
5310 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5315 mbx_cmd_t *mcp = &mc;
5317 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
5318 "Entered %s.\n", __func__);
5320 memset(mcp->mb, 0 , sizeof(mcp->mb));
5321 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
5322 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing
5324 /* transfer count */
5325 mcp->mb[10] = LSW(mreq->transfer_size);
5326 mcp->mb[11] = MSW(mreq->transfer_size);
5328 /* send data address */
5329 mcp->mb[14] = LSW(mreq->send_dma);
5330 mcp->mb[15] = MSW(mreq->send_dma);
5331 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5332 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5334 /* receive data address */
5335 mcp->mb[16] = LSW(mreq->rcv_dma);
5336 mcp->mb[17] = MSW(mreq->rcv_dma);
5337 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5338 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5340 /* Iteration count */
5341 mcp->mb[18] = LSW(mreq->iteration_count);
5342 mcp->mb[19] = MSW(mreq->iteration_count);
5344 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
5345 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
 /* CNA parts additionally take mb[2] (FCoE context). */
5346 if (IS_CNA_CAPABLE(vha->hw))
5347 mcp->out_mb |= MBX_2;
5348 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
5350 mcp->buf_size = mreq->transfer_size;
5351 mcp->tov = MBX_TOV_SECONDS;
5352 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5354 rval = qla2x00_mailbox_command(vha, mcp);
5356 if (rval != QLA_SUCCESS) {
5357 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
5358 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
5359 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
5360 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
5362 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
5363 "Done %s.\n", __func__);
5366 /* Copy mailbox information */
5367 memcpy( mresp, mcp->mb, 64);
/*
 * qla2x00_echo_test
 *	Run the DIAGNOSTIC ECHO mailbox command (BIT_15 | BIT_6 = enable
 *	+ 64-bit addressing) using the caller's send/receive DMA buffers,
 *	then copy the raw mailbox results (64 bytes) back to mresp.
 *	CNA-capable parts additionally carry the FCoE FCF index in mb[2].
 *
 * NOTE(review): declarations, part of the transfer-size setup, the
 * base in_mb assignment, and the return appear elided in this copy.
 */
5372 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5377 mbx_cmd_t *mcp = &mc;
5378 struct qla_hw_data *ha = vha->hw;
5380 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
5381 "Entered %s.\n", __func__);
5383 memset(mcp->mb, 0 , sizeof(mcp->mb));
5384 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
5385 /* BIT_6 specifies 64bit address */
5386 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
5387 if (IS_CNA_CAPABLE(ha)) {
5388 mcp->mb[2] = vha->fcoe_fcf_idx;
5390 mcp->mb[16] = LSW(mreq->rcv_dma);
5391 mcp->mb[17] = MSW(mreq->rcv_dma);
5392 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5393 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5395 mcp->mb[10] = LSW(mreq->transfer_size);
5397 mcp->mb[14] = LSW(mreq->send_dma);
5398 mcp->mb[15] = MSW(mreq->send_dma);
5399 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5400 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5402 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5403 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5404 if (IS_CNA_CAPABLE(ha))
5405 mcp->out_mb |= MBX_2;
 /* Extra status registers depending on ISP family. */
5408 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5409 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5410 mcp->in_mb |= MBX_1;
5411 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
5413 mcp->in_mb |= MBX_3;
5415 mcp->tov = MBX_TOV_SECONDS;
5416 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5417 mcp->buf_size = mreq->transfer_size;
5419 rval = qla2x00_mailbox_command(vha, mcp);
5421 if (rval != QLA_SUCCESS) {
5422 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5423 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5424 rval, mcp->mb[0], mcp->mb[1]);
5426 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5427 "Done %s.\n", __func__);
5430 /* Copy mailbox information */
5431 memcpy(mresp, mcp->mb, 64);
/*
 * qla84xx_reset_chip - issue MBC_ISP84XX_RESET.
 * enable_diagnostic is passed through in mb[1]; per the entry debug
 * message it selects diagnostic mode for the reset.
 */
5436 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5440 	mbx_cmd_t *mcp = &mc;
5442 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5443 	    "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5445 	mcp->mb[0] = MBC_ISP84XX_RESET;
5446 	mcp->mb[1] = enable_diagnostic;
5447 	mcp->out_mb = MBX_1|MBX_0;
5448 	mcp->in_mb = MBX_1|MBX_0;
5449 	mcp->tov = MBX_TOV_SECONDS;
5450 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5451 	rval = qla2x00_mailbox_command(vha, mcp);
5453 	if (rval != QLA_SUCCESS)
5454 		ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5456 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5457 		    "Done %s.\n", __func__);
/*
 * qla2x00_write_ram_word - write one 32-bit word of RISC RAM at
 * risc_addr via MBC_WRITE_RAM_WORD_EXTENDED (FWI2-capable HBAs only).
 * Returns QLA_FUNCTION_FAILED on pre-FWI2 hardware.
 */
5463 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5467 	mbx_cmd_t *mcp = &mc;
5469 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5470 	    "Entered %s.\n", __func__);
5472 	if (!IS_FWI2_CAPABLE(vha->hw))
5473 		return QLA_FUNCTION_FAILED;
5475 	mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
	/* 32-bit address split across mb[1] (LSW) and mb[8] (MSW). */
5476 	mcp->mb[1] = LSW(risc_addr);
5477 	mcp->mb[2] = LSW(data);
5478 	mcp->mb[3] = MSW(data);
5479 	mcp->mb[8] = MSW(risc_addr);
5480 	mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5481 	mcp->in_mb = MBX_1|MBX_0;
5482 	mcp->tov = MBX_TOV_SECONDS;
5484 	rval = qla2x00_mailbox_command(vha, mcp);
5485 	if (rval != QLA_SUCCESS) {
5486 		ql_dbg(ql_dbg_mbx, vha, 0x1101,
5487 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
5488 		    rval, mcp->mb[0], mcp->mb[1]);
5490 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5491 		    "Done %s.\n", __func__);
/*
 * qla81xx_write_mpi_register - hand-roll an MBC_WRITE_MPI_REGISTER
 * mailbox transaction directly on the ISP24xx register window, polling
 * for the RISC interrupt instead of going through
 * qla2x00_mailbox_command().
 *
 * mb[0..3] supply the MPI register payload (written to mailbox1..4).
 * Returns mb0 & MBS_MASK on completion, QLA_FUNCTION_FAILED if no
 * mailbox-completion interrupt was observed within the poll budget.
 *
 * Fix: the "&reg->" register accesses had been mangled into the "®"
 * character (mis-decoded HTML entity "&reg;"), which does not compile;
 * restored the address-of expressions. No other token changed.
 */
5498 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5501 	uint32_t stat, timer;
5503 	struct qla_hw_data *ha = vha->hw;
5504 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5508 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5509 	    "Entered %s.\n", __func__);
5511 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5513 	/* Write the MBC data to the registers */
5514 	wrt_reg_word(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5515 	wrt_reg_word(&reg->mailbox1, mb[0]);
5516 	wrt_reg_word(&reg->mailbox2, mb[1]);
5517 	wrt_reg_word(&reg->mailbox3, mb[2]);
5518 	wrt_reg_word(&reg->mailbox4, mb[3]);
	/* Ring the host-interrupt doorbell to start the command. */
5520 	wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
5522 	/* Poll for MBC interrupt */
5523 	for (timer = 6000000; timer; timer--) {
5524 		/* Check for pending interrupts. */
5525 		stat = rd_reg_dword(&reg->host_status);
5526 		if (stat & HSRX_RISC_INT) {
			/* Only mailbox-completion status values count. */
5529 			if (stat == 0x1 || stat == 0x2 ||
5530 			    stat == 0x10 || stat == 0x11) {
5531 				set_bit(MBX_INTERRUPT,
5532 				    &ha->mbx_cmd_flags);
5533 				mb0 = rd_reg_word(&reg->mailbox0);
5534 				wrt_reg_dword(&reg->hccr,
5535 				    HCCRX_CLR_RISC_INT);
				/* Read back to flush the posted write. */
5536 				rd_reg_dword(&reg->hccr);
5543 	if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5544 		rval = mb0 & MBS_MASK;
5546 		rval = QLA_FUNCTION_FAILED;
5548 	if (rval != QLA_SUCCESS) {
5549 		ql_dbg(ql_dbg_mbx, vha, 0x1104,
5550 		    "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5552 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5553 		    "Done %s.\n", __func__);
5559 /* Set the specified data rate */
/*
 * qla2x00_set_data_rate - program the port speed stored in
 * ha->set_data_rate via MBC_DATA_RATE. Unrecognized speeds fall back to
 * PORT_SPEED_AUTO (and ha->set_data_rate is rewritten accordingly).
 */
5561 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
5565 	mbx_cmd_t *mcp = &mc;
5566 	struct qla_hw_data *ha = vha->hw;
5569 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5570 	    "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
5573 	if (!IS_FWI2_CAPABLE(ha))
5574 		return QLA_FUNCTION_FAILED;
5576 	memset(mcp, 0, sizeof(*mcp));
	/* Validate the requested speed; anything else forces autoneg. */
5577 	switch (ha->set_data_rate) {
5578 	case PORT_SPEED_AUTO:
5579 	case PORT_SPEED_4GB:
5580 	case PORT_SPEED_8GB:
5581 	case PORT_SPEED_16GB:
5582 	case PORT_SPEED_32GB:
5583 		val = ha->set_data_rate;
5586 		ql_log(ql_log_warn, vha, 0x1199,
5587 		    "Unrecognized speed setting:%d. Setting Autoneg\n",
5589 		val = ha->set_data_rate = PORT_SPEED_AUTO;
5593 	mcp->mb[0] = MBC_DATA_RATE;
5597 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
5598 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5599 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5600 		mcp->in_mb |= MBX_4|MBX_3;
5601 	mcp->tov = MBX_TOV_SECONDS;
5603 	rval = qla2x00_mailbox_command(vha, mcp);
5604 	if (rval != QLA_SUCCESS) {
5605 		ql_dbg(ql_dbg_mbx, vha, 0x1107,
5606 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	/* mb[1] == 0x7 means "no rate reported"; skip the speed log then. */
5608 		if (mcp->mb[1] != 0x7)
5609 			ql_dbg(ql_dbg_mbx, vha, 0x1179,
5610 				"Speed set:0x%x\n", mcp->mb[1]);
5612 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5613 		    "Done %s.\n", __func__);
/*
 * qla2x00_get_data_rate - query the current link rate via MBC_DATA_RATE
 * with the QLA_GET_DATA_RATE subcommand; caches the result in
 * ha->link_data_rate (0x7 in mb[1] means "rate not available").
 */
5620 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5624 	mbx_cmd_t *mcp = &mc;
5625 	struct qla_hw_data *ha = vha->hw;
5627 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5628 	    "Entered %s.\n", __func__);
5630 	if (!IS_FWI2_CAPABLE(ha))
5631 		return QLA_FUNCTION_FAILED;
5633 	mcp->mb[0] = MBC_DATA_RATE;
5634 	mcp->mb[1] = QLA_GET_DATA_RATE;
5635 	mcp->out_mb = MBX_1|MBX_0;
5636 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5637 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5638 		mcp->in_mb |= MBX_4|MBX_3;
5639 	mcp->tov = MBX_TOV_SECONDS;
5641 	rval = qla2x00_mailbox_command(vha, mcp);
5642 	if (rval != QLA_SUCCESS) {
5643 		ql_dbg(ql_dbg_mbx, vha, 0x1107,
5644 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5646 		if (mcp->mb[1] != 0x7)
5647 			ha->link_data_rate = mcp->mb[1];
	/* mb[4] BIT_0 reports FEC enablement on 83xx/27xx/28xx parts. */
5649 		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
5650 			if (mcp->mb[4] & BIT_0)
5651 				ql_log(ql_log_info, vha, 0x11a2,
5652 				    "FEC=enabled (data rate).\n");
5655 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5656 		    "Done %s.\n", __func__);
	/* NOTE(review): link_data_rate is assigned from mb[1] a second
	 * time here as well as above — presumably on different branches;
	 * confirm against the full source. */
5657 		if (mcp->mb[1] != 0x7)
5658 			ha->link_data_rate = mcp->mb[1];
/*
 * qla81xx_get_port_config - read the four port-configuration words via
 * MBC_GET_PORT_CONFIG into mb[0..3] of the caller's buffer.
 * Supported only on 81xx/83xx/8044/27xx/28xx adapters.
 */
5665 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5669 	mbx_cmd_t *mcp = &mc;
5670 	struct qla_hw_data *ha = vha->hw;
5672 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5673 	    "Entered %s.\n", __func__);
5675 	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5676 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5677 		return QLA_FUNCTION_FAILED;
5678 	mcp->mb[0] = MBC_GET_PORT_CONFIG;
5679 	mcp->out_mb = MBX_0;
5680 	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5681 	mcp->tov = MBX_TOV_SECONDS;
5684 	rval = qla2x00_mailbox_command(vha, mcp);
5686 	if (rval != QLA_SUCCESS) {
5687 		ql_dbg(ql_dbg_mbx, vha, 0x110a,
5688 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5690 		/* Copy all bits to preserve original value */
5691 		memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5693 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5694 		    "Done %s.\n", __func__);
/*
 * qla81xx_set_port_config - write the four port-configuration words
 * (mb[0..3] of the caller's buffer) via MBC_SET_PORT_CONFIG.
 * Counterpart of qla81xx_get_port_config().
 */
5700 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5704 	mbx_cmd_t *mcp = &mc;
5706 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5707 	    "Entered %s.\n", __func__);
5709 	mcp->mb[0] = MBC_SET_PORT_CONFIG;
5710 	/* Copy all bits to preserve original setting */
5711 	memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5712 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5714 	mcp->tov = MBX_TOV_SECONDS;
5716 	rval = qla2x00_mailbox_command(vha, mcp);
5718 	if (rval != QLA_SUCCESS) {
5719 		ql_dbg(ql_dbg_mbx, vha, 0x110d,
5720 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5722 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5723 		    "Done %s.\n", __func__);
/*
 * qla24xx_set_fcp_prio - set the FCP priority (low nibble of `priority`
 * into mb[4]) for the port identified by loop_id via MBC_PORT_PARAMS.
 * 24xx/25xx only; honored only when ha->flags.fcp_prio_enabled is set.
 */
5730 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5735 	mbx_cmd_t *mcp = &mc;
5736 	struct qla_hw_data *ha = vha->hw;
5738 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5739 	    "Entered %s.\n", __func__);
5741 	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5742 		return QLA_FUNCTION_FAILED;
5744 	mcp->mb[0] = MBC_PORT_PARAMS;
5745 	mcp->mb[1] = loop_id;
5746 	if (ha->flags.fcp_prio_enabled)
5750 	mcp->mb[4] = priority & 0xf;
5751 	mcp->mb[9] = vha->vp_idx;
5752 	mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5753 	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5754 	mcp->tov = MBX_TOV_SECONDS;
5756 	rval = qla2x00_mailbox_command(vha, mcp);
5764 	if (rval != QLA_SUCCESS) {
5765 		ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5767 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5768 		    "Done %s.\n", __func__);
/*
 * qla2x00_get_thermal_temp - read the adapter temperature into *temp.
 * Dispatches per hardware family: specific 25xx subsystem IDs read the
 * sensor over SFP/I2C, 82xx/8044 use their own register readers, and
 * everything else falls through to qla2x00_read_asic_temperature().
 */
5775 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5777 	int rval = QLA_FUNCTION_FAILED;
5778 	struct qla_hw_data *ha = vha->hw;
5781 	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5782 		ql_dbg(ql_dbg_mbx, vha, 0x1150,
5783 		    "Thermal not supported by this card.\n");
5787 	if (IS_QLA25XX(ha)) {
	/* QLogic-branded 25xx board (subdevice 0x0175): sensor at I2C
	 * device 0x98, offset 0x1. */
5788 		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5789 		    ha->pdev->subsystem_device == 0x0175) {
5790 			rval = qla2x00_read_sfp(vha, 0, &byte,
5791 			    0x98, 0x1, 1, BIT_13|BIT_0);
	/* HP-branded 25xx board (subdevice 0x338e): same device/offset,
	 * different access flags. */
5795 		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5796 		    ha->pdev->subsystem_device == 0x338e) {
5797 			rval = qla2x00_read_sfp(vha, 0, &byte,
5798 			    0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5802 		ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5803 		    "Thermal not supported by this card.\n");
5807 	if (IS_QLA82XX(ha)) {
5808 		*temp = qla82xx_read_temperature(vha);
5811 	} else if (IS_QLA8044(ha)) {
5812 		*temp = qla8044_read_temperature(vha);
5817 	rval = qla2x00_read_asic_temperature(vha, temp);
/*
 * qla82xx_mbx_intr_enable - ask the firmware to enable interrupt
 * delivery via MBC_TOGGLE_INTERRUPT (FWI2-capable HBAs only).
 */
5822 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5825 	struct qla_hw_data *ha = vha->hw;
5827 	mbx_cmd_t *mcp = &mc;
5829 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5830 	    "Entered %s.\n", __func__);
5832 	if (!IS_FWI2_CAPABLE(ha))
5833 		return QLA_FUNCTION_FAILED;
5835 	memset(mcp, 0, sizeof(mbx_cmd_t));
5836 	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5839 	mcp->out_mb = MBX_1|MBX_0;
5841 	mcp->tov = MBX_TOV_SECONDS;
5844 	rval = qla2x00_mailbox_command(vha, mcp);
5845 	if (rval != QLA_SUCCESS) {
5846 		ql_dbg(ql_dbg_mbx, vha, 0x1016,
5847 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5849 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5850 		    "Done %s.\n", __func__);
/*
 * qla82xx_mbx_intr_disable - ask the firmware to disable interrupt
 * delivery via MBC_TOGGLE_INTERRUPT (P3P-type, i.e. 82xx-family, only).
 * Counterpart of qla82xx_mbx_intr_enable().
 */
5857 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5860 	struct qla_hw_data *ha = vha->hw;
5862 	mbx_cmd_t *mcp = &mc;
5864 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5865 	    "Entered %s.\n", __func__);
5867 	if (!IS_P3P_TYPE(ha))
5868 		return QLA_FUNCTION_FAILED;
5870 	memset(mcp, 0, sizeof(mbx_cmd_t));
5871 	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5874 	mcp->out_mb = MBX_1|MBX_0;
5876 	mcp->tov = MBX_TOV_SECONDS;
5879 	rval = qla2x00_mailbox_command(vha, mcp);
5880 	if (rval != QLA_SUCCESS) {
5881 		ql_dbg(ql_dbg_mbx, vha, 0x100c,
5882 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5884 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5885 		    "Done %s.\n", __func__);
/*
 * qla82xx_md_get_template_size - query the minidump template size via
 * MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE / RQST_TMPLT_SIZE and store the
 * 32-bit size (mb[3]:mb[2]) in ha->md_template_size. A zero size is
 * treated as failure.
 */
5892 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5894 	struct qla_hw_data *ha = vha->hw;
5896 	mbx_cmd_t *mcp = &mc;
5897 	int rval = QLA_FUNCTION_FAILED;
5899 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5900 	    "Entered %s.\n", __func__);
5902 	memset(mcp->mb, 0 , sizeof(mcp->mb));
	/* The opcode and subcode are 32-bit values split across two
	 * 16-bit mailbox registers each. */
5903 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5904 	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5905 	mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5906 	mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5908 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5909 	mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5910 	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5912 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5913 	mcp->tov = MBX_TOV_SECONDS;
5914 	rval = qla2x00_mailbox_command(vha, mcp);
5916 	/* Always copy back return mailbox values. */
5917 	if (rval != QLA_SUCCESS) {
5918 		ql_dbg(ql_dbg_mbx, vha, 0x1120,
5919 		    "mailbox command FAILED=0x%x, subcode=%x.\n",
5920 		    (mcp->mb[1] << 16) | mcp->mb[0],
5921 		    (mcp->mb[3] << 16) | mcp->mb[2]);
5923 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5924 		    "Done %s.\n", __func__);
5925 		ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5926 		if (!ha->md_template_size) {
5927 			ql_dbg(ql_dbg_mbx, vha, 0x1122,
5928 			    "Null template size obtained.\n");
5929 			rval = QLA_FUNCTION_FAILED;
/*
 * qla82xx_md_get_template - allocate a DMA-coherent buffer of
 * ha->md_template_size bytes and fetch the minidump template into it
 * via MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE / RQST_TMPLT in one transfer.
 * The buffer (ha->md_tmplt_hdr) is retained for later minidump capture.
 */
5936 qla82xx_md_get_template(scsi_qla_host_t *vha)
5938 	struct qla_hw_data *ha = vha->hw;
5940 	mbx_cmd_t *mcp = &mc;
5941 	int rval = QLA_FUNCTION_FAILED;
5943 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5944 	    "Entered %s.\n", __func__);
5946 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5947 	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5948 	if (!ha->md_tmplt_hdr) {
5949 		ql_log(ql_log_warn, vha, 0x1124,
5950 		    "Unable to allocate memory for Minidump template.\n");
5954 	memset(mcp->mb, 0 , sizeof(mcp->mb));
5955 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5956 	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5957 	mcp->mb[2] = LSW(RQST_TMPLT);
5958 	mcp->mb[3] = MSW(RQST_TMPLT);
	/* 64-bit destination DMA address in mb[4..7], size in mb[8..9]. */
5959 	mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5960 	mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5961 	mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5962 	mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5963 	mcp->mb[8] = LSW(ha->md_template_size);
5964 	mcp->mb[9] = MSW(ha->md_template_size);
5966 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5967 	mcp->tov = MBX_TOV_SECONDS;
5968 	mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5969 	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5970 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5971 	rval = qla2x00_mailbox_command(vha, mcp);
5973 	if (rval != QLA_SUCCESS) {
5974 		ql_dbg(ql_dbg_mbx, vha, 0x1125,
5975 		    "mailbox command FAILED=0x%x, subcode=%x.\n",
5976 		    ((mcp->mb[1] << 16) | mcp->mb[0]),
5977 		    ((mcp->mb[3] << 16) | mcp->mb[2]));
5979 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5980 		    "Done %s.\n", __func__);
/*
 * qla8044_md_get_template - 8044 variant of the minidump-template
 * fetch: the template is retrieved in MINIDUMP_SIZE_36K chunks, with
 * the running byte offset passed to firmware in mb[10]/mb[11].
 */
5985 qla8044_md_get_template(scsi_qla_host_t *vha)
5987 	struct qla_hw_data *ha = vha->hw;
5989 	mbx_cmd_t *mcp = &mc;
5990 	int rval = QLA_FUNCTION_FAILED;
5991 	int offset = 0, size = MINIDUMP_SIZE_36K;
5993 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5994 	    "Entered %s.\n", __func__);
5996 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5997 	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5998 	if (!ha->md_tmplt_hdr) {
5999 		ql_log(ql_log_warn, vha, 0xb11b,
6000 		    "Unable to allocate memory for Minidump template.\n");
6004 	memset(mcp->mb, 0 , sizeof(mcp->mb));
	/* Fetch one chunk per iteration, advancing the DMA address and
	 * firmware-side offset together. */
6005 	while (offset < ha->md_template_size) {
6006 		mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
6007 		mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
6008 		mcp->mb[2] = LSW(RQST_TMPLT);
6009 		mcp->mb[3] = MSW(RQST_TMPLT);
6010 		mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
6011 		mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
6012 		mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
6013 		mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
6014 		mcp->mb[8] = LSW(size);
6015 		mcp->mb[9] = MSW(size);
6016 		mcp->mb[10] = offset & 0x0000FFFF;
	/* NOTE(review): mb[] entries are 16-bit, so "offset & 0xFFFF0000"
	 * truncates to 0 — MSW(offset) was probably intended. Harmless
	 * while templates stay under 64K; confirm before relying on it. */
6017 		mcp->mb[11] = offset & 0xFFFF0000;
6018 		mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
6019 		mcp->tov = MBX_TOV_SECONDS;
6020 		mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
6021 			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6022 		mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6023 		rval = qla2x00_mailbox_command(vha, mcp);
6025 		if (rval != QLA_SUCCESS) {
6026 			ql_dbg(ql_dbg_mbx, vha, 0xb11c,
6027 				"mailbox command FAILED=0x%x, subcode=%x.\n",
6028 				((mcp->mb[1] << 16) | mcp->mb[0]),
6029 				((mcp->mb[3] << 16) | mcp->mb[2]));
6032 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
6033 			    "Done %s.\n", __func__);
6034 			offset = offset + size;
/*
 * qla81xx_set_led_config - program LED behavior via MBC_SET_LED_CONFIG.
 * 81xx takes two config words (led_cfg[0..1]); 8031 takes six
 * (led_cfg[0..5]) and widens out_mb accordingly.
 */
6040 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
6043 	struct qla_hw_data *ha = vha->hw;
6045 	mbx_cmd_t *mcp = &mc;
6047 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6048 		return QLA_FUNCTION_FAILED;
6050 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
6051 	    "Entered %s.\n", __func__);
6053 	memset(mcp, 0, sizeof(mbx_cmd_t));
6054 	mcp->mb[0] = MBC_SET_LED_CONFIG;
6055 	mcp->mb[1] = led_cfg[0];
6056 	mcp->mb[2] = led_cfg[1];
6057 	if (IS_QLA8031(ha)) {
6058 		mcp->mb[3] = led_cfg[2];
6059 		mcp->mb[4] = led_cfg[3];
6060 		mcp->mb[5] = led_cfg[4];
6061 		mcp->mb[6] = led_cfg[5];
6064 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
6066 		mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6068 	mcp->tov = MBX_TOV_SECONDS;
6071 	rval = qla2x00_mailbox_command(vha, mcp);
6072 	if (rval != QLA_SUCCESS) {
6073 		ql_dbg(ql_dbg_mbx, vha, 0x1134,
6074 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6076 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
6077 		    "Done %s.\n", __func__);
/*
 * qla81xx_get_led_config - read LED configuration via
 * MBC_GET_LED_CONFIG into led_cfg[]: two words on 81xx, six on 8031.
 * Counterpart of qla81xx_set_led_config().
 */
6084 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
6087 	struct qla_hw_data *ha = vha->hw;
6089 	mbx_cmd_t *mcp = &mc;
6091 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6092 		return QLA_FUNCTION_FAILED;
6094 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
6095 	    "Entered %s.\n", __func__);
6097 	memset(mcp, 0, sizeof(mbx_cmd_t));
6098 	mcp->mb[0] = MBC_GET_LED_CONFIG;
6100 	mcp->out_mb = MBX_0;
6101 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
6103 		mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6104 	mcp->tov = MBX_TOV_SECONDS;
6107 	rval = qla2x00_mailbox_command(vha, mcp);
6108 	if (rval != QLA_SUCCESS) {
6109 		ql_dbg(ql_dbg_mbx, vha, 0x1137,
6110 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6112 		led_cfg[0] = mcp->mb[1];
6113 		led_cfg[1] = mcp->mb[2];
6114 		if (IS_QLA8031(ha)) {
6115 			led_cfg[2] = mcp->mb[3];
6116 			led_cfg[3] = mcp->mb[4];
6117 			led_cfg[4] = mcp->mb[5];
6118 			led_cfg[5] = mcp->mb[6];
6120 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
6121 		    "Done %s.\n", __func__);
/*
 * qla82xx_mbx_beacon_ctl - toggle the beacon LED on P3P (82xx-family)
 * adapters via MBC_SET_LED_CONFIG; the enable/disable value is carried
 * in a mailbox register set on a line missing from this extract.
 */
6128 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
6131 	struct qla_hw_data *ha = vha->hw;
6133 	mbx_cmd_t *mcp = &mc;
6135 	if (!IS_P3P_TYPE(ha))
6136 		return QLA_FUNCTION_FAILED;
6138 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
6139 	    "Entered %s.\n", __func__);
6141 	memset(mcp, 0, sizeof(mbx_cmd_t));
6142 	mcp->mb[0] = MBC_SET_LED_CONFIG;
6148 	mcp->out_mb = MBX_7|MBX_0;
6150 	mcp->tov = MBX_TOV_SECONDS;
6153 	rval = qla2x00_mailbox_command(vha, mcp);
6154 	if (rval != QLA_SUCCESS) {
6155 		ql_dbg(ql_dbg_mbx, vha, 0x1128,
6156 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6158 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
6159 		    "Done %s.\n", __func__);
/*
 * qla83xx_wr_reg - write a 32-bit value to a remote register at `reg`
 * via MBC_WRITE_REMOTE_REG (83xx/27xx/28xx only). Address and data are
 * each split LSW/MSW across two mailbox registers.
 */
6166 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
6169 	struct qla_hw_data *ha = vha->hw;
6171 	mbx_cmd_t *mcp = &mc;
6173 	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6174 		return QLA_FUNCTION_FAILED;
6176 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
6177 	    "Entered %s.\n", __func__);
6179 	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6180 	mcp->mb[1] = LSW(reg);
6181 	mcp->mb[2] = MSW(reg);
6182 	mcp->mb[3] = LSW(data);
6183 	mcp->mb[4] = MSW(data);
6184 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6186 	mcp->in_mb = MBX_1|MBX_0;
6187 	mcp->tov = MBX_TOV_SECONDS;
6189 	rval = qla2x00_mailbox_command(vha, mcp);
6191 	if (rval != QLA_SUCCESS) {
6192 		ql_dbg(ql_dbg_mbx, vha, 0x1131,
6193 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6195 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
6196 		    "Done %s.\n", __func__);
/*
 * qla2x00_port_logout - perform an implicit LOGO of the given fcport
 * via MBC_PORT_LOGOUT (mb[10] BIT_15 selects implicit logout).
 * Unsupported on 2100/2200 adapters.
 */
6203 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
6206 	struct qla_hw_data *ha = vha->hw;
6208 	mbx_cmd_t *mcp = &mc;
6210 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
6211 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
6212 		    "Implicit LOGO Unsupported.\n");
6213 		return QLA_FUNCTION_FAILED;
6217 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
6218 	    "Entering %s.\n",  __func__);
6220 	/* Perform Implicit LOGO. */
6221 	mcp->mb[0] = MBC_PORT_LOGOUT;
6222 	mcp->mb[1] = fcport->loop_id;
6223 	mcp->mb[10] = BIT_15;
6224 	mcp->out_mb = MBX_10|MBX_1|MBX_0;
6226 	mcp->tov = MBX_TOV_SECONDS;
6228 	rval = qla2x00_mailbox_command(vha, mcp);
6229 	if (rval != QLA_SUCCESS)
6230 		ql_dbg(ql_dbg_mbx, vha, 0x113d,
6231 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6233 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
6234 		    "Done %s.\n", __func__);
/*
 * qla83xx_rd_reg - read a 32-bit remote register at `reg` via
 * MBC_READ_REMOTE_REG into *data (mb[3] low half, mb[4] high half).
 * CAMRAM reads during soft-reset can return 0xbad0bad0; the routine
 * retries such reads for up to 2 seconds (retry_max_time) before
 * giving up. 83xx/27xx/28xx only.
 */
6240 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
6244 	mbx_cmd_t *mcp = &mc;
6245 	struct qla_hw_data *ha = vha->hw;
6246 	unsigned long retry_max_time = jiffies + (2 * HZ);
6248 	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6249 		return QLA_FUNCTION_FAILED;
6251 	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
6254 	mcp->mb[0] = MBC_READ_REMOTE_REG;
6255 	mcp->mb[1] = LSW(reg);
6256 	mcp->mb[2] = MSW(reg);
6257 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
6258 	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
6259 	mcp->tov = MBX_TOV_SECONDS;
6261 	rval = qla2x00_mailbox_command(vha, mcp);
6263 	if (rval != QLA_SUCCESS) {
6264 		ql_dbg(ql_dbg_mbx, vha, 0x114c,
6265 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
6266 		    rval, mcp->mb[0], mcp->mb[1]);
6268 		*data = (mcp->mb[3] | (mcp->mb[4] << 16));
6269 		if (*data == QLA8XXX_BAD_VALUE) {
6271 			 * During soft-reset CAMRAM register reads might
6272 			 * return 0xbad0bad0. So retry for MAX of 2 sec
6273 			 * while reading camram registers.
6275 			if (time_after(jiffies, retry_max_time)) {
6276 				ql_dbg(ql_dbg_mbx, vha, 0x1141,
6277 				    "Failure to read CAMRAM register. "
6278 				    "data=0x%x.\n", *data);
6279 				return QLA_FUNCTION_FAILED;
6284 	ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
/*
 * qla83xx_restart_nic_firmware - restart the NIC-side firmware via
 * MBC_RESTART_NIC_FIRMWARE (83xx only). On failure, a firmware dump is
 * captured for post-mortem analysis.
 */
6291 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
6295 	mbx_cmd_t *mcp = &mc;
6296 	struct qla_hw_data *ha = vha->hw;
6298 	if (!IS_QLA83XX(ha))
6299 		return QLA_FUNCTION_FAILED;
6301 	ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
6303 	mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
6304 	mcp->out_mb = MBX_0;
6305 	mcp->in_mb = MBX_1|MBX_0;
6306 	mcp->tov = MBX_TOV_SECONDS;
6308 	rval = qla2x00_mailbox_command(vha, mcp);
6310 	if (rval != QLA_SUCCESS) {
6311 		ql_dbg(ql_dbg_mbx, vha, 0x1144,
6312 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
6313 		    rval, mcp->mb[0], mcp->mb[1]);
6314 		qla2xxx_dump_fw(vha);
6316 		ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
/*
 * qla83xx_access_control - issue MBC_SET_ACCESS_CONTROL (8031 only).
 * The low byte of `options` is a subcode whose bits select the
 * operation: BIT_2 sends a start/end address range, BIT_5 returns the
 * flash sector size in *sector_size, BIT_6/BIT_7 report a driver-lock
 * id and BIT_3/BIT_4 a flash-lock id (logged from mb[3]/mb[4]).
 */
6323 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
6324 	uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
6328 	mbx_cmd_t *mcp = &mc;
6329 	uint8_t subcode = (uint8_t)options;
6330 	struct qla_hw_data *ha = vha->hw;
6332 	if (!IS_QLA8031(ha))
6333 		return QLA_FUNCTION_FAILED;
6335 	ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
6337 	mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
6338 	mcp->mb[1] = options;
6339 	mcp->out_mb = MBX_1|MBX_0;
6340 	if (subcode & BIT_2) {
6341 		mcp->mb[2] = LSW(start_addr);
6342 		mcp->mb[3] = MSW(start_addr);
6343 		mcp->mb[4] = LSW(end_addr);
6344 		mcp->mb[5] = MSW(end_addr);
6345 		mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
6347 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
6348 	if (!(subcode & (BIT_2 | BIT_5)))
6349 		mcp->in_mb |= MBX_4|MBX_3;
6350 	mcp->tov = MBX_TOV_SECONDS;
6352 	rval = qla2x00_mailbox_command(vha, mcp);
6354 	if (rval != QLA_SUCCESS) {
6355 		ql_dbg(ql_dbg_mbx, vha, 0x1147,
6356 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
6357 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
6359 		qla2xxx_dump_fw(vha);
6361 		if (subcode & BIT_5)
6362 			*sector_size = mcp->mb[1];
6363 		else if (subcode & (BIT_6 | BIT_7)) {
6364 			ql_dbg(ql_dbg_mbx, vha, 0x1148,
6365 			    "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6366 		} else if (subcode & (BIT_3 | BIT_4)) {
6367 			ql_dbg(ql_dbg_mbx, vha, 0x1149,
6368 			    "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6370 		ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
/*
 * qla2x00_dump_mctp_data - DMA MCTP RAM contents starting at `addr`
 * into the caller-supplied buffer at req_dma, using
 * MBC_DUMP_RISC_RAM_EXTENDED with RAM ID 0x40 (mb[10], BIT_7 marks the
 * RAM ID as valid). MCTP-capable adapters only.
 */
6377 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
6382 	mbx_cmd_t *mcp = &mc;
6384 	if (!IS_MCTP_CAPABLE(vha->hw))
6385 		return QLA_FUNCTION_FAILED;
6387 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
6388 	    "Entered %s.\n", __func__);
6390 	mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
6391 	mcp->mb[1] = LSW(addr);
6392 	mcp->mb[2] = MSW(req_dma);
6393 	mcp->mb[3] = LSW(req_dma);
6394 	mcp->mb[4] = MSW(size);
6395 	mcp->mb[5] = LSW(size);
6396 	mcp->mb[6] = MSW(MSD(req_dma));
6397 	mcp->mb[7] = LSW(MSD(req_dma));
6398 	mcp->mb[8] = MSW(addr);
6399 	/* Setting RAM ID to valid */
6400 	/* For MCTP RAM ID is 0x40 */
6401 	mcp->mb[10] = BIT_7 | 0x40;
6403 	mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6407 	mcp->tov = MBX_TOV_SECONDS;
6409 	rval = qla2x00_mailbox_command(vha, mcp);
6411 	if (rval != QLA_SUCCESS) {
6412 		ql_dbg(ql_dbg_mbx, vha, 0x114e,
6413 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6415 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6416 		    "Done %s.\n", __func__);
/*
 * qla26xx_dport_diagnostics - run D-port diagnostics via
 * MBC_DPORT_DIAGNOSTICS, streaming results into dd_buf. The buffer is
 * streaming-DMA mapped for the duration of the command (extended
 * timeout: 4x MBX_TOV_SECONDS) and unmapped before return.
 * 83xx/27xx/28xx only.
 */
6423 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6424 	void *dd_buf, uint size, uint options)
6428 	mbx_cmd_t *mcp = &mc;
6431 	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
6432 	    !IS_QLA28XX(vha->hw))
6433 		return QLA_FUNCTION_FAILED;
6435 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6436 	    "Entered %s.\n", __func__);
6438 	dd_dma = dma_map_single(&vha->hw->pdev->dev,
6439 	    dd_buf, size, DMA_FROM_DEVICE);
6440 	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6441 		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6442 		return QLA_MEMORY_ALLOC_FAILED;
6445 	memset(dd_buf, 0, size);
6447 	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6448 	mcp->mb[1] = options;
6449 	mcp->mb[2] = MSW(LSD(dd_dma));
6450 	mcp->mb[3] = LSW(LSD(dd_dma));
6451 	mcp->mb[6] = MSW(MSD(dd_dma));
6452 	mcp->mb[7] = LSW(MSD(dd_dma));
6454 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6455 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6456 	mcp->buf_size = size;
6457 	mcp->flags = MBX_DMA_IN;
6458 	mcp->tov = MBX_TOV_SECONDS * 4;
6459 	rval = qla2x00_mailbox_command(vha, mcp);
6461 	if (rval != QLA_SUCCESS) {
6462 		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6464 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6465 		    "Done %s.\n", __func__);
6468 	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6469 	    size, DMA_FROM_DEVICE);
/*
 * qla2x00_async_mb_sp_done - srb completion callback for mailbox-over-
 * IOCB commands: records the result and wakes the waiter in
 * qla24xx_send_mb_cmd(), which owns (and frees) the srb.
 */
6474 static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
6476 	sp->u.iocb_cmd.u.mbx.rc = res;
6478 	complete(&sp->u.iocb_cmd.u.mbx.comp);
6479 	/* don't free sp here. Let the caller do the free */
6483  * This mailbox uses the iocb interface to send MB command.
6484  * This allows non-critial (non chip setup) command to go
/*
 * qla24xx_send_mb_cmd - synchronous wrapper: packs mcp->mb into an
 * SRB_MB_IOCB srb, submits it, blocks on the completion raised by
 * qla2x00_async_mb_sp_done(), then copies the returned mailbox
 * registers back into mcp->mb. Returns QLA_SUCCESS or an error/timeout
 * code. Must not be called before firmware is started.
 */
6487 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6489 	int rval = QLA_FUNCTION_FAILED;
6493 	if (!vha->hw->flags.fw_started)
6497 	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6501 	c = &sp->u.iocb_cmd;
6502 	init_completion(&c->u.mbx.comp);
6504 	sp->type = SRB_MB_IOCB;
6505 	sp->name = mb_to_str(mcp->mb[0]);
6506 	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
6507 			      qla2x00_async_mb_sp_done);
6509 	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6511 	rval = qla2x00_start_sp(sp);
6512 	if (rval != QLA_SUCCESS) {
6513 		ql_dbg(ql_dbg_mbx, vha, 0x1018,
6514 		    "%s: %s Failed submission. %x.\n",
6515 		    __func__, sp->name, rval);
6519 	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6520 	    sp->name, sp->handle);
6522 	wait_for_completion(&c->u.mbx.comp);
6523 	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6527 	case QLA_FUNCTION_TIMEOUT:
6528 		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6529 		    __func__, sp->name, rval);
6532 		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6533 		    __func__, sp->name);
6536 		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6537 		    __func__, sp->name, rval);
	/* Drop the submission reference; releases the srb. */
6543 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
6550  * NOTE: Do not call this routine from DPC thread
/*
 * qla24xx_gpdb_wait - fetch the 24xx port database for fcport via
 * MBC_GET_PORT_DATABASE over the mailbox-IOCB path (blocking, hence
 * the no-DPC-thread note above) and parse it with
 * __qla24xx_parse_gpdb(). The DMA-pool buffer is always freed.
 */
6552 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6554 	int rval = QLA_FUNCTION_FAILED;
6556 	struct port_database_24xx *pd;
6557 	struct qla_hw_data *ha = vha->hw;
6560 	if (!vha->hw->flags.fw_started)
6563 	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6565 		ql_log(ql_log_warn, vha, 0xd047,
6566 		    "Failed to allocate port database structure.\n");
6570 	memset(&mc, 0, sizeof(mc));
6571 	mc.mb[0] = MBC_GET_PORT_DATABASE;
6572 	mc.mb[1] = fcport->loop_id;
6573 	mc.mb[2] = MSW(pd_dma);
6574 	mc.mb[3] = LSW(pd_dma);
6575 	mc.mb[6] = MSW(MSD(pd_dma));
6576 	mc.mb[7] = LSW(MSD(pd_dma));
6577 	mc.mb[9] = vha->vp_idx;
6580 	rval = qla24xx_send_mb_cmd(vha, &mc);
6581 	if (rval != QLA_SUCCESS) {
6582 		ql_dbg(ql_dbg_mbx, vha, 0x1193,
6583 		    "%s: %8phC fail\n", __func__, fcport->port_name);
6587 	rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6589 	ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6590 	    __func__, fcport->port_name);
6594 	dma_pool_free(ha->s_dma_pool, pd, pd_dma);
/*
 * __qla24xx_parse_gpdb - interpret a fetched 24xx port database entry
 * and update fcport: verifies PRLI-complete login state, checks the
 * port name still matches (else QLA_NOT_LOGGED_IN), then copies names,
 * port_id, port type (FCP vs NVMe roles from PRLI service parameter
 * word 3), supported classes, and confirmed-completion support.
 */
6599 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6600 	struct port_database_24xx *pd)
6602 	int rval = QLA_SUCCESS;
6604 	u8 current_login_state, last_login_state;
	/* NVMe targets report login state in the high nibble. */
6606 	if (NVME_TARGET(vha->hw, fcport)) {
6607 		current_login_state = pd->current_login_state >> 4;
6608 		last_login_state = pd->last_login_state >> 4;
6610 		current_login_state = pd->current_login_state & 0xf;
6611 		last_login_state = pd->last_login_state & 0xf;
6614 	/* Check for logged in state. */
6615 	if (current_login_state != PDS_PRLI_COMPLETE) {
6616 		ql_dbg(ql_dbg_mbx, vha, 0x119a,
6617 		    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6618 		    current_login_state, last_login_state, fcport->loop_id);
6619 		rval = QLA_FUNCTION_FAILED;
6623 	if (fcport->loop_id == FC_NO_LOOP_ID ||
6624 	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6625 	     memcmp(fcport->port_name, pd->port_name, 8))) {
6626 		/* We lost the device mid way. */
6627 		rval = QLA_NOT_LOGGED_IN;
6631 	/* Names are little-endian. */
6632 	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6633 	memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6635 	/* Get port_id of device. */
6636 	fcport->d_id.b.domain = pd->port_id[0];
6637 	fcport->d_id.b.area = pd->port_id[1];
6638 	fcport->d_id.b.al_pa = pd->port_id[2];
6639 	fcport->d_id.b.rsvd_1 = 0;
6641 	ql_dbg(ql_dbg_disc, vha, 0x2062,
6642 	    "%8phC SVC Param w3 %02x%02x",
6644 	    pd->prli_svc_param_word_3[1],
6645 	    pd->prli_svc_param_word_3[0]);
	/* Role bits in PRLI service param word 3 are active-low here
	 * (bit clear => role present). */
6647 	if (NVME_TARGET(vha->hw, fcport)) {
6648 		fcport->port_type = FCT_NVME;
6649 		if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
6650 			fcport->port_type |= FCT_NVME_INITIATOR;
6651 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6652 			fcport->port_type |= FCT_NVME_TARGET;
6653 		if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
6654 			fcport->port_type |= FCT_NVME_DISCOVERY;
6656 		/* If not target must be initiator or unknown type. */
6657 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6658 			fcport->port_type = FCT_INITIATOR;
6660 			fcport->port_type = FCT_TARGET;
6662 	/* Passback COS information. */
6663 	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6664 	    FC_COS_CLASS2 : FC_COS_CLASS3;
6666 	if (pd->prli_svc_param_word_3[0] & BIT_7) {
6667 		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6668 		fcport->conf_compl_supported = 1;
6676  * qla24xx_gidlist__wait
6677  * NOTE: don't call this routine from DPC thread.
/*
 * qla24xx_gidlist_wait - fetch the firmware's ID list via
 * MBC_GET_ID_LIST over the blocking mailbox-IOCB path; the entry count
 * comes back in mb[1] and is stored to *entries.
 */
6679 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6680 	void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6682 	int rval = QLA_FUNCTION_FAILED;
6685 	if (!vha->hw->flags.fw_started)
6688 	memset(&mc, 0, sizeof(mc));
6689 	mc.mb[0] = MBC_GET_ID_LIST;
6690 	mc.mb[2] = MSW(id_list_dma);
6691 	mc.mb[3] = LSW(id_list_dma);
6692 	mc.mb[6] = MSW(MSD(id_list_dma));
6693 	mc.mb[7] = LSW(MSD(id_list_dma));
6695 	mc.mb[9] = vha->vp_idx;
6697 	rval = qla24xx_send_mb_cmd(vha, &mc);
6698 	if (rval != QLA_SUCCESS) {
6699 		ql_dbg(ql_dbg_mbx, vha, 0x119b,
6700 		    "%s:  fail\n", __func__);
6702 		*entries = mc.mb[1];
6703 		ql_dbg(ql_dbg_mbx, vha, 0x119c,
6704 		    "%s:  done\n", __func__);
/*
 * qla27xx_set_zio_threshold - program the ZIO threshold via
 * MBC_GET_SET_ZIO_THRESHOLD (the "set" subcommand and the value are
 * placed in registers on lines missing from this extract).
 */
6710 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6714 	mbx_cmd_t *mcp = &mc;
6716 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6717 	    "Entered %s\n", __func__);
6719 	memset(mcp->mb, 0 , sizeof(mcp->mb));
6720 	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6723 	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6724 	mcp->in_mb = MBX_2 | MBX_0;
6725 	mcp->tov = MBX_TOV_SECONDS;
6728 	rval = qla2x00_mailbox_command(vha, mcp);
6730 	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6731 	    (rval != QLA_SUCCESS) ? "Failed"  : "Done", rval);
6736 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6740 mbx_cmd_t *mcp = &mc;
6742 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6743 "Entered %s\n", __func__);
6745 memset(mcp->mb, 0, sizeof(mcp->mb));
6746 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6748 mcp->out_mb = MBX_1 | MBX_0;
6749 mcp->in_mb = MBX_2 | MBX_0;
6750 mcp->tov = MBX_TOV_SECONDS;
6753 rval = qla2x00_mailbox_command(vha, mcp);
6754 if (rval == QLA_SUCCESS)
6757 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6758 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6764 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6766 struct qla_hw_data *ha = vha->hw;
6767 uint16_t iter, addr, offset;
6768 dma_addr_t phys_addr;
6772 memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6774 phys_addr = ha->sfp_data_dma;
6775 sfp_data = ha->sfp_data;
6778 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6780 /* Skip to next device address. */
6785 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6786 addr, offset, SFP_BLOCK_SIZE, BIT_1);
6787 if (rval != QLA_SUCCESS) {
6788 ql_log(ql_log_warn, vha, 0x706d,
6789 "Unable to read SFP data (%x/%x/%x).\n", rval,
6795 if (buf && (c < count)) {
6798 if ((count - c) >= SFP_BLOCK_SIZE)
6799 sz = SFP_BLOCK_SIZE;
6803 memcpy(buf, sfp_data, sz);
6804 buf += SFP_BLOCK_SIZE;
6807 phys_addr += SFP_BLOCK_SIZE;
6808 sfp_data += SFP_BLOCK_SIZE;
6809 offset += SFP_BLOCK_SIZE;
6815 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6816 uint16_t *out_mb, int out_mb_sz)
6818 int rval = QLA_FUNCTION_FAILED;
6821 if (!vha->hw->flags.fw_started)
6824 memset(&mc, 0, sizeof(mc));
6825 mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6827 rval = qla24xx_send_mb_cmd(vha, &mc);
6828 if (rval != QLA_SUCCESS) {
6829 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6830 "%s: fail\n", __func__);
6832 if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6833 memcpy(out_mb, mc.mb, out_mb_sz);
6835 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6837 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6838 "%s: done\n", __func__);
6844 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
6845 uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
6850 mbx_cmd_t *mcp = &mc;
6852 mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
6854 mcp->mb[2] = region;
6855 mcp->mb[3] = MSW(len);
6856 mcp->mb[4] = LSW(len);
6857 mcp->mb[5] = MSW(sfub_dma_addr);
6858 mcp->mb[6] = LSW(sfub_dma_addr);
6859 mcp->mb[7] = MSW(MSD(sfub_dma_addr));
6860 mcp->mb[8] = LSW(MSD(sfub_dma_addr));
6861 mcp->mb[9] = sfub_len;
6863 MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6864 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6865 mcp->tov = MBX_TOV_SECONDS;
6867 rval = qla2x00_mailbox_command(vha, mcp);
6869 if (rval != QLA_SUCCESS) {
6870 ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
6871 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
6878 int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6883 mbx_cmd_t *mcp = &mc;
6885 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6886 "Entered %s.\n", __func__);
6888 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6889 mcp->mb[1] = LSW(addr);
6890 mcp->mb[2] = MSW(addr);
6891 mcp->mb[3] = LSW(data);
6892 mcp->mb[4] = MSW(data);
6893 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6894 mcp->in_mb = MBX_1|MBX_0;
6895 mcp->tov = MBX_TOV_SECONDS;
6897 rval = qla2x00_mailbox_command(vha, mcp);
6899 if (rval != QLA_SUCCESS) {
6900 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6901 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6903 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6904 "Done %s.\n", __func__);
6910 int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6915 mbx_cmd_t *mcp = &mc;
6917 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6918 "Entered %s.\n", __func__);
6920 mcp->mb[0] = MBC_READ_REMOTE_REG;
6921 mcp->mb[1] = LSW(addr);
6922 mcp->mb[2] = MSW(addr);
6923 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6924 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6925 mcp->tov = MBX_TOV_SECONDS;
6927 rval = qla2x00_mailbox_command(vha, mcp);
6929 *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);
6931 if (rval != QLA_SUCCESS) {
6932 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6933 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6935 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6936 "Done %s.\n", __func__);
6943 ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
6945 struct qla_hw_data *ha = vha->hw;
6947 mbx_cmd_t *mcp = &mc;
6950 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6951 return QLA_FUNCTION_FAILED;
6953 ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
6956 mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
6957 mcp->mb[1] = options;
6958 mcp->out_mb = MBX_1|MBX_0;
6959 mcp->in_mb = MBX_1|MBX_0;
6960 if (options & BIT_0) {
6961 if (options & BIT_1) {
6962 mcp->mb[2] = led[2];
6963 mcp->out_mb |= MBX_2;
6965 if (options & BIT_2) {
6966 mcp->mb[3] = led[0];
6967 mcp->out_mb |= MBX_3;
6969 if (options & BIT_3) {
6970 mcp->mb[4] = led[1];
6971 mcp->out_mb |= MBX_4;
6974 mcp->in_mb |= MBX_4|MBX_3|MBX_2;
6976 mcp->tov = MBX_TOV_SECONDS;
6978 rval = qla2x00_mailbox_command(vha, mcp);
6980 ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
6981 __func__, rval, mcp->mb[0], mcp->mb[1]);
6985 if (options & BIT_0) {
6986 ha->beacon_blink_led = 0;
6987 ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
6989 led[2] = mcp->mb[2];
6990 led[0] = mcp->mb[3];
6991 led[1] = mcp->mb[4];
6992 ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
6993 __func__, led[0], led[1], led[2]);
7000 * qla_no_op_mb(): This MB is used to check if FW is still alive and
7001 * able to generate an interrupt. Otherwise, a timeout will trigger
7003 * @vha: host adapter pointer
7006 void qla_no_op_mb(struct scsi_qla_host *vha)
7009 mbx_cmd_t *mcp = &mc;
7012 memset(&mc, 0, sizeof(mc));
7013 mcp->mb[0] = 0; // noop cmd= 0
7014 mcp->out_mb = MBX_0;
7018 rval = qla2x00_mailbox_command(vha, mcp);
7021 ql_dbg(ql_dbg_async, vha, 0x7071,
7022 "Failed %s %x\n", __func__, rval);
7026 int qla_mailbox_passthru(scsi_qla_host_t *vha,
7027 uint16_t *mbx_in, uint16_t *mbx_out)
7030 mbx_cmd_t *mcp = &mc;
7033 memset(&mc, 0, sizeof(mc));
7034 /* Receiving all 32 register's contents */
7035 memcpy(&mcp->mb, (char *)mbx_in, (32 * sizeof(uint16_t)));
7037 mcp->out_mb = 0xFFFFFFFF;
7038 mcp->in_mb = 0xFFFFFFFF;
7040 mcp->tov = MBX_TOV_SECONDS;
7044 rval = qla2x00_mailbox_command(vha, mcp);
7046 if (rval != QLA_SUCCESS) {
7047 ql_dbg(ql_dbg_mbx, vha, 0xf0a2,
7048 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
7050 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xf0a3, "Done %s.\n",
7052 /* passing all 32 register's contents */
7053 memcpy(mbx_out, &mcp->mb, 32 * sizeof(uint16_t));