1 // SPDX-License-Identifier: GPL-2.0-only
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
7 #include "qla_target.h"
9 #include <linux/delay.h>
10 #include <linux/gfp.h>
13 #define IS_PPCARCH true
15 #define IS_PPCARCH false
/*
 * Table mapping frequently-traced mailbox command opcodes to short
 * human-readable names; consumed by mb_to_str() below for debug output.
 * NOTE(review): this extract is lossy (original line numbers are fused
 * into each line and lines are missing, e.g. the closing brace);
 * code bytes left untouched.
 */
18 static struct mb_cmd_name {
22 {MBC_GET_PORT_DATABASE, "GPDB"},
23 {MBC_GET_ID_LIST, "GIDList"},
24 {MBC_GET_LINK_PRIV_STATS, "Stats"},
25 {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
/*
 * mb_to_str() - translate a mailbox opcode into the short name recorded
 * in mb_str[] (linear scan); body truncated in this extract.
 */
28 static const char *mb_to_str(uint16_t cmd)
31 struct mb_cmd_name *e;
33 for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
/*
 * Mailbox commands that are allowed to run while the ISP is still
 * executing from ROM / while an ISP abort is pending; checked by
 * is_rom_cmd() so that recovery-related commands are not rejected.
 */
41 static struct rom_cmd {
45 { MBC_EXECUTE_FIRMWARE },
46 { MBC_READ_RAM_WORD },
47 { MBC_MAILBOX_REGISTER_TEST },
48 { MBC_VERIFY_CHECKSUM },
49 { MBC_GET_FIRMWARE_VERSION },
50 { MBC_LOAD_RISC_RAM },
51 { MBC_DUMP_RISC_RAM },
52 { MBC_LOAD_RISC_RAM_EXTENDED },
53 { MBC_DUMP_RISC_RAM_EXTENDED },
54 { MBC_WRITE_RAM_WORD_EXTENDED },
55 { MBC_READ_RAM_EXTENDED },
56 { MBC_GET_RESOURCE_COUNTS },
57 { MBC_SET_FIRMWARE_OPTION },
58 { MBC_MID_INITIALIZE_FIRMWARE },
59 { MBC_GET_FIRMWARE_STATE },
60 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
61 { MBC_GET_RETRY_COUNT },
62 { MBC_TRACE_CONTROL },
63 { MBC_INITIALIZE_MULTIQ },
64 { MBC_IOCB_COMMAND_A64 },
65 { MBC_GET_ADAPTER_LOOP_ID },
67 { MBC_SET_RNID_PARAMS },
68 { MBC_GET_RNID_PARAMS },
69 { MBC_GET_SET_ZIO_THRESHOLD },
/*
 * is_rom_cmd() - return nonzero when @cmd appears in rom_cmds[], i.e.
 * it may be issued while ISP abort/ROM execution is in progress.
 * Body truncated in this extract.
 */
72 static int is_rom_cmd(uint16_t cmd)
77 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
87 * qla2x00_mailbox_command
88 * Issue mailbox command and waits for completion.
91 * ha = adapter block pointer.
92 * mcp = driver internal mbx struct pointer.
95 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
98 * 0 : QLA_SUCCESS = cmd performed successfully
99 * 1 : QLA_FUNCTION_FAILED (error encountered)
100 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
/*
 * qla2x00_mailbox_command() - issue one mailbox command to the ISP and
 * wait for completion, interrupt-driven when the adapter is initialized
 * and no ISP abort is active, otherwise by polling.  Serializes against
 * other mailbox users via ha->mbx_cmd_comp and ha->flags.mbox_busy.
 *
 * NOTE(review): this extract is lossy -- each line carries its original
 * file line number and many lines (including closing braces) are
 * missing, and "&reg" appears mis-encoded as "(R)".  Code bytes are left
 * untouched; comments only.
 */
106 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
109 unsigned long flags = 0;
111 uint8_t abort_active, eeh_delay;
113 uint16_t command = 0;
115 __le16 __iomem *optr;
118 unsigned long wait_time;
119 struct qla_hw_data *ha = vha->hw;
120 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
124 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
/* Early-out guards: any permanent PCI/device failure makes the command
 * pointless; all of them report QLA_FUNCTION_TIMEOUT to the caller. */
126 if (ha->pdev->error_state == pci_channel_io_perm_failure) {
127 ql_log(ql_log_warn, vha, 0x1001,
128 "PCI channel failed permanently, exiting.\n");
129 return QLA_FUNCTION_TIMEOUT;
132 if (vha->device_flags & DFLG_DEV_FAILED) {
133 ql_log(ql_log_warn, vha, 0x1002,
134 "Device in failed state, exiting.\n");
135 return QLA_FUNCTION_TIMEOUT;
138 /* if PCI error, then avoid mbx processing.*/
139 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
140 test_bit(UNLOADING, &base_vha->dpc_flags)) {
141 ql_log(ql_log_warn, vha, 0xd04e,
142 "PCI error, exiting.\n");
143 return QLA_FUNCTION_TIMEOUT;
147 io_lock_on = base_vha->flags.init_done;
150 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
/* Snapshot the reset generation; compared later to detect a chip reset
 * that happened while we were waiting. */
151 chip_reset = ha->chip_reset;
153 if (ha->flags.pci_channel_io_perm_failure) {
154 ql_log(ql_log_warn, vha, 0x1003,
155 "Perm failure on EEH timeout MBX, exiting.\n");
156 return QLA_FUNCTION_TIMEOUT;
159 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
160 /* Setting Link-Down error */
161 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
162 ql_log(ql_log_warn, vha, 0x1004,
163 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
164 return QLA_FUNCTION_TIMEOUT;
167 /* check if ISP abort is active and return cmd with timeout */
168 if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
169 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
170 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
171 !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) {
172 ql_log(ql_log_info, vha, 0x1005,
173 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
175 return QLA_FUNCTION_TIMEOUT;
/* Stage 1: serialize with any in-flight mailbox command. */
178 atomic_inc(&ha->num_pend_mbx_stage1);
180 * Wait for active mailbox commands to finish by waiting at most tov
181 * seconds. This is to serialize actual issuing of mailbox cmds during
182 * non ISP abort time.
184 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
185 /* Timeout occurred. Return error. */
186 ql_log(ql_log_warn, vha, 0xd035,
187 "Cmd access timeout, cmd=0x%x, Exiting.\n",
190 atomic_dec(&ha->num_pend_mbx_stage1);
191 return QLA_FUNCTION_TIMEOUT;
193 atomic_dec(&ha->num_pend_mbx_stage1);
194 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
195 ha->flags.eeh_busy) {
196 ql_log(ql_log_warn, vha, 0xd035,
197 "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
198 ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
204 /* Save mailbox command for debug */
207 ql_dbg(ql_dbg_mbx, vha, 0x1006,
208 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
210 spin_lock_irqsave(&ha->hardware_lock, flags);
212 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
213 ha->flags.mbox_busy) {
215 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Claim the mailbox hardware (released on every exit path below). */
218 ha->flags.mbox_busy = 1;
220 /* Load mailbox registers. */
/* Pick the outgoing-mailbox register bank for this ISP family. */
222 optr = ®->isp82.mailbox_in[0];
223 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
224 optr = ®->isp24.mailbox0;
226 optr = MAILBOX_REG(ha, ®->isp, 0);
229 command = mcp->mb[0];
230 mboxes = mcp->out_mb;
232 ql_dbg(ql_dbg_mbx, vha, 0x1111,
233 "Mailbox registers (OUT):\n");
/* Write each mb[] selected by the out_mb bitmap into the hardware. */
234 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
235 if (IS_QLA2200(ha) && cnt == 8)
236 optr = MAILBOX_REG(ha, ®->isp, 8);
237 if (mboxes & BIT_0) {
238 ql_dbg(ql_dbg_mbx, vha, 0x1112,
239 "mbox[%d]<-0x%04x\n", cnt, *iptr);
240 wrt_reg_word(optr, *iptr);
248 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
249 "I/O Address = %p.\n", optr);
251 /* Issue set host interrupt command to send cmd out. */
252 ha->flags.mbox_int = 0;
253 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
255 /* Unlock mbx registers and wait for interrupt */
256 ql_dbg(ql_dbg_mbx, vha, 0x100f,
257 "Going to unlock irq & waiting for interrupts. "
258 "jiffies=%lx.\n", jiffies);
260 /* Wait for mbx cmd completion until timeout */
261 atomic_inc(&ha->num_pend_mbx_stage2);
/* Interrupt-driven path: adapter initialized, no abort in progress. */
262 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
263 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
266 wrt_reg_dword(®->isp82.hint, HINT_MBX_INT_PENDING);
267 else if (IS_FWI2_CAPABLE(ha))
268 wrt_reg_dword(®->isp24.hccr, HCCRX_SET_HOST_INT);
270 wrt_reg_word(®->isp.hccr, HCCR_SET_HOST_INT);
271 spin_unlock_irqrestore(&ha->hardware_lock, flags);
274 atomic_inc(&ha->num_pend_mbx_stage3);
275 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
277 if (chip_reset != ha->chip_reset) {
278 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
280 spin_lock_irqsave(&ha->hardware_lock, flags);
281 ha->flags.mbox_busy = 0;
282 spin_unlock_irqrestore(&ha->hardware_lock,
284 atomic_dec(&ha->num_pend_mbx_stage2);
285 atomic_dec(&ha->num_pend_mbx_stage3);
289 ql_dbg(ql_dbg_mbx, vha, 0x117a,
290 "cmd=%x Timeout.\n", command);
291 spin_lock_irqsave(&ha->hardware_lock, flags);
292 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
293 spin_unlock_irqrestore(&ha->hardware_lock, flags);
295 } else if (ha->flags.purge_mbox ||
296 chip_reset != ha->chip_reset) {
297 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
299 spin_lock_irqsave(&ha->hardware_lock, flags);
300 ha->flags.mbox_busy = 0;
301 spin_unlock_irqrestore(&ha->hardware_lock, flags);
302 atomic_dec(&ha->num_pend_mbx_stage2);
303 atomic_dec(&ha->num_pend_mbx_stage3);
307 atomic_dec(&ha->num_pend_mbx_stage3);
309 if (time_after(jiffies, wait_time + 5 * HZ))
310 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
311 command, jiffies_to_msecs(jiffies - wait_time));
/* Polling path: spin on mbox_int, servicing the response queue
 * ourselves, until completion or tov expiry. */
313 ql_dbg(ql_dbg_mbx, vha, 0x1011,
314 "Cmd=%x Polling Mode.\n", command);
316 if (IS_P3P_TYPE(ha)) {
317 if (rd_reg_dword(®->isp82.hint) &
318 HINT_MBX_INT_PENDING) {
319 ha->flags.mbox_busy = 0;
320 spin_unlock_irqrestore(&ha->hardware_lock,
322 atomic_dec(&ha->num_pend_mbx_stage2);
323 ql_dbg(ql_dbg_mbx, vha, 0x1012,
324 "Pending mailbox timeout, exiting.\n");
326 rval = QLA_FUNCTION_TIMEOUT;
329 wrt_reg_dword(®->isp82.hint, HINT_MBX_INT_PENDING);
330 } else if (IS_FWI2_CAPABLE(ha))
331 wrt_reg_dword(®->isp24.hccr, HCCRX_SET_HOST_INT);
333 wrt_reg_word(®->isp.hccr, HCCR_SET_HOST_INT);
334 spin_unlock_irqrestore(&ha->hardware_lock, flags);
336 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
337 while (!ha->flags.mbox_int) {
338 if (ha->flags.purge_mbox ||
339 chip_reset != ha->chip_reset) {
340 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
342 spin_lock_irqsave(&ha->hardware_lock, flags);
343 ha->flags.mbox_busy = 0;
344 spin_unlock_irqrestore(&ha->hardware_lock,
346 atomic_dec(&ha->num_pend_mbx_stage2);
351 if (time_after(jiffies, wait_time))
354 /* Check for pending interrupts. */
355 qla2x00_poll(ha->rsp_q_map[0]);
357 if (!ha->flags.mbox_int &&
359 command == MBC_LOAD_RISC_RAM_EXTENDED))
362 ql_dbg(ql_dbg_mbx, vha, 0x1013,
364 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
366 atomic_dec(&ha->num_pend_mbx_stage2);
368 /* Check whether we timed out */
/* Completed: harvest the incoming mailbox registers into mcp->mb[]
 * per the in_mb bitmap; mb[0] != MBS_COMMAND_COMPLETE means failure. */
369 if (ha->flags.mbox_int) {
372 ql_dbg(ql_dbg_mbx, vha, 0x1014,
373 "Cmd=%x completed.\n", command);
375 /* Got interrupt. Clear the flag. */
376 ha->flags.mbox_int = 0;
377 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
379 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
380 spin_lock_irqsave(&ha->hardware_lock, flags);
381 ha->flags.mbox_busy = 0;
382 spin_unlock_irqrestore(&ha->hardware_lock, flags);
384 /* Setting Link-Down error */
385 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
387 rval = QLA_FUNCTION_FAILED;
388 ql_log(ql_log_warn, vha, 0xd048,
389 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
393 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
394 ql_dbg(ql_dbg_mbx, vha, 0x11ff,
395 "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
396 MBS_COMMAND_COMPLETE);
397 rval = QLA_FUNCTION_FAILED;
400 /* Load return mailbox registers. */
402 iptr = (uint16_t *)&ha->mailbox_out[0];
405 ql_dbg(ql_dbg_mbx, vha, 0x1113,
406 "Mailbox registers (IN):\n");
407 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
408 if (mboxes & BIT_0) {
410 ql_dbg(ql_dbg_mbx, vha, 0x1114,
411 "mbox[%d]->0x%04x\n", cnt, *iptr2);
/* Timeout: dump diagnostic register state and, when the PCI device is
 * still reachable, capture a firmware dump for later analysis. */
421 uint32_t ictrl, host_status, hccr;
424 if (IS_FWI2_CAPABLE(ha)) {
425 mb[0] = rd_reg_word(®->isp24.mailbox0);
426 mb[1] = rd_reg_word(®->isp24.mailbox1);
427 mb[2] = rd_reg_word(®->isp24.mailbox2);
428 mb[3] = rd_reg_word(®->isp24.mailbox3);
429 mb[7] = rd_reg_word(®->isp24.mailbox7);
430 ictrl = rd_reg_dword(®->isp24.ictrl);
431 host_status = rd_reg_dword(®->isp24.host_status);
432 hccr = rd_reg_dword(®->isp24.hccr);
434 ql_log(ql_log_warn, vha, 0xd04c,
435 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
436 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
437 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
438 mb[7], host_status, hccr);
442 mb[0] = RD_MAILBOX_REG(ha, ®->isp, 0);
443 ictrl = rd_reg_word(®->isp.ictrl);
444 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
445 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
446 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
449 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
451 /* Capture FW dump only, if PCI device active */
452 if (!pci_channel_offline(vha->hw->pdev)) {
453 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
454 if (w == 0xffff || ictrl == 0xffffffff ||
455 (chip_reset != ha->chip_reset)) {
456 /* This is special case if there is unload
457 * of driver happening and if PCI device go
458 * into bad state due to PCI error condition
459 * then only PCI ERR flag would be set.
460 * we will do premature exit for above case.
462 spin_lock_irqsave(&ha->hardware_lock, flags);
463 ha->flags.mbox_busy = 0;
464 spin_unlock_irqrestore(&ha->hardware_lock,
466 rval = QLA_FUNCTION_TIMEOUT;
470 /* Attempt to capture firmware dump for further
471 * analysis of the current firmware state. we do not
472 * need to do this if we are intentionally generating
475 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
476 qla2xxx_dump_fw(vha);
477 rval = QLA_FUNCTION_TIMEOUT;
480 spin_lock_irqsave(&ha->hardware_lock, flags);
481 ha->flags.mbox_busy = 0;
482 spin_unlock_irqrestore(&ha->hardware_lock, flags);
487 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
488 ql_dbg(ql_dbg_mbx, vha, 0x101a,
489 "Checking for additional resp interrupt.\n");
491 /* polling mode for non isp_abort commands. */
492 qla2x00_poll(ha->rsp_q_map[0]);
/* Timed-out command (other than a deliberately generated system
 * error): schedule an ISP abort via DPC, or run it directly when we
 * ARE the DPC thread. */
495 if (rval == QLA_FUNCTION_TIMEOUT &&
496 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
497 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
498 ha->flags.eeh_busy) {
499 /* not in dpc. schedule it for dpc to take over. */
500 ql_dbg(ql_dbg_mbx, vha, 0x101b,
501 "Timeout, schedule isp_abort_needed.\n");
503 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
504 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
505 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
506 if (IS_QLA82XX(ha)) {
507 ql_dbg(ql_dbg_mbx, vha, 0x112a,
508 "disabling pause transmit on port "
511 QLA82XX_CRB_NIU + 0x98,
512 CRB_NIU_XG_PAUSE_CTL_P0|
513 CRB_NIU_XG_PAUSE_CTL_P1);
515 ql_log(ql_log_info, base_vha, 0x101c,
516 "Mailbox cmd timeout occurred, cmd=0x%x, "
517 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
518 "abort.\n", command, mcp->mb[0],
521 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
522 qla2xxx_wake_dpc(vha);
524 } else if (current == ha->dpc_thread) {
525 /* call abort directly since we are in the DPC thread */
526 ql_dbg(ql_dbg_mbx, vha, 0x101d,
527 "Timeout, calling abort_isp.\n");
529 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
530 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
531 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
532 if (IS_QLA82XX(ha)) {
533 ql_dbg(ql_dbg_mbx, vha, 0x112b,
534 "disabling pause transmit on port "
537 QLA82XX_CRB_NIU + 0x98,
538 CRB_NIU_XG_PAUSE_CTL_P0|
539 CRB_NIU_XG_PAUSE_CTL_P1);
541 ql_log(ql_log_info, base_vha, 0x101e,
542 "Mailbox cmd timeout occurred, cmd=0x%x, "
543 "mb[0]=0x%x. Scheduling ISP abort ",
544 command, mcp->mb[0]);
546 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
547 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
548 /* Allow next mbx cmd to come in. */
549 complete(&ha->mbx_cmd_comp);
550 if (ha->isp_ops->abort_isp(vha) &&
551 !ha->flags.eeh_busy) {
552 /* Failed. retry later. */
553 set_bit(ISP_ABORT_NEEDED,
556 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
557 ql_dbg(ql_dbg_mbx, vha, 0x101f,
558 "Finished abort_isp.\n");
565 /* Allow next mbx cmd to come in. */
566 complete(&ha->mbx_cmd_comp);
569 if (rval == QLA_ABORTED) {
570 ql_log(ql_log_info, vha, 0xd035,
571 "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
574 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
575 pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
576 dev_name(&ha->pdev->dev), 0x1020+0x800,
580 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
581 if (mboxes & BIT_0) {
582 printk(" mb[%u]=%x", i, mcp->mb[i]);
585 pr_warn(" cmd=%x ****\n", command);
587 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
588 ql_dbg(ql_dbg_mbx, vha, 0x1198,
589 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
590 rd_reg_dword(®->isp24.host_status),
591 rd_reg_dword(®->isp24.ictrl),
592 rd_reg_dword(®->isp24.istatus));
594 ql_dbg(ql_dbg_mbx, vha, 0x1206,
595 "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
596 rd_reg_word(®->isp.ctrl_status),
597 rd_reg_word(®->isp.ictrl),
598 rd_reg_word(®->isp.istatus));
601 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
/* EEH hold loop: do not return to the caller while PCI recovery is in
 * flight, so caller-owned DMA buffers are not unmapped mid-recovery. */
605 while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) {
607 * The caller of this mailbox encountered a pci error.
608 * Hold the thread until PCIE link reset complete to make
609 * sure caller does not unmap dma while recovery is
/*
 * qla2x00_load_ram() - transfer a firmware segment from host DMA memory
 * (@req_dma, @risc_code_size words) into RISC RAM at @risc_addr using
 * MBC_LOAD_RISC_RAM, or the _EXTENDED variant when the address needs
 * more than 16 bits or the ISP is FWI2-capable.
 * NOTE(review): lossy extract; code bytes left untouched.
 */
619 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
620 uint32_t risc_code_size)
623 struct qla_hw_data *ha = vha->hw;
625 mbx_cmd_t *mcp = &mc;
627 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
628 "Entered %s.\n", __func__);
630 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
631 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
632 mcp->mb[8] = MSW(risc_addr);
633 mcp->out_mb = MBX_8|MBX_0;
635 mcp->mb[0] = MBC_LOAD_RISC_RAM;
/* DMA address is split across mb[2,3] (low dword) and mb[6,7]
 * (high dword) as 16-bit words. */
638 mcp->mb[1] = LSW(risc_addr);
639 mcp->mb[2] = MSW(req_dma);
640 mcp->mb[3] = LSW(req_dma);
641 mcp->mb[6] = MSW(MSD(req_dma));
642 mcp->mb[7] = LSW(MSD(req_dma));
643 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
644 if (IS_FWI2_CAPABLE(ha)) {
645 mcp->mb[4] = MSW(risc_code_size);
646 mcp->mb[5] = LSW(risc_code_size);
647 mcp->out_mb |= MBX_5|MBX_4;
649 mcp->mb[4] = LSW(risc_code_size);
650 mcp->out_mb |= MBX_4;
653 mcp->in_mb = MBX_1|MBX_0;
654 mcp->tov = MBX_TOV_SECONDS;
656 rval = qla2x00_mailbox_command(vha, mcp);
658 if (rval != QLA_SUCCESS) {
659 ql_dbg(ql_dbg_mbx, vha, 0x1023,
660 "Failed=%x mb[0]=%x mb[1]=%x.\n",
661 rval, mcp->mb[0], mcp->mb[1]);
664 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
665 "Done %s.\n", __func__);
671 #define NVME_ENABLE_FLAG BIT_3
672 #define EDIF_HW_SUPPORT BIT_10
676 * Start adapter firmware.
679 * ha = adapter block pointer.
680 * TARGET_QUEUE_LOCK must be released.
681 * ADAPTER_STATE_LOCK must be released.
684 * qla2x00 local function return status code.
/*
 * qla2x00_execute_fw() - issue MBC_EXECUTE_FIRMWARE to start the loaded
 * firmware at @risc_addr, passing feature flags (NVMe, extended login,
 * exchange offload, minimum link speed) on capable ISPs, then record
 * capability data returned in the mailboxes (ability mask, max/min
 * supported speed, EDIF hardware support).
 * NOTE(review): lossy extract; code bytes left untouched.
 */
690 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
693 struct qla_hw_data *ha = vha->hw;
695 mbx_cmd_t *mcp = &mc;
697 #define EXE_FW_FORCE_SEMAPHORE BIT_7
700 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
701 "Entered %s.\n", __func__);
704 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
707 if (IS_FWI2_CAPABLE(ha)) {
708 mcp->mb[1] = MSW(risc_addr);
709 mcp->mb[2] = LSW(risc_addr);
/* Long-range optics: pass the detected distance to firmware. */
715 if (ha->flags.lr_detected) {
717 if (IS_BPM_RANGE_CAPABLE(ha))
719 ha->lr_distance << LR_DIST_FW_POS;
722 if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
723 mcp->mb[4] |= NVME_ENABLE_FLAG;
725 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
726 struct nvram_81xx *nv = ha->nvram;
727 /* set minimum speed if specified in nvram */
728 if (nv->min_supported_speed >= 2 &&
729 nv->min_supported_speed <= 5) {
731 mcp->mb[11] |= nv->min_supported_speed & 0xF;
732 mcp->out_mb |= MBX_11;
734 vha->min_supported_speed =
735 nv->min_supported_speed;
739 mcp->mb[11] |= BIT_4;
742 if (ha->flags.exlogins_enabled)
743 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
745 if (ha->flags.exchoffld_enabled)
746 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
749 mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;
751 mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
752 mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1;
/* Pre-FWI2 ISPs take only the low word of the start address. */
754 mcp->mb[1] = LSW(risc_addr);
755 mcp->out_mb |= MBX_1;
756 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
758 mcp->out_mb |= MBX_2;
762 mcp->tov = MBX_TOV_SECONDS;
764 rval = qla2x00_mailbox_command(vha, mcp);
766 if (rval != QLA_SUCCESS) {
/* QLA28xx semaphore-stuck error (mb[1]==0x27): retry once with the
 * force-semaphore bit set. */
767 if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
768 mcp->mb[1] == 0x27 && retry) {
771 ql_dbg(ql_dbg_async, vha, 0x1026,
772 "Exe FW: force semaphore.\n");
776 ql_dbg(ql_dbg_mbx, vha, 0x1026,
777 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
782 if (!IS_FWI2_CAPABLE(ha))
785 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
786 ql_dbg(ql_dbg_mbx, vha, 0x119a,
787 "fw_ability_mask=%x.\n", ha->fw_ability_mask);
788 ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
789 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
790 ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
791 ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
792 ha->max_supported_speed == 0 ? "16Gps" :
793 ha->max_supported_speed == 1 ? "32Gps" :
794 ha->max_supported_speed == 2 ? "64Gps" : "unknown");
795 if (vha->min_supported_speed) {
796 ha->min_supported_speed = mcp->mb[5] &
797 (BIT_0 | BIT_1 | BIT_2);
798 ql_dbg(ql_dbg_mbx, vha, 0x119c,
799 "min_supported_speed=%s.\n",
800 ha->min_supported_speed == 6 ? "64Gps" :
801 ha->min_supported_speed == 5 ? "32Gps" :
802 ha->min_supported_speed == 4 ? "16Gps" :
803 ha->min_supported_speed == 3 ? "8Gps" :
804 ha->min_supported_speed == 2 ? "4Gps" : "unknown");
808 if (IS_QLA28XX(ha) && (mcp->mb[5] & EDIF_HW_SUPPORT)) {
809 ha->flags.edif_hw = 1;
810 ql_log(ql_log_info, vha, 0xffff,
811 "%s: edif HW\n", __func__);
815 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
816 "Done %s.\n", __func__);
822 * qla_get_exlogin_status
823 * Get extended login status
824 * uses the memory offload control/status Mailbox
827 * ha: adapter state pointer.
828 * fwopt: firmware options
831 * qla2x00 local function status
836 #define FETCH_XLOGINS_STAT 0x8
/*
 * qla_get_exlogin_status() - query extended-login status via the memory
 * offload control/status mailbox (MBC_GET_MEM_OFFLOAD_CNTRL_STAT,
 * sub-op FETCH_XLOGINS_STAT).  On success, *buf_sz receives the
 * per-login buffer size (mb[4]) and *ex_logins_cnt the login count
 * (mb[10]).
 * NOTE(review): lossy extract; code bytes left untouched.
 */
838 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
839 uint16_t *ex_logins_cnt)
843 mbx_cmd_t *mcp = &mc;
845 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
846 "Entered %s\n", __func__);
848 memset(mcp->mb, 0 , sizeof(mcp->mb));
849 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
850 mcp->mb[1] = FETCH_XLOGINS_STAT;
851 mcp->out_mb = MBX_1|MBX_0;
852 mcp->in_mb = MBX_10|MBX_4|MBX_0;
853 mcp->tov = MBX_TOV_SECONDS;
856 rval = qla2x00_mailbox_command(vha, mcp);
857 if (rval != QLA_SUCCESS) {
858 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
860 *buf_sz = mcp->mb[4];
861 *ex_logins_cnt = mcp->mb[10];
863 ql_log(ql_log_info, vha, 0x1190,
864 "buffer size 0x%x, exchange login count=%d\n",
865 mcp->mb[4], mcp->mb[10]);
867 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
868 "Done %s.\n", __func__);
875 * qla_set_exlogin_mem_cfg
876 * set extended login memory configuration
877 * Mbx needs to be issues before init_cb is set
880 * ha: adapter state pointer.
881 * buffer: buffer pointer
882 * phys_addr: physical address of buffer
883 * size: size of buffer
884 * TARGET_QUEUE_LOCK must be released
885 ADAPTER_STATE_LOCK must be released
888 * qla2x00 local function status code.
893 #define CONFIG_XLOGINS_MEM 0x9
/*
 * qla_set_exlogin_mem_cfg() - hand the pre-allocated extended-login
 * buffer (@phys_addr, ha->exlogin_size bytes) to firmware via
 * MBC_GET_MEM_OFFLOAD_CNTRL_STAT sub-op CONFIG_XLOGINS_MEM.  Must be
 * issued before the init control block is set.
 * NOTE(review): lossy extract; code bytes left untouched.
 */
895 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
899 mbx_cmd_t *mcp = &mc;
900 struct qla_hw_data *ha = vha->hw;
902 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
903 "Entered %s.\n", __func__);
905 memset(mcp->mb, 0 , sizeof(mcp->mb));
906 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
907 mcp->mb[1] = CONFIG_XLOGINS_MEM;
/* 64-bit buffer address split into four 16-bit words. */
908 mcp->mb[2] = MSW(phys_addr);
909 mcp->mb[3] = LSW(phys_addr);
910 mcp->mb[6] = MSW(MSD(phys_addr));
911 mcp->mb[7] = LSW(MSD(phys_addr));
912 mcp->mb[8] = MSW(ha->exlogin_size);
913 mcp->mb[9] = LSW(ha->exlogin_size);
914 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
915 mcp->in_mb = MBX_11|MBX_0;
916 mcp->tov = MBX_TOV_SECONDS;
918 rval = qla2x00_mailbox_command(vha, mcp);
919 if (rval != QLA_SUCCESS) {
920 ql_dbg(ql_dbg_mbx, vha, 0x111b,
921 "EXlogin Failed=%x. MB0=%x MB11=%x\n",
922 rval, mcp->mb[0], mcp->mb[11]);
924 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
925 "Done %s.\n", __func__);
932 * qla_get_exchoffld_status
933 * Get exchange offload status
934 * uses the memory offload control/status Mailbox
937 * ha: adapter state pointer.
938 * fwopt: firmware options
941 * qla2x00 local function status
946 #define FETCH_XCHOFFLD_STAT 0x2
/*
 * qla_get_exchoffld_status() - query exchange-offload status via
 * MBC_GET_MEM_OFFLOAD_CNTRL_STAT sub-op FETCH_XCHOFFLD_STAT.  On
 * success, *buf_sz receives the buffer size (mb[4]) and *ex_logins_cnt
 * the exchange offload count (mb[10]).
 * NOTE(review): lossy extract; code bytes left untouched.
 */
948 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
949 uint16_t *ex_logins_cnt)
953 mbx_cmd_t *mcp = &mc;
955 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
956 "Entered %s\n", __func__);
958 memset(mcp->mb, 0 , sizeof(mcp->mb));
959 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
960 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
961 mcp->out_mb = MBX_1|MBX_0;
962 mcp->in_mb = MBX_10|MBX_4|MBX_0;
963 mcp->tov = MBX_TOV_SECONDS;
966 rval = qla2x00_mailbox_command(vha, mcp);
967 if (rval != QLA_SUCCESS) {
968 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
970 *buf_sz = mcp->mb[4];
971 *ex_logins_cnt = mcp->mb[10];
973 ql_log(ql_log_info, vha, 0x118e,
974 "buffer size 0x%x, exchange offload count=%d\n",
975 mcp->mb[4], mcp->mb[10]);
977 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
978 "Done %s.\n", __func__);
985 * qla_set_exchoffld_mem_cfg
986 * Set exchange offload memory configuration
987 * Mbx needs to be issues before init_cb is set
990 * ha: adapter state pointer.
991 * buffer: buffer pointer
992 * phys_addr: physical address of buffer
993 * size: size of buffer
994 * TARGET_QUEUE_LOCK must be released
995 ADAPTER_STATE_LOCK must be released
998 * qla2x00 local function status code.
1003 #define CONFIG_XCHOFFLD_MEM 0x3
/*
 * qla_set_exchoffld_mem_cfg() - hand the pre-allocated exchange-offload
 * buffer (ha->exchoffld_buf_dma, ha->exchoffld_size bytes) to firmware
 * via MBC_GET_MEM_OFFLOAD_CNTRL_STAT sub-op CONFIG_XCHOFFLD_MEM.  Must
 * be issued before the init control block is set.
 * NOTE(review): lossy extract; code bytes left untouched.
 */
1005 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
1009 mbx_cmd_t *mcp = &mc;
1010 struct qla_hw_data *ha = vha->hw;
1012 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
1013 "Entered %s.\n", __func__);
1015 memset(mcp->mb, 0 , sizeof(mcp->mb));
1016 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
1017 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
/* 64-bit buffer address split into four 16-bit words. */
1018 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
1019 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
1020 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
1021 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
1022 mcp->mb[8] = MSW(ha->exchoffld_size);
1023 mcp->mb[9] = LSW(ha->exchoffld_size);
1024 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1025 mcp->in_mb = MBX_11|MBX_0;
1026 mcp->tov = MBX_TOV_SECONDS;
1028 rval = qla2x00_mailbox_command(vha, mcp);
1029 if (rval != QLA_SUCCESS) {
1031 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
1033 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
1034 "Done %s.\n", __func__);
1041 * qla2x00_get_fw_version
1042 * Get firmware version.
1045 * ha: adapter state pointer.
1046 * major: pointer for major number.
1047 * minor: pointer for minor number.
1048 * subminor: pointer for subminor number.
1051 * qla2x00 local function return status code.
/*
 * qla2x00_get_fw_version() - issue MBC_GET_FIRMWARE_VERSION and unpack
 * the returned mailboxes into ha/vha state: firmware version and
 * attributes, memory size, MPI/PHY/serdes/PEP versions, shared-RAM and
 * DDR ranges, and feature flags (extended login, exchange offload,
 * FC-NVMe/NVMe2, EDIF, secure flash, SCM).
 * NOTE(review): lossy extract; code bytes left untouched.
 */
1057 qla2x00_get_fw_version(scsi_qla_host_t *vha)
1061 mbx_cmd_t *mcp = &mc;
1062 struct qla_hw_data *ha = vha->hw;
1064 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
1065 "Entered %s.\n", __func__);
1067 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
1068 mcp->out_mb = MBX_0;
/* The set of interesting return mailboxes grows with ISP generation. */
1069 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1070 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
1071 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
1072 if (IS_FWI2_CAPABLE(ha))
1073 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
1074 if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1076 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
1077 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;
1080 mcp->tov = MBX_TOV_SECONDS;
1081 rval = qla2x00_mailbox_command(vha, mcp);
1082 if (rval != QLA_SUCCESS)
1085 /* Return mailbox data. */
1086 ha->fw_major_version = mcp->mb[1];
1087 ha->fw_minor_version = mcp->mb[2];
1088 ha->fw_subminor_version = mcp->mb[3];
1089 ha->fw_attributes = mcp->mb[6];
1090 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
1091 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
1093 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1095 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1096 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1097 ha->mpi_version[1] = mcp->mb[11] >> 8;
1098 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1099 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1100 ha->phy_version[0] = mcp->mb[8] & 0xff;
1101 ha->phy_version[1] = mcp->mb[9] >> 8;
1102 ha->phy_version[2] = mcp->mb[9] & 0xff;
1105 if (IS_FWI2_CAPABLE(ha)) {
1106 ha->fw_attributes_h = mcp->mb[15];
1107 ha->fw_attributes_ext[0] = mcp->mb[16];
1108 ha->fw_attributes_ext[1] = mcp->mb[17];
1109 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1110 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1111 __func__, mcp->mb[15], mcp->mb[6]);
1112 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1113 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1114 __func__, mcp->mb[17], mcp->mb[16]);
1116 if (ha->fw_attributes_h & 0x4)
1117 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1118 "%s: Firmware supports Extended Login 0x%x\n",
1119 __func__, ha->fw_attributes_h);
1121 if (ha->fw_attributes_h & 0x8)
1122 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1123 "%s: Firmware supports Exchange Offload 0x%x\n",
1124 __func__, ha->fw_attributes_h);
1127 * FW supports nvme and driver load parameter requested nvme.
1128 * BIT 26 of fw_attributes indicates NVMe support.
1130 if ((ha->fw_attributes_h &
1131 (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
1133 if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
1134 vha->flags.nvme_first_burst = 1;
1136 vha->flags.nvme_enabled = 1;
1137 ql_log(ql_log_info, vha, 0xd302,
1138 "%s: FC-NVMe is Enabled (0x%x)\n",
1139 __func__, ha->fw_attributes_h);
1142 /* BIT_13 of Extended FW Attributes informs about NVMe2 support */
1143 if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
1144 ql_log(ql_log_info, vha, 0xd302,
1145 "Firmware supports NVMe2 0x%x\n",
1146 ha->fw_attributes_ext[0]);
1147 vha->flags.nvme2_enabled = 1;
1150 if (IS_QLA28XX(ha) && ha->flags.edif_hw && ql2xsecenable &&
1151 (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_EDIF)) {
1152 ha->flags.edif_enabled = 1;
1153 ql_log(ql_log_info, vha, 0xffff,
1154 "%s: edif is enabled\n", __func__);
1158 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1159 ha->serdes_version[0] = mcp->mb[7] & 0xff;
1160 ha->serdes_version[1] = mcp->mb[8] >> 8;
1161 ha->serdes_version[2] = mcp->mb[8] & 0xff;
1162 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1163 ha->mpi_version[1] = mcp->mb[11] >> 8;
1164 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1165 ha->pep_version[0] = mcp->mb[13] & 0xff;
1166 ha->pep_version[1] = mcp->mb[14] >> 8;
1167 ha->pep_version[2] = mcp->mb[14] & 0xff;
1168 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1169 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1170 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1171 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1172 if (IS_QLA28XX(ha)) {
1173 if (mcp->mb[16] & BIT_10)
1174 ha->flags.secure_fw = 1;
1176 ql_log(ql_log_info, vha, 0xffff,
1177 "Secure Flash Update in FW: %s\n",
1178 (ha->flags.secure_fw) ? "Supported" :
1182 if (ha->flags.scm_supported_a &&
1183 (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
1184 ha->flags.scm_supported_f = 1;
1185 ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
1187 ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
1188 (ha->flags.scm_supported_f) ? "Supported" :
1191 if (vha->flags.nvme2_enabled) {
1192 /* set BIT_15 of special feature control block for SLER */
1193 ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
1194 /* set BIT_14 of special feature control block for PI CTRL*/
1195 ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
1200 if (rval != QLA_SUCCESS) {
1202 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1205 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1206 "Done %s.\n", __func__);
1212 * qla2x00_get_fw_options
1213 * Set firmware options.
1216 * ha = adapter block pointer.
1217 * fwopt = pointer for firmware options.
1220 * qla2x00 local function return status code.
/*
 * qla2x00_get_fw_options() - read the current firmware option words via
 * MBC_GET_FIRMWARE_OPTION into fwopts[0..3] (caller-supplied array).
 * NOTE(review): lossy extract; code bytes left untouched.
 */
1226 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1230 mbx_cmd_t *mcp = &mc;
1232 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1233 "Entered %s.\n", __func__);
1235 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1236 mcp->out_mb = MBX_0;
1237 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1238 mcp->tov = MBX_TOV_SECONDS;
1240 rval = qla2x00_mailbox_command(vha, mcp);
1242 if (rval != QLA_SUCCESS) {
1244 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1246 fwopts[0] = mcp->mb[0];
1247 fwopts[1] = mcp->mb[1];
1248 fwopts[2] = mcp->mb[2];
1249 fwopts[3] = mcp->mb[3];
1251 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1252 "Done %s.\n", __func__);
1260 * qla2x00_set_fw_options
1261 * Set firmware options.
1264 * ha = adapter block pointer.
1265 * fwopt = pointer for firmware options.
1268 * qla2x00 local function return status code.
/*
 * qla2x00_set_fw_options() - program firmware option words via
 * MBC_SET_FIRMWARE_OPTION.  fwopts[1..3] are always sent; FWI2-capable
 * ISPs additionally take fwopts[10], older ISPs fwopts[10..11] plus a
 * zeroed mb[12].  fwopts[0] is overwritten with the completion status.
 * NOTE(review): lossy extract; code bytes left untouched.
 */
1274 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1278 mbx_cmd_t *mcp = &mc;
1280 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1281 "Entered %s.\n", __func__);
1283 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1284 mcp->mb[1] = fwopts[1];
1285 mcp->mb[2] = fwopts[2];
1286 mcp->mb[3] = fwopts[3];
1287 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1289 if (IS_FWI2_CAPABLE(vha->hw)) {
1290 mcp->in_mb |= MBX_1;
1291 mcp->mb[10] = fwopts[10];
1292 mcp->out_mb |= MBX_10;
1294 mcp->mb[10] = fwopts[10];
1295 mcp->mb[11] = fwopts[11];
1296 mcp->mb[12] = 0; /* Undocumented, but used */
1297 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1299 mcp->tov = MBX_TOV_SECONDS;
1301 rval = qla2x00_mailbox_command(vha, mcp);
1303 fwopts[0] = mcp->mb[0];
1305 if (rval != QLA_SUCCESS) {
1307 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1308 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1311 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1312 "Done %s.\n", __func__);
1319 * qla2x00_mbx_reg_test
1320 * Mailbox register wrap test.
1323 * ha = adapter block pointer.
1324 * TARGET_QUEUE_LOCK must be released.
1325 * ADAPTER_STATE_LOCK must be released.
1328 * qla2x00 local function return status code.
1334 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1338 mbx_cmd_t *mcp = &mc;
1340 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1341 "Entered %s.\n", __func__);
1343 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1344 mcp->mb[1] = 0xAAAA;
1345 mcp->mb[2] = 0x5555;
1346 mcp->mb[3] = 0xAA55;
1347 mcp->mb[4] = 0x55AA;
1348 mcp->mb[5] = 0xA5A5;
1349 mcp->mb[6] = 0x5A5A;
1350 mcp->mb[7] = 0x2525;
1351 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1352 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1353 mcp->tov = MBX_TOV_SECONDS;
1355 rval = qla2x00_mailbox_command(vha, mcp);
1357 if (rval == QLA_SUCCESS) {
1358 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1359 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1360 rval = QLA_FUNCTION_FAILED;
1361 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1362 mcp->mb[7] != 0x2525)
1363 rval = QLA_FUNCTION_FAILED;
1366 if (rval != QLA_SUCCESS) {
1368 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1372 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1373 "Done %s.\n", __func__);
1380 * qla2x00_verify_checksum
1381 * Verify firmware checksum.
1384 * ha = adapter block pointer.
1385 * TARGET_QUEUE_LOCK must be released.
1386 * ADAPTER_STATE_LOCK must be released.
1389 * qla2x00 local function return status code.
1395 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1399 mbx_cmd_t *mcp = &mc;
1401 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1402 "Entered %s.\n", __func__);
1404 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1405 mcp->out_mb = MBX_0;
1407 if (IS_FWI2_CAPABLE(vha->hw)) {
1408 mcp->mb[1] = MSW(risc_addr);
1409 mcp->mb[2] = LSW(risc_addr);
1410 mcp->out_mb |= MBX_2|MBX_1;
1411 mcp->in_mb |= MBX_2|MBX_1;
1413 mcp->mb[1] = LSW(risc_addr);
1414 mcp->out_mb |= MBX_1;
1415 mcp->in_mb |= MBX_1;
1418 mcp->tov = MBX_TOV_SECONDS;
1420 rval = qla2x00_mailbox_command(vha, mcp);
1422 if (rval != QLA_SUCCESS) {
1423 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1424 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1425 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1427 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1428 "Done %s.\n", __func__);
1435 * qla2x00_issue_iocb
1436 * Issue IOCB using mailbox command
1439 * ha = adapter state pointer.
1440 * buffer = buffer pointer.
1441 * phys_addr = physical address of buffer.
1442 * size = size of buffer.
1443 * TARGET_QUEUE_LOCK must be released.
1444 * ADAPTER_STATE_LOCK must be released.
1447 * qla2x00 local function return status code.
1453 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1454 dma_addr_t phys_addr, size_t size, uint32_t tov)
1458 mbx_cmd_t *mcp = &mc;
1460 if (!vha->hw->flags.fw_started)
1461 return QLA_INVALID_COMMAND;
1463 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1464 "Entered %s.\n", __func__);
1466 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1468 mcp->mb[2] = MSW(LSD(phys_addr));
1469 mcp->mb[3] = LSW(LSD(phys_addr));
1470 mcp->mb[6] = MSW(MSD(phys_addr));
1471 mcp->mb[7] = LSW(MSD(phys_addr));
1472 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1473 mcp->in_mb = MBX_1|MBX_0;
1476 rval = qla2x00_mailbox_command(vha, mcp);
1478 if (rval != QLA_SUCCESS) {
1480 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1482 sts_entry_t *sts_entry = buffer;
1484 /* Mask reserved bits. */
1485 sts_entry->entry_status &=
1486 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1487 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1488 "Done %s (status=%x).\n", __func__,
1489 sts_entry->entry_status);
1496 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1499 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1504 * qla2x00_abort_command
1505 * Abort command aborts a specified IOCB.
1508 * ha = adapter block pointer.
1509 * sp = SB structure pointer.
1512 * qla2x00 local function return status code.
1518 qla2x00_abort_command(srb_t *sp)
1520 unsigned long flags = 0;
1522 uint32_t handle = 0;
1524 mbx_cmd_t *mcp = &mc;
1525 fc_port_t *fcport = sp->fcport;
1526 scsi_qla_host_t *vha = fcport->vha;
1527 struct qla_hw_data *ha = vha->hw;
1528 struct req_que *req;
1529 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1531 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1532 "Entered %s.\n", __func__);
1535 req = sp->qpair->req;
1539 spin_lock_irqsave(&ha->hardware_lock, flags);
1540 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1541 if (req->outstanding_cmds[handle] == sp)
1544 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1546 if (handle == req->num_outstanding_cmds) {
1547 /* command not found */
1548 return QLA_FUNCTION_FAILED;
1551 mcp->mb[0] = MBC_ABORT_COMMAND;
1552 if (HAS_EXTENDED_IDS(ha))
1553 mcp->mb[1] = fcport->loop_id;
1555 mcp->mb[1] = fcport->loop_id << 8;
1556 mcp->mb[2] = (uint16_t)handle;
1557 mcp->mb[3] = (uint16_t)(handle >> 16);
1558 mcp->mb[6] = (uint16_t)cmd->device->lun;
1559 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1561 mcp->tov = MBX_TOV_SECONDS;
1563 rval = qla2x00_mailbox_command(vha, mcp);
1565 if (rval != QLA_SUCCESS) {
1566 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1568 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1569 "Done %s.\n", __func__);
1576 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1580 mbx_cmd_t *mcp = &mc;
1581 scsi_qla_host_t *vha;
1585 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1586 "Entered %s.\n", __func__);
1588 mcp->mb[0] = MBC_ABORT_TARGET;
1589 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1590 if (HAS_EXTENDED_IDS(vha->hw)) {
1591 mcp->mb[1] = fcport->loop_id;
1593 mcp->out_mb |= MBX_10;
1595 mcp->mb[1] = fcport->loop_id << 8;
1597 mcp->mb[2] = vha->hw->loop_reset_delay;
1598 mcp->mb[9] = vha->vp_idx;
1601 mcp->tov = MBX_TOV_SECONDS;
1603 rval = qla2x00_mailbox_command(vha, mcp);
1604 if (rval != QLA_SUCCESS) {
1605 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1606 "Failed=%x.\n", rval);
1609 /* Issue marker IOCB. */
1610 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
1612 if (rval2 != QLA_SUCCESS) {
1613 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1614 "Failed to issue marker IOCB (%x).\n", rval2);
1616 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1617 "Done %s.\n", __func__);
1624 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1628 mbx_cmd_t *mcp = &mc;
1629 scsi_qla_host_t *vha;
1633 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1634 "Entered %s.\n", __func__);
1636 mcp->mb[0] = MBC_LUN_RESET;
1637 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1638 if (HAS_EXTENDED_IDS(vha->hw))
1639 mcp->mb[1] = fcport->loop_id;
1641 mcp->mb[1] = fcport->loop_id << 8;
1642 mcp->mb[2] = (u32)l;
1644 mcp->mb[9] = vha->vp_idx;
1647 mcp->tov = MBX_TOV_SECONDS;
1649 rval = qla2x00_mailbox_command(vha, mcp);
1650 if (rval != QLA_SUCCESS) {
1651 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1654 /* Issue marker IOCB. */
1655 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
1657 if (rval2 != QLA_SUCCESS) {
1658 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1659 "Failed to issue marker IOCB (%x).\n", rval2);
1661 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1662 "Done %s.\n", __func__);
1669 * qla2x00_get_adapter_id
1670 * Get adapter ID and topology.
1673 * ha = adapter block pointer.
1674 * id = pointer for loop ID.
1675 * al_pa = pointer for AL_PA.
1676 * area = pointer for area.
1677 * domain = pointer for domain.
1678 * top = pointer for topology.
1679 * TARGET_QUEUE_LOCK must be released.
1680 * ADAPTER_STATE_LOCK must be released.
1683 * qla2x00 local function return status code.
1689 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1690 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1694 mbx_cmd_t *mcp = &mc;
1696 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1697 "Entered %s.\n", __func__);
1699 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1700 mcp->mb[9] = vha->vp_idx;
1701 mcp->out_mb = MBX_9|MBX_0;
1702 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1703 if (IS_CNA_CAPABLE(vha->hw))
1704 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1705 if (IS_FWI2_CAPABLE(vha->hw))
1706 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1707 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
1708 mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23;
1710 mcp->tov = MBX_TOV_SECONDS;
1712 rval = qla2x00_mailbox_command(vha, mcp);
1713 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1714 rval = QLA_COMMAND_ERROR;
1715 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1716 rval = QLA_INVALID_COMMAND;
1720 *al_pa = LSB(mcp->mb[2]);
1721 *area = MSB(mcp->mb[2]);
1722 *domain = LSB(mcp->mb[3]);
1724 *sw_cap = mcp->mb[7];
1726 if (rval != QLA_SUCCESS) {
1728 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1730 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1731 "Done %s.\n", __func__);
1733 if (IS_CNA_CAPABLE(vha->hw)) {
1734 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1735 vha->fcoe_fcf_idx = mcp->mb[10];
1736 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1737 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1738 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1739 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1740 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1741 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1743 /* If FA-WWN supported */
1744 if (IS_FAWWN_CAPABLE(vha->hw)) {
1745 if (mcp->mb[7] & BIT_14) {
1746 vha->port_name[0] = MSB(mcp->mb[16]);
1747 vha->port_name[1] = LSB(mcp->mb[16]);
1748 vha->port_name[2] = MSB(mcp->mb[17]);
1749 vha->port_name[3] = LSB(mcp->mb[17]);
1750 vha->port_name[4] = MSB(mcp->mb[18]);
1751 vha->port_name[5] = LSB(mcp->mb[18]);
1752 vha->port_name[6] = MSB(mcp->mb[19]);
1753 vha->port_name[7] = LSB(mcp->mb[19]);
1754 fc_host_port_name(vha->host) =
1755 wwn_to_u64(vha->port_name);
1756 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1757 "FA-WWN acquired %016llx\n",
1758 wwn_to_u64(vha->port_name));
1762 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
1763 vha->bbcr = mcp->mb[15];
1764 if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) {
1765 ql_log(ql_log_info, vha, 0x11a4,
1766 "SCM: EDC ELS completed, flags 0x%x\n",
1769 if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) {
1770 vha->hw->flags.scm_enabled = 1;
1771 vha->scm_fabric_connection_flags |=
1772 SCM_FLAG_RDF_COMPLETED;
1773 ql_log(ql_log_info, vha, 0x11a5,
1774 "SCM: RDF ELS completed, flags 0x%x\n",
1784 * qla2x00_get_retry_cnt
1785 * Get current firmware login retry count and delay.
1788 * ha = adapter block pointer.
1789 * retry_cnt = pointer to login retry count.
1790 * tov = pointer to login timeout value.
1793 * qla2x00 local function return status code.
1799 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1805 mbx_cmd_t *mcp = &mc;
1807 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1808 "Entered %s.\n", __func__);
1810 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1811 mcp->out_mb = MBX_0;
1812 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1813 mcp->tov = MBX_TOV_SECONDS;
1815 rval = qla2x00_mailbox_command(vha, mcp);
1817 if (rval != QLA_SUCCESS) {
1819 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1820 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1822 /* Convert returned data and check our values. */
1823 *r_a_tov = mcp->mb[3] / 2;
1824 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1825 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1826 /* Update to the larger values */
1827 *retry_cnt = (uint8_t)mcp->mb[1];
1831 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1832 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1839 * qla2x00_init_firmware
1840 * Initialize adapter firmware.
1843 * ha = adapter block pointer.
1844 * dptr = Initialization control block pointer.
1845 * size = size of initialization control block.
1846 * TARGET_QUEUE_LOCK must be released.
1847 * ADAPTER_STATE_LOCK must be released.
1850 * qla2x00 local function return status code.
1856 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1860 mbx_cmd_t *mcp = &mc;
1861 struct qla_hw_data *ha = vha->hw;
1863 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1864 "Entered %s.\n", __func__);
1866 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1867 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1868 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1870 if (ha->flags.npiv_supported)
1871 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1873 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1876 mcp->mb[2] = MSW(ha->init_cb_dma);
1877 mcp->mb[3] = LSW(ha->init_cb_dma);
1878 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1879 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1880 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1881 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1883 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1884 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1885 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1886 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1887 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1888 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1891 if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
1892 mcp->mb[1] |= BIT_1;
1893 mcp->mb[16] = MSW(ha->sf_init_cb_dma);
1894 mcp->mb[17] = LSW(ha->sf_init_cb_dma);
1895 mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma));
1896 mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma));
1897 mcp->mb[15] = sizeof(*ha->sf_init_cb);
1898 mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15;
1901 /* 1 and 2 should normally be captured. */
1902 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1903 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
1904 /* mb3 is additional info about the installed SFP. */
1905 mcp->in_mb |= MBX_3;
1906 mcp->buf_size = size;
1907 mcp->flags = MBX_DMA_OUT;
1908 mcp->tov = MBX_TOV_SECONDS;
1909 rval = qla2x00_mailbox_command(vha, mcp);
1911 if (rval != QLA_SUCCESS) {
1913 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1914 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
1915 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1917 ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
1918 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1919 0x0104d, ha->init_cb, sizeof(*ha->init_cb));
1921 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1922 ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
1923 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1924 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
1927 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1928 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1929 ql_dbg(ql_dbg_mbx, vha, 0x119d,
1930 "Invalid SFP/Validation Failed\n");
1932 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1933 "Done %s.\n", __func__);
1941 * qla2x00_get_port_database
1942 * Issue normal/enhanced get port database mailbox command
1943 * and copy device name as necessary.
1946 * ha = adapter state pointer.
1947 * dev = structure pointer.
1948 * opt = enhanced cmd option byte.
1951 * qla2x00 local function return status code.
1957 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1961 mbx_cmd_t *mcp = &mc;
1962 port_database_t *pd;
1963 struct port_database_24xx *pd24;
1965 struct qla_hw_data *ha = vha->hw;
1967 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1968 "Entered %s.\n", __func__);
1971 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1973 ql_log(ql_log_warn, vha, 0x1050,
1974 "Failed to allocate port database structure.\n");
1976 return QLA_MEMORY_ALLOC_FAILED;
1979 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1980 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1981 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1982 mcp->mb[2] = MSW(pd_dma);
1983 mcp->mb[3] = LSW(pd_dma);
1984 mcp->mb[6] = MSW(MSD(pd_dma));
1985 mcp->mb[7] = LSW(MSD(pd_dma));
1986 mcp->mb[9] = vha->vp_idx;
1987 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1989 if (IS_FWI2_CAPABLE(ha)) {
1990 mcp->mb[1] = fcport->loop_id;
1992 mcp->out_mb |= MBX_10|MBX_1;
1993 mcp->in_mb |= MBX_1;
1994 } else if (HAS_EXTENDED_IDS(ha)) {
1995 mcp->mb[1] = fcport->loop_id;
1997 mcp->out_mb |= MBX_10|MBX_1;
1999 mcp->mb[1] = fcport->loop_id << 8 | opt;
2000 mcp->out_mb |= MBX_1;
2002 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
2003 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
2004 mcp->flags = MBX_DMA_IN;
2005 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2006 rval = qla2x00_mailbox_command(vha, mcp);
2007 if (rval != QLA_SUCCESS)
2010 if (IS_FWI2_CAPABLE(ha)) {
2012 u8 current_login_state, last_login_state;
2014 pd24 = (struct port_database_24xx *) pd;
2016 /* Check for logged in state. */
2017 if (NVME_TARGET(ha, fcport)) {
2018 current_login_state = pd24->current_login_state >> 4;
2019 last_login_state = pd24->last_login_state >> 4;
2021 current_login_state = pd24->current_login_state & 0xf;
2022 last_login_state = pd24->last_login_state & 0xf;
2024 fcport->current_login_state = pd24->current_login_state;
2025 fcport->last_login_state = pd24->last_login_state;
2027 /* Check for logged in state. */
2028 if (current_login_state != PDS_PRLI_COMPLETE &&
2029 last_login_state != PDS_PRLI_COMPLETE) {
2030 ql_dbg(ql_dbg_mbx, vha, 0x119a,
2031 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
2032 current_login_state, last_login_state,
2034 rval = QLA_FUNCTION_FAILED;
2040 if (fcport->loop_id == FC_NO_LOOP_ID ||
2041 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2042 memcmp(fcport->port_name, pd24->port_name, 8))) {
2043 /* We lost the device mid way. */
2044 rval = QLA_NOT_LOGGED_IN;
2048 /* Names are little-endian. */
2049 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
2050 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
2052 /* Get port_id of device. */
2053 fcport->d_id.b.domain = pd24->port_id[0];
2054 fcport->d_id.b.area = pd24->port_id[1];
2055 fcport->d_id.b.al_pa = pd24->port_id[2];
2056 fcport->d_id.b.rsvd_1 = 0;
2058 /* If not target must be initiator or unknown type. */
2059 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
2060 fcport->port_type = FCT_INITIATOR;
2062 fcport->port_type = FCT_TARGET;
2064 /* Passback COS information. */
2065 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
2066 FC_COS_CLASS2 : FC_COS_CLASS3;
2068 if (pd24->prli_svc_param_word_3[0] & BIT_7)
2069 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
2073 /* Check for logged in state. */
2074 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
2075 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
2076 ql_dbg(ql_dbg_mbx, vha, 0x100a,
2077 "Unable to verify login-state (%x/%x) - "
2078 "portid=%02x%02x%02x.\n", pd->master_state,
2079 pd->slave_state, fcport->d_id.b.domain,
2080 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2081 rval = QLA_FUNCTION_FAILED;
2085 if (fcport->loop_id == FC_NO_LOOP_ID ||
2086 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2087 memcmp(fcport->port_name, pd->port_name, 8))) {
2088 /* We lost the device mid way. */
2089 rval = QLA_NOT_LOGGED_IN;
2093 /* Names are little-endian. */
2094 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
2095 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
2097 /* Get port_id of device. */
2098 fcport->d_id.b.domain = pd->port_id[0];
2099 fcport->d_id.b.area = pd->port_id[3];
2100 fcport->d_id.b.al_pa = pd->port_id[2];
2101 fcport->d_id.b.rsvd_1 = 0;
2103 /* If not target must be initiator or unknown type. */
2104 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
2105 fcport->port_type = FCT_INITIATOR;
2107 fcport->port_type = FCT_TARGET;
2109 /* Passback COS information. */
2110 fcport->supported_classes = (pd->options & BIT_4) ?
2111 FC_COS_CLASS2 : FC_COS_CLASS3;
2115 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
2118 if (rval != QLA_SUCCESS) {
2119 ql_dbg(ql_dbg_mbx, vha, 0x1052,
2120 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
2121 mcp->mb[0], mcp->mb[1]);
2123 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
2124 "Done %s.\n", __func__);
2131 qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle,
2132 struct port_database_24xx *pdb)
2135 mbx_cmd_t *mcp = &mc;
2139 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115,
2140 "Entered %s.\n", __func__);
2142 memset(pdb, 0, sizeof(*pdb));
2144 pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb,
2145 sizeof(*pdb), DMA_FROM_DEVICE);
2147 ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n");
2148 return QLA_MEMORY_ALLOC_FAILED;
2151 mcp->mb[0] = MBC_GET_PORT_DATABASE;
2152 mcp->mb[1] = nport_handle;
2153 mcp->mb[2] = MSW(LSD(pdb_dma));
2154 mcp->mb[3] = LSW(LSD(pdb_dma));
2155 mcp->mb[6] = MSW(MSD(pdb_dma));
2156 mcp->mb[7] = LSW(MSD(pdb_dma));
2159 mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2160 mcp->in_mb = MBX_1|MBX_0;
2161 mcp->buf_size = sizeof(*pdb);
2162 mcp->flags = MBX_DMA_IN;
2163 mcp->tov = vha->hw->login_timeout * 2;
2164 rval = qla2x00_mailbox_command(vha, mcp);
2166 if (rval != QLA_SUCCESS) {
2167 ql_dbg(ql_dbg_mbx, vha, 0x111a,
2168 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2169 rval, mcp->mb[0], mcp->mb[1]);
2171 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b,
2172 "Done %s.\n", __func__);
2175 dma_unmap_single(&vha->hw->pdev->dev, pdb_dma,
2176 sizeof(*pdb), DMA_FROM_DEVICE);
2182 * qla2x00_get_firmware_state
2183 * Get adapter firmware state.
2186 * ha = adapter block pointer.
2187 * dptr = pointer for firmware state.
2188 * TARGET_QUEUE_LOCK must be released.
2189 * ADAPTER_STATE_LOCK must be released.
2192 * qla2x00 local function return status code.
2198 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
2202 mbx_cmd_t *mcp = &mc;
2203 struct qla_hw_data *ha = vha->hw;
2205 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
2206 "Entered %s.\n", __func__);
2208 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
2209 mcp->out_mb = MBX_0;
2210 if (IS_FWI2_CAPABLE(vha->hw))
2211 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2213 mcp->in_mb = MBX_1|MBX_0;
2214 mcp->tov = MBX_TOV_SECONDS;
2216 rval = qla2x00_mailbox_command(vha, mcp);
2218 /* Return firmware states. */
2219 states[0] = mcp->mb[1];
2220 if (IS_FWI2_CAPABLE(vha->hw)) {
2221 states[1] = mcp->mb[2];
2222 states[2] = mcp->mb[3]; /* SFP info */
2223 states[3] = mcp->mb[4];
2224 states[4] = mcp->mb[5];
2225 states[5] = mcp->mb[6]; /* DPORT status */
2228 if (rval != QLA_SUCCESS) {
2230 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
2232 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2233 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2234 ql_dbg(ql_dbg_mbx, vha, 0x119e,
2235 "Invalid SFP/Validation Failed\n");
2237 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2238 "Done %s.\n", __func__);
2245 * qla2x00_get_port_name
2246 * Issue get port name mailbox command.
2247 * Returned name is in big endian format.
2250 * ha = adapter block pointer.
2251 * loop_id = loop ID of device.
2252 * name = pointer for name.
2253 * TARGET_QUEUE_LOCK must be released.
2254 * ADAPTER_STATE_LOCK must be released.
2257 * qla2x00 local function return status code.
2263 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2268 mbx_cmd_t *mcp = &mc;
2270 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2271 "Entered %s.\n", __func__);
2273 mcp->mb[0] = MBC_GET_PORT_NAME;
2274 mcp->mb[9] = vha->vp_idx;
2275 mcp->out_mb = MBX_9|MBX_1|MBX_0;
2276 if (HAS_EXTENDED_IDS(vha->hw)) {
2277 mcp->mb[1] = loop_id;
2279 mcp->out_mb |= MBX_10;
2281 mcp->mb[1] = loop_id << 8 | opt;
2284 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2285 mcp->tov = MBX_TOV_SECONDS;
2287 rval = qla2x00_mailbox_command(vha, mcp);
2289 if (rval != QLA_SUCCESS) {
2291 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2294 /* This function returns name in big endian. */
2295 name[0] = MSB(mcp->mb[2]);
2296 name[1] = LSB(mcp->mb[2]);
2297 name[2] = MSB(mcp->mb[3]);
2298 name[3] = LSB(mcp->mb[3]);
2299 name[4] = MSB(mcp->mb[6]);
2300 name[5] = LSB(mcp->mb[6]);
2301 name[6] = MSB(mcp->mb[7]);
2302 name[7] = LSB(mcp->mb[7]);
2305 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2306 "Done %s.\n", __func__);
2313 * qla24xx_link_initialization
2314 * Issue link initialization mailbox command.
2317 * ha = adapter block pointer.
2318 * TARGET_QUEUE_LOCK must be released.
2319 * ADAPTER_STATE_LOCK must be released.
2322 * qla2x00 local function return status code.
2328 qla24xx_link_initialize(scsi_qla_host_t *vha)
2332 mbx_cmd_t *mcp = &mc;
2334 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2335 "Entered %s.\n", __func__);
2337 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2338 return QLA_FUNCTION_FAILED;
2340 mcp->mb[0] = MBC_LINK_INITIALIZATION;
2342 if (vha->hw->operating_mode == LOOP)
2343 mcp->mb[1] |= BIT_6;
2345 mcp->mb[1] |= BIT_5;
2348 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2350 mcp->tov = MBX_TOV_SECONDS;
2352 rval = qla2x00_mailbox_command(vha, mcp);
2354 if (rval != QLA_SUCCESS) {
2355 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2357 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2358 "Done %s.\n", __func__);
2366 * Issue LIP reset mailbox command.
2369 * ha = adapter block pointer.
2370 * TARGET_QUEUE_LOCK must be released.
2371 * ADAPTER_STATE_LOCK must be released.
2374 * qla2x00 local function return status code.
2380 qla2x00_lip_reset(scsi_qla_host_t *vha)
2384 mbx_cmd_t *mcp = &mc;
2386 ql_dbg(ql_dbg_disc, vha, 0x105a,
2387 "Entered %s.\n", __func__);
2389 if (IS_CNA_CAPABLE(vha->hw)) {
2390 /* Logout across all FCFs. */
2391 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2394 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2395 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2396 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2399 mcp->mb[3] = vha->hw->loop_reset_delay;
2400 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2402 mcp->mb[0] = MBC_LIP_RESET;
2403 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2404 if (HAS_EXTENDED_IDS(vha->hw)) {
2405 mcp->mb[1] = 0x00ff;
2407 mcp->out_mb |= MBX_10;
2409 mcp->mb[1] = 0xff00;
2411 mcp->mb[2] = vha->hw->loop_reset_delay;
2415 mcp->tov = MBX_TOV_SECONDS;
2417 rval = qla2x00_mailbox_command(vha, mcp);
2419 if (rval != QLA_SUCCESS) {
2421 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2424 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2425 "Done %s.\n", __func__);
2436 * ha = adapter block pointer.
2437 * sns = pointer for command.
2438 * cmd_size = command size.
2439 * buf_size = response/command size.
2440 * TARGET_QUEUE_LOCK must be released.
2441 * ADAPTER_STATE_LOCK must be released.
2444 * qla2x00 local function return status code.
2450 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2451 uint16_t cmd_size, size_t buf_size)
2455 mbx_cmd_t *mcp = &mc;
2457 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2458 "Entered %s.\n", __func__);
2460 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2461 "Retry cnt=%d ratov=%d total tov=%d.\n",
2462 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2464 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2465 mcp->mb[1] = cmd_size;
2466 mcp->mb[2] = MSW(sns_phys_address);
2467 mcp->mb[3] = LSW(sns_phys_address);
2468 mcp->mb[6] = MSW(MSD(sns_phys_address));
2469 mcp->mb[7] = LSW(MSD(sns_phys_address));
2470 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2471 mcp->in_mb = MBX_0|MBX_1;
2472 mcp->buf_size = buf_size;
2473 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2474 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2475 rval = qla2x00_mailbox_command(vha, mcp);
2477 if (rval != QLA_SUCCESS) {
2479 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2480 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2481 rval, mcp->mb[0], mcp->mb[1]);
2484 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2485 "Done %s.\n", __func__);
2492 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2493 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2497 struct logio_entry_24xx *lg;
2500 struct qla_hw_data *ha = vha->hw;
2501 struct req_que *req;
2503 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2504 "Entered %s.\n", __func__);
2506 if (vha->vp_idx && vha->qpair)
2507 req = vha->qpair->req;
2509 req = ha->req_q_map[0];
2511 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2513 ql_log(ql_log_warn, vha, 0x1062,
2514 "Failed to allocate login IOCB.\n");
2515 return QLA_MEMORY_ALLOC_FAILED;
2518 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2519 lg->entry_count = 1;
2520 lg->handle = make_handle(req->id, lg->handle);
2521 lg->nport_handle = cpu_to_le16(loop_id);
2522 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2524 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2526 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2527 lg->port_id[0] = al_pa;
2528 lg->port_id[1] = area;
2529 lg->port_id[2] = domain;
2530 lg->vp_index = vha->vp_idx;
2531 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2532 (ha->r_a_tov / 10 * 2) + 2);
2533 if (rval != QLA_SUCCESS) {
2534 ql_dbg(ql_dbg_mbx, vha, 0x1063,
2535 "Failed to issue login IOCB (%x).\n", rval);
2536 } else if (lg->entry_status != 0) {
2537 ql_dbg(ql_dbg_mbx, vha, 0x1064,
2538 "Failed to complete IOCB -- error status (%x).\n",
2540 rval = QLA_FUNCTION_FAILED;
2541 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2542 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2543 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2545 ql_dbg(ql_dbg_mbx, vha, 0x1065,
2546 "Failed to complete IOCB -- completion status (%x) "
2547 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2551 case LSC_SCODE_PORTID_USED:
2552 mb[0] = MBS_PORT_ID_USED;
2553 mb[1] = LSW(iop[1]);
2555 case LSC_SCODE_NPORT_USED:
2556 mb[0] = MBS_LOOP_ID_USED;
2558 case LSC_SCODE_NOLINK:
2559 case LSC_SCODE_NOIOCB:
2560 case LSC_SCODE_NOXCB:
2561 case LSC_SCODE_CMD_FAILED:
2562 case LSC_SCODE_NOFABRIC:
2563 case LSC_SCODE_FW_NOT_READY:
2564 case LSC_SCODE_NOT_LOGGED_IN:
2565 case LSC_SCODE_NOPCB:
2566 case LSC_SCODE_ELS_REJECT:
2567 case LSC_SCODE_CMD_PARAM_ERR:
2568 case LSC_SCODE_NONPORT:
2569 case LSC_SCODE_LOGGED_IN:
2570 case LSC_SCODE_NOFLOGI_ACC:
2572 mb[0] = MBS_COMMAND_ERROR;
2576 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2577 "Done %s.\n", __func__);
2579 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2581 mb[0] = MBS_COMMAND_COMPLETE;
2583 if (iop[0] & BIT_4) {
2589 /* Passback COS information. */
2591 if (lg->io_parameter[7] || lg->io_parameter[8])
2592 mb[10] |= BIT_0; /* Class 2. */
2593 if (lg->io_parameter[9] || lg->io_parameter[10])
2594 mb[10] |= BIT_1; /* Class 3. */
2595 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2596 mb[10] |= BIT_7; /* Confirmed Completion
2601 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2607 * qla2x00_login_fabric
2608 * Issue login fabric port mailbox command.
2611 * ha = adapter block pointer.
2612 * loop_id = device loop ID.
2613 * domain = device domain.
2614 * area = device area.
2615 * al_pa = device AL_PA.
2616 * status = pointer for return status.
2617 * opt = command options.
2618 * TARGET_QUEUE_LOCK must be released.
2619 * ADAPTER_STATE_LOCK must be released.
2622 * qla2x00 local function return status code.
/*
 * Issue the LOGIN_FABRIC_PORT mailbox command (pre-FWI2 firmware path).
 * Mailbox status words are passed back via @mb; a small set of mb[0]
 * completion codes is treated as non-fatal and left for the caller to
 * interpret (see in-line comment below).
 * NOTE(review): several interior lines (declarations, else branches,
 * return) are not visible in this chunk.
 */
2628 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2629 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2633 mbx_cmd_t *mcp = &mc;
2634 struct qla_hw_data *ha = vha->hw;
2636 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2637 "Entered %s.\n", __func__);
2639 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2640 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
/* Extended-ID firmware: loop ID alone in mb[1], options in mb[10];
 * otherwise loop ID and options are packed together in mb[1]. */
2641 if (HAS_EXTENDED_IDS(ha)) {
2642 mcp->mb[1] = loop_id;
2644 mcp->out_mb |= MBX_10;
2646 mcp->mb[1] = (loop_id << 8) | opt;
2648 mcp->mb[2] = domain;
2649 mcp->mb[3] = area << 8 | al_pa;
2651 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
/* Timeout scaled to 2.5x the firmware login timeout. */
2652 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2654 rval = qla2x00_mailbox_command(vha, mcp);
2656 /* Return mailbox statuses. */
2663 /* COS retrieved from Get-Port-Database mailbox command. */
2667 if (rval != QLA_SUCCESS) {
2668 /* RLU tmp code: need to change main mailbox_command function to
2669 * return ok even when the mailbox completion value is not
2670 * SUCCESS. The caller needs to be responsible to interpret
2671 * the return values of this mailbox command if we're not
2672 * to change too much of the existing code.
/* These mb[0] codes are treated as success-with-status; the caller
 * inspects mb[] to decide what to do. */
2674 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2675 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2676 mcp->mb[0] == 0x4006)
2680 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2681 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2682 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2685 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2686 "Done %s.\n", __func__);
2693 * qla2x00_login_local_device
2694 * Issue login loop port mailbox command.
2697 * ha = adapter block pointer.
2698 * loop_id = device loop ID.
2699 * opt = command options.
2702 * Return status code.
/*
 * Log in to a local-loop port.  On FWI2-capable adapters this delegates
 * to the 24xx fabric-login IOCB path; otherwise it issues the legacy
 * LOGIN_LOOP_PORT mailbox command.  Mailbox words 0/1/6/7 are copied
 * back to @mb_ret when the caller provided a buffer.
 */
2709 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2710 uint16_t *mb_ret, uint8_t opt)
2714 mbx_cmd_t *mcp = &mc;
2715 struct qla_hw_data *ha = vha->hw;
2717 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2718 "Entered %s.\n", __func__);
/* FWI2 firmware has no loop-login mailbox; use the IOCB-based login. */
2720 if (IS_FWI2_CAPABLE(ha))
2721 return qla24xx_login_fabric(vha, fcport->loop_id,
2722 fcport->d_id.b.domain, fcport->d_id.b.area,
2723 fcport->d_id.b.al_pa, mb_ret, opt);
2725 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2726 if (HAS_EXTENDED_IDS(ha))
2727 mcp->mb[1] = fcport->loop_id;
2729 mcp->mb[1] = fcport->loop_id << 8;
2731 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2732 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2733 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2735 rval = qla2x00_mailbox_command(vha, mcp);
2737 /* Return mailbox statuses. */
2738 if (mb_ret != NULL) {
2739 mb_ret[0] = mcp->mb[0];
2740 mb_ret[1] = mcp->mb[1];
2741 mb_ret[6] = mcp->mb[6];
2742 mb_ret[7] = mcp->mb[7];
2745 if (rval != QLA_SUCCESS) {
2746 /* AV tmp code: need to change main mailbox_command function to
2747 * return ok even when the mailbox completion value is not
2748 * SUCCESS. The caller needs to be responsible to interpret
2749 * the return values of this mailbox command if we're not
2750 * to change too much of the existing code.
/* 0x4005/0x4006 completions are left for the caller to interpret. */
2752 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2755 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2756 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2757 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2760 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2761 "Done %s.\n", __func__);
/*
 * Log out a fabric port on FWI2 (24xx+) firmware by building a
 * LOGINOUT_PORT IOCB (implicit + explicit LOGO) in DMA-coherent memory
 * and issuing it with a timeout derived from R_A_TOV.
 * Returns QLA_SUCCESS, QLA_MEMORY_ALLOC_FAILED or QLA_FUNCTION_FAILED.
 */
2768 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2769 uint8_t area, uint8_t al_pa)
2772 struct logio_entry_24xx *lg;
2774 struct qla_hw_data *ha = vha->hw;
2775 struct req_que *req;
2777 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2778 "Entered %s.\n", __func__);
2780 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2782 ql_log(ql_log_warn, vha, 0x106e,
2783 "Failed to allocate logout IOCB.\n");
2784 return QLA_MEMORY_ALLOC_FAILED;
2788 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2789 lg->entry_count = 1;
2790 lg->handle = make_handle(req->id, lg->handle);
2791 lg->nport_handle = cpu_to_le16(loop_id);
2793 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
/* Port ID is stored little-endian-style: al_pa, area, domain. */
2795 lg->port_id[0] = al_pa;
2796 lg->port_id[1] = area;
2797 lg->port_id[2] = domain;
2798 lg->vp_index = vha->vp_idx;
/* Timeout: 2x R_A_TOV (r_a_tov is in 100ms units) plus slack. */
2799 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2800 (ha->r_a_tov / 10 * 2) + 2);
2801 if (rval != QLA_SUCCESS) {
2802 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2803 "Failed to issue logout IOCB (%x).\n", rval);
2804 } else if (lg->entry_status != 0) {
2805 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2806 "Failed to complete IOCB -- error status (%x).\n",
2808 rval = QLA_FUNCTION_FAILED;
2809 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2810 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2811 "Failed to complete IOCB -- completion status (%x) "
2812 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2813 le32_to_cpu(lg->io_parameter[0]),
2814 le32_to_cpu(lg->io_parameter[1]));
2817 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2818 "Done %s.\n", __func__);
2821 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2827 * qla2x00_fabric_logout
2828 * Issue logout fabric port mailbox command.
2831 * ha = adapter block pointer.
2832 * loop_id = device loop ID.
2833 * TARGET_QUEUE_LOCK must be released.
2834 * ADAPTER_STATE_LOCK must be released.
2837 * qla2x00 local function return status code.
/*
 * Issue the legacy LOGOUT_FABRIC_PORT mailbox command.
 * NOTE(review): @domain/@area/@al_pa are unused on this pre-FWI2 path —
 * only the loop ID is passed to firmware; confirm against callers.
 */
2843 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2844 uint8_t area, uint8_t al_pa)
2848 mbx_cmd_t *mcp = &mc;
2850 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2851 "Entered %s.\n", __func__);
2853 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2854 mcp->out_mb = MBX_1|MBX_0;
/* Extended IDs: whole loop ID in mb[1], mb[10] also sent; otherwise
 * loop ID lives in the high byte of mb[1]. */
2855 if (HAS_EXTENDED_IDS(vha->hw)) {
2856 mcp->mb[1] = loop_id;
2858 mcp->out_mb |= MBX_10;
2860 mcp->mb[1] = loop_id << 8;
2863 mcp->in_mb = MBX_1|MBX_0;
2864 mcp->tov = MBX_TOV_SECONDS;
2866 rval = qla2x00_mailbox_command(vha, mcp);
2868 if (rval != QLA_SUCCESS) {
2870 ql_dbg(ql_dbg_mbx, vha, 0x1074,
2871 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2874 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2875 "Done %s.\n", __func__);
2882 * qla2x00_full_login_lip
2883 * Issue full login LIP mailbox command.
2886 * ha = adapter block pointer.
2887 * TARGET_QUEUE_LOCK must be released.
2888 * ADAPTER_STATE_LOCK must be released.
2891 * qla2x00 local function return status code.
/*
 * Issue a full-login LIP (loop initialization) mailbox command.
 * On FWI2-capable parts BIT_4 in mb[1] is set (FWI2-specific LIP
 * option; exact semantics defined by firmware spec).
 */
2897 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2901 mbx_cmd_t *mcp = &mc;
2903 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2904 "Entered %s.\n", __func__);
2906 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2907 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
2910 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2912 mcp->tov = MBX_TOV_SECONDS;
2914 rval = qla2x00_mailbox_command(vha, mcp);
2916 if (rval != QLA_SUCCESS) {
2918 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2921 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2922 "Done %s.\n", __func__);
2929 * qla2x00_get_id_list
2932 * ha = adapter block pointer.
2935 * qla2x00 local function return status code.
/*
 * Fetch the firmware's port-ID list into the caller-supplied DMA
 * buffer @id_list.  The DMA address is split across different mailbox
 * registers depending on firmware generation (FWI2 vs. legacy).
 * On success, the entry count from mb[1] is stored via *entries.
 */
2941 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2946 mbx_cmd_t *mcp = &mc;
2948 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2949 "Entered %s.\n", __func__);
2951 if (id_list == NULL)
2952 return QLA_FUNCTION_FAILED;
2954 mcp->mb[0] = MBC_GET_ID_LIST;
2955 mcp->out_mb = MBX_0;
/* FWI2: 64-bit DMA address in mb[2,3,6,7] plus vp index in mb[9]. */
2956 if (IS_FWI2_CAPABLE(vha->hw)) {
2957 mcp->mb[2] = MSW(id_list_dma);
2958 mcp->mb[3] = LSW(id_list_dma);
2959 mcp->mb[6] = MSW(MSD(id_list_dma));
2960 mcp->mb[7] = LSW(MSD(id_list_dma));
2962 mcp->mb[9] = vha->vp_idx;
2963 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
/* Legacy layout uses mb[1,2,3,6] for the DMA address instead. */
2965 mcp->mb[1] = MSW(id_list_dma);
2966 mcp->mb[2] = LSW(id_list_dma);
2967 mcp->mb[3] = MSW(MSD(id_list_dma));
2968 mcp->mb[6] = LSW(MSD(id_list_dma));
2969 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2971 mcp->in_mb = MBX_1|MBX_0;
2972 mcp->tov = MBX_TOV_SECONDS;
2974 rval = qla2x00_mailbox_command(vha, mcp);
2976 if (rval != QLA_SUCCESS) {
2978 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2980 *entries = mcp->mb[1];
2981 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2982 "Done %s.\n", __func__);
2989 * qla2x00_get_resource_cnts
2990 * Get current firmware resource counts.
2993 * ha = adapter block pointer.
2996 * qla2x00 local function return status code.
/*
 * Query firmware resource counts (exchange/IOCB counters, NPIV vport
 * limit, FCF count) and cache them in the qla_hw_data structure.
 */
3002 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
3004 struct qla_hw_data *ha = vha->hw;
3007 mbx_cmd_t *mcp = &mc;
3009 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
3010 "Entered %s.\n", __func__);
3012 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
3013 mcp->out_mb = MBX_0;
3014 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
/* mb[12] (max FCF count) only exists on 81xx/83xx/27xx/28xx firmware. */
3015 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
3016 IS_QLA27XX(ha) || IS_QLA28XX(ha))
3017 mcp->in_mb |= MBX_12;
3018 mcp->tov = MBX_TOV_SECONDS;
3020 rval = qla2x00_mailbox_command(vha, mcp);
3022 if (rval != QLA_SUCCESS) {
3024 ql_dbg(ql_dbg_mbx, vha, 0x107d,
3025 "Failed mb[0]=%x.\n", mcp->mb[0]);
3027 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
3028 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
3029 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
3030 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
3031 mcp->mb[11], mcp->mb[12]);
/* Cache the returned counters for later use by the driver. */
3033 ha->orig_fw_tgt_xcb_count = mcp->mb[1];
3034 ha->cur_fw_tgt_xcb_count = mcp->mb[2];
3035 ha->cur_fw_xcb_count = mcp->mb[3];
3036 ha->orig_fw_xcb_count = mcp->mb[6];
3037 ha->cur_fw_iocb_count = mcp->mb[7];
3038 ha->orig_fw_iocb_count = mcp->mb[10];
3039 if (ha->flags.npiv_supported)
3040 ha->max_npiv_vports = mcp->mb[11];
3041 if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
3042 ha->fw_max_fcf_count = mcp->mb[12];
3049 * qla2x00_get_fcal_position_map
3050 * Get FCAL (LILP) position map using mailbox command
3053 * ha = adapter state pointer.
3054 * pos_map = buffer pointer (can be NULL).
3057 * qla2x00 local function return status code.
/*
 * Retrieve the FC-AL (LILP) position map into a temporary DMA-pool
 * buffer, optionally copying it to @pos_map (may be NULL) on success.
 * Returns QLA_MEMORY_ALLOC_FAILED if the pool allocation fails.
 */
3063 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
3067 mbx_cmd_t *mcp = &mc;
3069 dma_addr_t pmap_dma;
3070 struct qla_hw_data *ha = vha->hw;
3072 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
3073 "Entered %s.\n", __func__);
3075 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
3077 ql_log(ql_log_warn, vha, 0x1080,
3078 "Memory alloc failed.\n");
3079 return QLA_MEMORY_ALLOC_FAILED;
3082 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
3083 mcp->mb[2] = MSW(pmap_dma);
3084 mcp->mb[3] = LSW(pmap_dma);
3085 mcp->mb[6] = MSW(MSD(pmap_dma));
3086 mcp->mb[7] = LSW(MSD(pmap_dma));
3087 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3088 mcp->in_mb = MBX_1|MBX_0;
3089 mcp->buf_size = FCAL_MAP_SIZE;
3090 mcp->flags = MBX_DMA_IN;
3091 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
3092 rval = qla2x00_mailbox_command(vha, mcp);
3094 if (rval == QLA_SUCCESS) {
/* pmap[0] holds the map size per the debug message below. */
3095 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
3096 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
3097 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
3098 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
3102 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
3104 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
3106 if (rval != QLA_SUCCESS) {
3107 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
3109 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
3110 "Done %s.\n", __func__);
3117 * qla2x00_get_link_status
3120 * ha = adapter block pointer.
3121 * loop_id = device loop ID.
3122 * ret_buf = pointer to link status return buffer.
3126 * BIT_0 = mem alloc error.
3127 * BIT_1 = mailbox error.
/*
 * Fetch link statistics for @loop_id into @stats (DMA buffer at
 * @stats_dma).  Only the dwords up to link_up_cnt are byte-swapped
 * afterwards (firmware returns le32 data).
 */
3130 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
3131 struct link_statistics *stats, dma_addr_t stats_dma)
3135 mbx_cmd_t *mcp = &mc;
3136 uint32_t *iter = (uint32_t *)stats;
/* Number of le32 words to swap: everything before link_up_cnt. */
3137 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
3138 struct qla_hw_data *ha = vha->hw;
3140 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
3141 "Entered %s.\n", __func__);
3143 mcp->mb[0] = MBC_GET_LINK_STATUS;
3144 mcp->mb[2] = MSW(LSD(stats_dma));
3145 mcp->mb[3] = LSW(LSD(stats_dma));
3146 mcp->mb[6] = MSW(MSD(stats_dma));
3147 mcp->mb[7] = LSW(MSD(stats_dma));
3148 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
/* Loop-ID placement depends on firmware generation (see branches). */
3150 if (IS_FWI2_CAPABLE(ha)) {
3151 mcp->mb[1] = loop_id;
3154 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
3155 mcp->in_mb |= MBX_1;
3156 } else if (HAS_EXTENDED_IDS(ha)) {
3157 mcp->mb[1] = loop_id;
3159 mcp->out_mb |= MBX_10|MBX_1;
3161 mcp->mb[1] = loop_id << 8;
3162 mcp->out_mb |= MBX_1;
3164 mcp->tov = MBX_TOV_SECONDS;
3165 mcp->flags = IOCTL_CMD;
3166 rval = qla2x00_mailbox_command(vha, mcp);
3168 if (rval == QLA_SUCCESS) {
3169 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3170 ql_dbg(ql_dbg_mbx, vha, 0x1085,
3171 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3172 rval = QLA_FUNCTION_FAILED;
3174 /* Re-endianize - firmware data is le32. */
3175 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
3176 "Done %s.\n", __func__);
3177 for ( ; dwords--; iter++)
3182 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
/*
 * Fetch private link statistics (24xx+) via qla24xx_send_mb_cmd, then
 * byte-swap the whole stats structure (firmware returns le32 data).
 * @options is passed through in mb[10].
 */
3189 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
3190 dma_addr_t stats_dma, uint16_t options)
3194 mbx_cmd_t *mcp = &mc;
3195 uint32_t *iter = (uint32_t *)stats;
3196 ushort dwords = sizeof(*stats)/sizeof(*iter);
3198 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
3199 "Entered %s.\n", __func__);
3201 memset(&mc, 0, sizeof(mc));
3202 mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
3203 mc.mb[2] = MSW(LSD(stats_dma));
3204 mc.mb[3] = LSW(LSD(stats_dma));
3205 mc.mb[6] = MSW(MSD(stats_dma));
3206 mc.mb[7] = LSW(MSD(stats_dma));
3208 mc.mb[9] = vha->vp_idx;
3209 mc.mb[10] = options;
3211 rval = qla24xx_send_mb_cmd(vha, &mc);
3213 if (rval == QLA_SUCCESS) {
/* mcp aliases &mc, so mcp->mb[] holds the returned status words. */
3214 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3215 ql_dbg(ql_dbg_mbx, vha, 0x1089,
3216 "Failed mb[0]=%x.\n", mcp->mb[0]);
3217 rval = QLA_FUNCTION_FAILED;
3219 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
3220 "Done %s.\n", __func__);
3221 /* Re-endianize - firmware data is le32. */
3222 for ( ; dwords--; iter++)
3227 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
/*
 * Abort an outstanding command (srb) via an ABORT IOCB on 24xx+
 * firmware.  Looks up the command's handle in the queue-pair's
 * outstanding array under the qpair lock, then issues the IOCB.
 * A zero nport_handle in the completed IOCB indicates success.
 */
3234 qla24xx_abort_command(srb_t *sp)
3237 unsigned long flags = 0;
3239 struct abort_entry_24xx *abt;
3242 fc_port_t *fcport = sp->fcport;
3243 struct scsi_qla_host *vha = fcport->vha;
3244 struct qla_hw_data *ha = vha->hw;
3245 struct req_que *req = vha->req;
3246 struct qla_qpair *qpair = sp->qpair;
3248 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3249 "Entered %s.\n", __func__);
3252 req = sp->qpair->req;
3254 return QLA_ERR_NO_QPAIR;
/* Module parameter selects the async (driver-managed) abort path. */
3256 if (ql2xasynctmfenable)
3257 return qla24xx_async_abort_command(sp);
/* Find sp's handle in the outstanding-commands array (handle 0 is
 * reserved, hence the scan starts at 1). */
3259 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3260 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3261 if (req->outstanding_cmds[handle] == sp)
3264 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3265 if (handle == req->num_outstanding_cmds) {
3266 /* Command not found. */
3267 return QLA_ERR_NOT_FOUND;
3270 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3272 ql_log(ql_log_warn, vha, 0x108d,
3273 "Failed to allocate abort IOCB.\n");
3274 return QLA_MEMORY_ALLOC_FAILED;
3277 abt->entry_type = ABORT_IOCB_TYPE;
3278 abt->entry_count = 1;
3279 abt->handle = make_handle(req->id, abt->handle);
3280 abt->nport_handle = cpu_to_le16(fcport->loop_id);
3281 abt->handle_to_abort = make_handle(req->id, handle);
3282 abt->port_id[0] = fcport->d_id.b.al_pa;
3283 abt->port_id[1] = fcport->d_id.b.area;
3284 abt->port_id[2] = fcport->d_id.b.domain;
3285 abt->vp_index = fcport->vha->vp_idx;
3287 abt->req_que_no = cpu_to_le16(req->id);
3288 /* Need to pass original sp */
3289 qla_nvme_abort_set_option(abt, sp);
3291 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3292 if (rval != QLA_SUCCESS) {
3293 ql_dbg(ql_dbg_mbx, vha, 0x108e,
3294 "Failed to issue IOCB (%x).\n", rval);
3295 } else if (abt->entry_status != 0) {
3296 ql_dbg(ql_dbg_mbx, vha, 0x108f,
3297 "Failed to complete IOCB -- error status (%x).\n",
3299 rval = QLA_FUNCTION_FAILED;
/* Firmware reuses nport_handle as the completion status field. */
3300 } else if (abt->nport_handle != cpu_to_le16(0)) {
3301 ql_dbg(ql_dbg_mbx, vha, 0x1090,
3302 "Failed to complete IOCB -- completion status (%x).\n",
3303 le16_to_cpu(abt->nport_handle));
3304 if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
3305 rval = QLA_FUNCTION_PARAMETER_ERROR;
3307 rval = QLA_FUNCTION_FAILED;
3309 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3310 "Done %s.\n", __func__);
3312 if (rval == QLA_SUCCESS)
3313 qla_nvme_abort_process_comp_status(abt, sp);
3315 qla_wait_nvme_release_cmd_kref(sp);
3317 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
/*
 * Overlay of a task-management request IOCB and its status response.
 * NOTE(review): __qla24xx_issue_tmf below accesses these as
 * tsk->p.tsk / sts (via &tsk->p.sts elsewhere), which suggests the two
 * members sit inside a union named 'p' whose wrapper lines are not
 * visible in this chunk — confirm against the full source.
 */
3322 struct tsk_mgmt_cmd {
3324 struct tsk_mgmt_entry tsk;
3325 struct sts_entry_24xx sts;
/*
 * Issue a task-management IOCB (@type, e.g. TCF_LUN_RESET or
 * TCF_TARGET_RESET) for @fcport, then a marker IOCB to resync the
 * firmware.  @l is the LUN (used only for LUN reset); @name is used in
 * log messages.  The IOCB and its status response share one DMA-pool
 * allocation (struct tsk_mgmt_cmd).
 */
3330 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3331 uint64_t l, int tag)
3334 struct tsk_mgmt_cmd *tsk;
3335 struct sts_entry_24xx *sts;
3337 scsi_qla_host_t *vha;
3338 struct qla_hw_data *ha;
3339 struct req_que *req;
3340 struct qla_qpair *qpair;
3346 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3347 "Entered %s.\n", __func__);
3349 if (vha->vp_idx && vha->qpair) {
3355 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3357 ql_log(ql_log_warn, vha, 0x1093,
3358 "Failed to allocate task management IOCB.\n");
3359 return QLA_MEMORY_ALLOC_FAILED;
3362 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3363 tsk->p.tsk.entry_count = 1;
3364 tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle);
3365 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
/* Timeout: 2x R_A_TOV (r_a_tov is in 100ms units). */
3366 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3367 tsk->p.tsk.control_flags = cpu_to_le32(type);
3368 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3369 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3370 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3371 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
/* LUN field is only meaningful for a LUN reset; FCP byte order. */
3372 if (type == TCF_LUN_RESET) {
3373 int_to_scsilun(l, &tsk->p.tsk.lun);
3374 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3375 sizeof(tsk->p.tsk.lun));
3379 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3380 if (rval != QLA_SUCCESS) {
3381 ql_dbg(ql_dbg_mbx, vha, 0x1094,
3382 "Failed to issue %s reset IOCB (%x).\n", name, rval);
3383 } else if (sts->entry_status != 0) {
3384 ql_dbg(ql_dbg_mbx, vha, 0x1095,
3385 "Failed to complete IOCB -- error status (%x).\n",
3387 rval = QLA_FUNCTION_FAILED;
3388 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3389 ql_dbg(ql_dbg_mbx, vha, 0x1096,
3390 "Failed to complete IOCB -- completion status (%x).\n",
3391 le16_to_cpu(sts->comp_status));
3392 rval = QLA_FUNCTION_FAILED;
/* If FCP response info is present, byte 3 carries the TM status. */
3393 } else if (le16_to_cpu(sts->scsi_status) &
3394 SS_RESPONSE_INFO_LEN_VALID) {
3395 if (le32_to_cpu(sts->rsp_data_len) < 4) {
3396 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3397 "Ignoring inconsistent data length -- not enough "
3398 "response info (%d).\n",
3399 le32_to_cpu(sts->rsp_data_len));
3400 } else if (sts->data[3]) {
3401 ql_dbg(ql_dbg_mbx, vha, 0x1098,
3402 "Failed to complete IOCB -- response (%x).\n",
3404 rval = QLA_FUNCTION_FAILED;
3408 /* Issue marker IOCB. */
3409 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
3410 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
3411 if (rval2 != QLA_SUCCESS) {
3412 ql_dbg(ql_dbg_mbx, vha, 0x1099,
3413 "Failed to issue marker IOCB (%x).\n", rval2);
3415 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3416 "Done %s.\n", __func__);
3419 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
/*
 * Target reset wrapper: use the async TM path when enabled on
 * FWI2-capable hardware, otherwise the synchronous TMF IOCB path.
 */
3425 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3427 struct qla_hw_data *ha = fcport->vha->hw;
3429 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3430 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3432 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
/*
 * LUN reset wrapper: async TM path when enabled on FWI2-capable
 * hardware, otherwise the synchronous TMF IOCB path.
 */
3436 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3438 struct qla_hw_data *ha = fcport->vha->hw;
3440 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3441 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3443 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
/*
 * Ask the firmware to generate a system error (for testing/diagnostic
 * purposes).  Only supported on 23xx and FWI2-capable adapters.
 */
3447 qla2x00_system_error(scsi_qla_host_t *vha)
3451 mbx_cmd_t *mcp = &mc;
3452 struct qla_hw_data *ha = vha->hw;
3454 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3455 return QLA_FUNCTION_FAILED;
3457 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3458 "Entered %s.\n", __func__);
3460 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3461 mcp->out_mb = MBX_0;
3465 rval = qla2x00_mailbox_command(vha, mcp);
3467 if (rval != QLA_SUCCESS) {
3468 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3470 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3471 "Done %s.\n", __func__);
/*
 * Write one SerDes register word at @addr.  Supported only on
 * 25xx/2031/27xx/28xx parts; 2031 takes only the low data byte.
 */
3478 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3482 mbx_cmd_t *mcp = &mc;
3484 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3485 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3486 return QLA_FUNCTION_FAILED;
3488 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3489 "Entered %s.\n", __func__);
3491 mcp->mb[0] = MBC_WRITE_SERDES;
/* 2031 SerDes registers are 8 bits wide. */
3493 if (IS_QLA2031(vha->hw))
3494 mcp->mb[2] = data & 0xff;
3499 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3501 mcp->tov = MBX_TOV_SECONDS;
3503 rval = qla2x00_mailbox_command(vha, mcp);
3505 if (rval != QLA_SUCCESS) {
3506 ql_dbg(ql_dbg_mbx, vha, 0x1183,
3507 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3509 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3510 "Done %s.\n", __func__);
/*
 * Read one SerDes register word at @addr into *data.  Supported only
 * on 25xx/2031/27xx/28xx parts; 2031 returns only the low byte.
 */
3517 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3521 mbx_cmd_t *mcp = &mc;
3523 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3524 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3525 return QLA_FUNCTION_FAILED;
3527 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3528 "Entered %s.\n", __func__);
3530 mcp->mb[0] = MBC_READ_SERDES;
3533 mcp->out_mb = MBX_3|MBX_1|MBX_0;
3534 mcp->in_mb = MBX_1|MBX_0;
3535 mcp->tov = MBX_TOV_SECONDS;
3537 rval = qla2x00_mailbox_command(vha, mcp);
/* 2031 SerDes registers are 8 bits wide. */
3539 if (IS_QLA2031(vha->hw))
3540 *data = mcp->mb[1] & 0xff;
3544 if (rval != QLA_SUCCESS) {
3545 ql_dbg(ql_dbg_mbx, vha, 0x1186,
3546 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3548 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3549 "Done %s.\n", __func__);
/*
 * Write a 32-bit SerDes register on ISP8044 via the Ethernet-SerDes
 * mailbox command; address and data are split LSW/MSW across mb[3..6].
 */
3556 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3560 mbx_cmd_t *mcp = &mc;
3562 if (!IS_QLA8044(vha->hw))
3563 return QLA_FUNCTION_FAILED;
3565 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3566 "Entered %s.\n", __func__);
3568 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3569 mcp->mb[1] = HCS_WRITE_SERDES;
3570 mcp->mb[3] = LSW(addr);
3571 mcp->mb[4] = MSW(addr);
3572 mcp->mb[5] = LSW(data);
3573 mcp->mb[6] = MSW(data);
3574 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3576 mcp->tov = MBX_TOV_SECONDS;
3578 rval = qla2x00_mailbox_command(vha, mcp);
3580 if (rval != QLA_SUCCESS) {
3581 ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3582 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3584 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3585 "Done %s.\n", __func__);
/*
 * Read a 32-bit SerDes register on ISP8044; the result comes back as
 * LSW in mb[1] and MSW in mb[2].
 * NOTE(review): *data is assigned even on failure — the visible code
 * stores mb[2]<<16|mb[1] unconditionally before the rval check.
 */
3592 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3596 mbx_cmd_t *mcp = &mc;
3598 if (!IS_QLA8044(vha->hw))
3599 return QLA_FUNCTION_FAILED;
3601 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3602 "Entered %s.\n", __func__);
3604 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3605 mcp->mb[1] = HCS_READ_SERDES;
3606 mcp->mb[3] = LSW(addr);
3607 mcp->mb[4] = MSW(addr);
3608 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3609 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3610 mcp->tov = MBX_TOV_SECONDS;
3612 rval = qla2x00_mailbox_command(vha, mcp);
3614 *data = mcp->mb[2] << 16 | mcp->mb[1];
3616 if (rval != QLA_SUCCESS) {
3617 ql_dbg(ql_dbg_mbx, vha, 0x118a,
3618 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3620 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3621 "Done %s.\n", __func__);
3628 * qla2x00_set_serdes_params() -
3630 * @sw_em_1g: serial link options
3631 * @sw_em_2g: serial link options
3632 * @sw_em_4g: serial link options
/*
 * Program serial-link (SerDes) emphasis parameters for 1G/2G/4G link
 * speeds.  BIT_15 is OR'd into each word before sending (per-speed
 * enable/valid flag, per firmware interface).
 */
3637 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3638 uint16_t sw_em_2g, uint16_t sw_em_4g)
3642 mbx_cmd_t *mcp = &mc;
3644 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3645 "Entered %s.\n", __func__);
3647 mcp->mb[0] = MBC_SERDES_PARAMS;
3649 mcp->mb[2] = sw_em_1g | BIT_15;
3650 mcp->mb[3] = sw_em_2g | BIT_15;
3651 mcp->mb[4] = sw_em_4g | BIT_15;
3652 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3654 mcp->tov = MBX_TOV_SECONDS;
3656 rval = qla2x00_mailbox_command(vha, mcp);
3658 if (rval != QLA_SUCCESS) {
3660 ql_dbg(ql_dbg_mbx, vha, 0x109f,
3661 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3664 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3665 "Done %s.\n", __func__);
/*
 * Stop the firmware (FWI2-capable adapters only).  A firmware reply of
 * MBS_INVALID_COMMAND is mapped to QLA_INVALID_COMMAND so callers can
 * distinguish "unsupported" from a genuine failure.
 */
3672 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3676 mbx_cmd_t *mcp = &mc;
3678 if (!IS_FWI2_CAPABLE(vha->hw))
3679 return QLA_FUNCTION_FAILED;
3681 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3682 "Entered %s.\n", __func__);
3684 mcp->mb[0] = MBC_STOP_FIRMWARE;
3686 mcp->out_mb = MBX_1|MBX_0;
3690 rval = qla2x00_mailbox_command(vha, mcp);
3692 if (rval != QLA_SUCCESS) {
3693 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3694 if (mcp->mb[0] == MBS_INVALID_COMMAND)
3695 rval = QLA_INVALID_COMMAND;
3697 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3698 "Done %s.\n", __func__);
/*
 * Enable Extended Firmware Trace (EFT) into the DMA buffer at
 * @eft_dma, sized as @buffers.  FWI2-capable hardware only; bails out
 * early if the PCI channel is offline (EEH recovery in progress).
 */
3705 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3710 mbx_cmd_t *mcp = &mc;
3712 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3713 "Entered %s.\n", __func__);
3715 if (!IS_FWI2_CAPABLE(vha->hw))
3716 return QLA_FUNCTION_FAILED;
3718 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3719 return QLA_FUNCTION_FAILED;
3721 mcp->mb[0] = MBC_TRACE_CONTROL;
3722 mcp->mb[1] = TC_EFT_ENABLE;
3723 mcp->mb[2] = LSW(eft_dma);
3724 mcp->mb[3] = MSW(eft_dma);
3725 mcp->mb[4] = LSW(MSD(eft_dma));
3726 mcp->mb[5] = MSW(MSD(eft_dma));
3727 mcp->mb[6] = buffers;
3728 mcp->mb[7] = TC_AEN_DISABLE;
3729 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3730 mcp->in_mb = MBX_1|MBX_0;
3731 mcp->tov = MBX_TOV_SECONDS;
3733 rval = qla2x00_mailbox_command(vha, mcp);
3734 if (rval != QLA_SUCCESS) {
3735 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3736 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3737 rval, mcp->mb[0], mcp->mb[1]);
3739 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3740 "Done %s.\n", __func__);
/*
 * Disable Extended Firmware Trace (EFT).  FWI2-capable hardware only;
 * skipped when the PCI channel is offline.
 */
3747 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3751 mbx_cmd_t *mcp = &mc;
3753 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3754 "Entered %s.\n", __func__);
3756 if (!IS_FWI2_CAPABLE(vha->hw))
3757 return QLA_FUNCTION_FAILED;
3759 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3760 return QLA_FUNCTION_FAILED;
3762 mcp->mb[0] = MBC_TRACE_CONTROL;
3763 mcp->mb[1] = TC_EFT_DISABLE;
3764 mcp->out_mb = MBX_1|MBX_0;
3765 mcp->in_mb = MBX_1|MBX_0;
3766 mcp->tov = MBX_TOV_SECONDS;
3768 rval = qla2x00_mailbox_command(vha, mcp);
3769 if (rval != QLA_SUCCESS) {
3770 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3771 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3772 rval, mcp->mb[0], mcp->mb[1]);
3774 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3775 "Done %s.\n", __func__);
/*
 * Enable Fibre Channel Event (FCE) tracing into the DMA buffer at
 * @fce_dma.  Supported on 25xx/81xx/83xx/27xx/28xx only.  On success
 * the first 8 mailbox words are copied to @mb (dwords presumably
 * filled in the non-visible tail — confirm against full source).
 */
3782 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3783 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3787 mbx_cmd_t *mcp = &mc;
3789 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3790 "Entered %s.\n", __func__);
3792 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3793 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
3794 !IS_QLA28XX(vha->hw))
3795 return QLA_FUNCTION_FAILED;
3797 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3798 return QLA_FUNCTION_FAILED;
3800 mcp->mb[0] = MBC_TRACE_CONTROL;
3801 mcp->mb[1] = TC_FCE_ENABLE;
3802 mcp->mb[2] = LSW(fce_dma);
3803 mcp->mb[3] = MSW(fce_dma);
3804 mcp->mb[4] = LSW(MSD(fce_dma));
3805 mcp->mb[5] = MSW(MSD(fce_dma));
3806 mcp->mb[6] = buffers;
3807 mcp->mb[7] = TC_AEN_DISABLE;
3809 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3810 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3811 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3813 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3814 mcp->tov = MBX_TOV_SECONDS;
3816 rval = qla2x00_mailbox_command(vha, mcp);
3817 if (rval != QLA_SUCCESS) {
3818 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3819 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3820 rval, mcp->mb[0], mcp->mb[1]);
3822 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3823 "Done %s.\n", __func__);
3826 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
/*
 * Disable FCE tracing.  On completion, the firmware's 64-bit write and
 * read pointers are reassembled from mailbox words 2-5 and 6-9 and
 * returned through *wr and *rd.
 */
3835 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3839 mbx_cmd_t *mcp = &mc;
3841 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3842 "Entered %s.\n", __func__);
3844 if (!IS_FWI2_CAPABLE(vha->hw))
3845 return QLA_FUNCTION_FAILED;
3847 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3848 return QLA_FUNCTION_FAILED;
3850 mcp->mb[0] = MBC_TRACE_CONTROL;
3851 mcp->mb[1] = TC_FCE_DISABLE;
3852 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3853 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3854 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3856 mcp->tov = MBX_TOV_SECONDS;
3858 rval = qla2x00_mailbox_command(vha, mcp);
3859 if (rval != QLA_SUCCESS) {
3860 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3861 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3862 rval, mcp->mb[0], mcp->mb[1]);
3864 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3865 "Done %s.\n", __func__);
/* Reassemble 64-bit pointers from four 16-bit mailbox words each. */
3868 *wr = (uint64_t) mcp->mb[5] << 48 |
3869 (uint64_t) mcp->mb[4] << 32 |
3870 (uint64_t) mcp->mb[3] << 16 |
3871 (uint64_t) mcp->mb[2];
3873 *rd = (uint64_t) mcp->mb[9] << 48 |
3874 (uint64_t) mcp->mb[8] << 32 |
3875 (uint64_t) mcp->mb[7] << 16 |
3876 (uint64_t) mcp->mb[6];
/*
 * Query the iIDMA port speed for @loop_id via the PORT_PARAMS mailbox
 * command (mb[2]=mb[3]=0 means "get").  On success *port_speed gets
 * mb[3]; mailbox status words are also returned via @mb.
 */
3883 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3884 uint16_t *port_speed, uint16_t *mb)
3888 mbx_cmd_t *mcp = &mc;
3890 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3891 "Entered %s.\n", __func__);
3893 if (!IS_IIDMA_CAPABLE(vha->hw))
3894 return QLA_FUNCTION_FAILED;
3896 mcp->mb[0] = MBC_PORT_PARAMS;
3897 mcp->mb[1] = loop_id;
/* Zero mode/speed words select the "get" variant of PORT_PARAMS. */
3898 mcp->mb[2] = mcp->mb[3] = 0;
3899 mcp->mb[9] = vha->vp_idx;
3900 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3901 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3902 mcp->tov = MBX_TOV_SECONDS;
3904 rval = qla2x00_mailbox_command(vha, mcp);
3906 /* Return mailbox statuses. */
3913 if (rval != QLA_SUCCESS) {
3914 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3916 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3917 "Done %s.\n", __func__);
3919 *port_speed = mcp->mb[3];
/*
 * Set the iIDMA port speed for @loop_id via PORT_PARAMS.  Only the low
 * 6 bits of @port_speed are sent; mailbox statuses returned via @mb.
 */
3926 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3927 uint16_t port_speed, uint16_t *mb)
3931 mbx_cmd_t *mcp = &mc;
3933 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3934 "Entered %s.\n", __func__);
3936 if (!IS_IIDMA_CAPABLE(vha->hw))
3937 return QLA_FUNCTION_FAILED;
3939 mcp->mb[0] = MBC_PORT_PARAMS;
3940 mcp->mb[1] = loop_id;
/* Speed field is 6 bits wide in the firmware interface. */
3942 mcp->mb[3] = port_speed & 0x3F;
3943 mcp->mb[9] = vha->vp_idx;
3944 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3945 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3946 mcp->tov = MBX_TOV_SECONDS;
3948 rval = qla2x00_mailbox_command(vha, mcp);
3950 /* Return mailbox statuses. */
3957 if (rval != QLA_SUCCESS) {
3958 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3959 "Failed=%x.\n", rval);
3961 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3962 "Done %s.\n", __func__);
3969 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3970 struct vp_rpt_id_entry_24xx *rptid_entry)
3972 struct qla_hw_data *ha = vha->hw;
3973 scsi_qla_host_t *vp = NULL;
3974 unsigned long flags;
3977 struct fc_port *fcport;
3979 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3980 "Entered %s.\n", __func__);
3982 if (rptid_entry->entry_status != 0)
3985 id.b.domain = rptid_entry->port_id[2];
3986 id.b.area = rptid_entry->port_id[1];
3987 id.b.al_pa = rptid_entry->port_id[0];
3989 ha->flags.n2n_ae = 0;
3991 if (rptid_entry->format == 0) {
3993 ql_dbg(ql_dbg_async, vha, 0x10b7,
3994 "Format 0 : Number of VPs setup %d, number of "
3995 "VPs acquired %d.\n", rptid_entry->vp_setup,
3996 rptid_entry->vp_acquired);
3997 ql_dbg(ql_dbg_async, vha, 0x10b8,
3998 "Primary port id %02x%02x%02x.\n",
3999 rptid_entry->port_id[2], rptid_entry->port_id[1],
4000 rptid_entry->port_id[0]);
4001 ha->current_topology = ISP_CFG_NL;
4002 qlt_update_host_map(vha, id);
4004 } else if (rptid_entry->format == 1) {
4006 ql_dbg(ql_dbg_async, vha, 0x10b9,
4007 "Format 1: VP[%d] enabled - status %d - with "
4008 "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
4009 rptid_entry->vp_status,
4010 rptid_entry->port_id[2], rptid_entry->port_id[1],
4011 rptid_entry->port_id[0]);
4012 ql_dbg(ql_dbg_async, vha, 0x5075,
4013 "Format 1: Remote WWPN %8phC.\n",
4014 rptid_entry->u.f1.port_name);
4016 ql_dbg(ql_dbg_async, vha, 0x5075,
4017 "Format 1: WWPN %8phC.\n",
4020 switch (rptid_entry->u.f1.flags & TOPO_MASK) {
4022 ha->current_topology = ISP_CFG_N;
4023 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4024 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4025 fcport->scan_state = QLA_FCPORT_SCAN;
4026 fcport->n2n_flag = 0;
4029 if (wwn_to_u64(vha->port_name) >
4030 wwn_to_u64(rptid_entry->u.f1.port_name)) {
4032 vha->d_id.b.al_pa = 1;
4033 ha->flags.n2n_bigger = 1;
4036 ql_dbg(ql_dbg_async, vha, 0x5075,
4037 "Format 1: assign local id %x remote id %x\n",
4038 vha->d_id.b24, id.b24);
4040 ql_dbg(ql_dbg_async, vha, 0x5075,
4041 "Format 1: Remote login - Waiting for WWPN %8phC.\n",
4042 rptid_entry->u.f1.port_name);
4043 ha->flags.n2n_bigger = 0;
4046 fcport = qla2x00_find_fcport_by_wwpn(vha,
4047 rptid_entry->u.f1.port_name, 1);
4048 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4052 fcport->plogi_nack_done_deadline = jiffies + HZ;
4053 fcport->dm_login_expire = jiffies +
4054 QLA_N2N_WAIT_TIME * HZ;
4055 fcport->scan_state = QLA_FCPORT_FOUND;
4056 fcport->n2n_flag = 1;
4057 fcport->keep_nport_handle = 1;
4058 fcport->login_retry = vha->hw->login_retry_count;
4059 fcport->fc4_type = FS_FC4TYPE_FCP;
4060 if (vha->flags.nvme_enabled)
4061 fcport->fc4_type |= FS_FC4TYPE_NVME;
4063 if (wwn_to_u64(vha->port_name) >
4064 wwn_to_u64(fcport->port_name)) {
4068 switch (fcport->disc_state) {
4070 set_bit(RELOGIN_NEEDED,
4073 case DSC_DELETE_PEND:
4076 qlt_schedule_sess_for_deletion(fcport);
4080 qla24xx_post_newsess_work(vha, &id,
4081 rptid_entry->u.f1.port_name,
4082 rptid_entry->u.f1.node_name,
4087 /* if our portname is higher then initiate N2N login */
4089 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
4092 ha->current_topology = ISP_CFG_FL;
4095 ha->current_topology = ISP_CFG_F;
4101 ha->flags.gpsc_supported = 1;
4102 ha->current_topology = ISP_CFG_F;
4103 /* buffer to buffer credit flag */
4104 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
4106 if (rptid_entry->vp_idx == 0) {
4107 if (rptid_entry->vp_status == VP_STAT_COMPL) {
4108 /* FA-WWN is only for physical port */
4109 if (qla_ini_mode_enabled(vha) &&
4110 ha->flags.fawwpn_enabled &&
4111 (rptid_entry->u.f1.flags &
4113 memcpy(vha->port_name,
4114 rptid_entry->u.f1.port_name,
4118 qlt_update_host_map(vha, id);
4121 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
4122 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
4124 if (rptid_entry->vp_status != VP_STAT_COMPL &&
4125 rptid_entry->vp_status != VP_STAT_ID_CHG) {
4126 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
4127 "Could not acquire ID for VP[%d].\n",
4128 rptid_entry->vp_idx);
4133 spin_lock_irqsave(&ha->vport_slock, flags);
4134 list_for_each_entry(vp, &ha->vp_list, list) {
4135 if (rptid_entry->vp_idx == vp->vp_idx) {
4140 spin_unlock_irqrestore(&ha->vport_slock, flags);
4145 qlt_update_host_map(vp, id);
4148 * Cannot configure here as we are still sitting on the
4149 * response queue. Handle it in dpc context.
4151 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
4152 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
4153 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
4155 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
4156 qla2xxx_wake_dpc(vha);
4157 } else if (rptid_entry->format == 2) {
4158 ql_dbg(ql_dbg_async, vha, 0x505f,
4159 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
4160 rptid_entry->port_id[2], rptid_entry->port_id[1],
4161 rptid_entry->port_id[0]);
4163 ql_dbg(ql_dbg_async, vha, 0x5075,
4164 "N2N: Remote WWPN %8phC.\n",
4165 rptid_entry->u.f2.port_name);
4167 /* N2N. direct connect */
4168 ha->current_topology = ISP_CFG_N;
4169 ha->flags.rida_fmt2 = 1;
4170 vha->d_id.b.domain = rptid_entry->port_id[2];
4171 vha->d_id.b.area = rptid_entry->port_id[1];
4172 vha->d_id.b.al_pa = rptid_entry->port_id[0];
4174 ha->flags.n2n_ae = 1;
4175 spin_lock_irqsave(&ha->vport_slock, flags);
4176 qlt_update_vp_map(vha, SET_AL_PA);
4177 spin_unlock_irqrestore(&ha->vport_slock, flags);
4179 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4180 fcport->scan_state = QLA_FCPORT_SCAN;
4181 fcport->n2n_flag = 0;
4184 fcport = qla2x00_find_fcport_by_wwpn(vha,
4185 rptid_entry->u.f2.port_name, 1);
4188 fcport->login_retry = vha->hw->login_retry_count;
4189 fcport->plogi_nack_done_deadline = jiffies + HZ;
4190 fcport->scan_state = QLA_FCPORT_FOUND;
4191 fcport->keep_nport_handle = 1;
4192 fcport->n2n_flag = 1;
4193 fcport->d_id.b.domain =
4194 rptid_entry->u.f2.remote_nport_id[2];
4195 fcport->d_id.b.area =
4196 rptid_entry->u.f2.remote_nport_id[1];
4197 fcport->d_id.b.al_pa =
4198 rptid_entry->u.f2.remote_nport_id[0];
4201 * For the case where remote port sending PRLO, FW
4202 * sends up RIDA Format 2 as an indication of session
4203 * loss. In other word, FW state change from PRLI
4204 * complete back to PLOGI complete. Delete the
4205 * session and let relogin drive the reconnect.
4207 if (atomic_read(&fcport->state) == FCS_ONLINE)
4208 qlt_schedule_sess_for_deletion(fcport);
4214 * qla24xx_modify_vp_config
4215 * Change VP configuration for vha
4218 * vha = adapter block pointer.
4221 * qla2xxx local function return status code.
/*
 * qla24xx_modify_vp_config()
 *	Builds a VP_CONFIG_IOCB (command VCT_COMMAND_MOD_ENABLE_VPS) for this
 *	vha's vp_idx, copies in the node/port WWNs, issues it via the base
 *	(physical) vha, and checks both the issue status and the IOCB
 *	completion status before marking the vport FC_VPORT_INITIALIZING.
 * NOTE(review): this listing has gaps — the embedded line numbers jump
 *	(e.g. 4227->4230, 4240->4242), so braces/declarations/return are
 *	missing from view; recover the dropped lines from upstream qla_mbx.c.
 */
4227 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
4230 struct vp_config_entry_24xx *vpmod;
4231 dma_addr_t vpmod_dma;
4232 struct qla_hw_data *ha = vha->hw;
4233 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4235 /* This can be called by the parent */
4237 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4238 "Entered %s.\n", __func__);
/* IOCB comes zeroed from the DMA pool; only the used fields are set below. */
4240 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4242 ql_log(ql_log_warn, vha, 0x10bc,
4243 "Failed to allocate modify VP IOCB.\n");
4244 return QLA_MEMORY_ALLOC_FAILED;
4247 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4248 vpmod->entry_count = 1;
4249 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4250 vpmod->vp_count = 1;
4251 vpmod->vp_index1 = vha->vp_idx;
4252 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
/* Let target mode adjust the IOCB before the WWNs are copied in. */
4254 qlt_modify_vp_config(vha, vpmod);
4256 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4257 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4258 vpmod->entry_count = 1;
4260 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
4261 if (rval != QLA_SUCCESS) {
4262 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4263 "Failed to issue VP config IOCB (%x).\n", rval);
4264 } else if (vpmod->comp_status != 0) {
4265 ql_dbg(ql_dbg_mbx, vha, 0x10be,
4266 "Failed to complete IOCB -- error status (%x).\n",
4267 vpmod->comp_status);
4268 rval = QLA_FUNCTION_FAILED;
/*
 * NOTE(review): if CS_COMPLETE is 0 this branch can never be taken — any
 * nonzero comp_status was already caught above. Looks like dead code;
 * confirm against the CS_COMPLETE definition before removing.
 */
4269 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4270 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4271 "Failed to complete IOCB -- completion status (%x).\n",
4272 le16_to_cpu(vpmod->comp_status));
4273 rval = QLA_FUNCTION_FAILED;
4276 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4277 "Done %s.\n", __func__);
4278 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
4280 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4286 * qla2x00_send_change_request
4287 * Receive or disable RSCN request from fabric controller
4290 * ha = adapter block pointer
4291 * format = registration format:
4293 * 1 - Fabric detected registration
4294 * 2 - N_port detected registration
4295 * 3 - Full registration
4296 * FF - clear registration
4297 * vp_idx = Virtual port index
4300 * qla2x00 local function return status code.
/*
 * qla2x00_send_change_request()
 *	Issues MBC_SEND_CHANGE_REQUEST: mb[1] carries the RSCN registration
 *	format (see the comment block above), mb[9] the virtual port index.
 * NOTE(review): the tail of this function (mb[0] status handling and the
 *	return) is among the lines dropped from this listing.
 */
4307 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4312 mbx_cmd_t *mcp = &mc;
4314 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4315 "Entered %s.\n", __func__);
4317 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4318 mcp->mb[1] = format;
4319 mcp->mb[9] = vp_idx;
4320 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4321 mcp->in_mb = MBX_0|MBX_1;
4322 mcp->tov = MBX_TOV_SECONDS;
4324 rval = qla2x00_mailbox_command(vha, mcp);
4326 if (rval == QLA_SUCCESS) {
4327 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
/*
 * qla2x00_dump_ram()
 *	Dumps RISC RAM into the caller-supplied DMA buffer. Uses the
 *	extended mailbox command when the address needs >16 bits or the ISP
 *	is FWI2-capable; the extended form also takes a 32-bit word count
 *	split across mb[4]/mb[5].
 */
4337 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4342 mbx_cmd_t *mcp = &mc;
4344 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4345 "Entered %s.\n", __func__);
/* Extended dump: high word of the RISC address goes in mb[8]. */
4347 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4348 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4349 mcp->mb[8] = MSW(addr);
4351 mcp->out_mb = MBX_10|MBX_8|MBX_0;
4353 mcp->mb[0] = MBC_DUMP_RISC_RAM;
4354 mcp->out_mb = MBX_0;
/* 64-bit DMA address of the destination buffer, spread over mb[2,3,6,7]. */
4356 mcp->mb[1] = LSW(addr);
4357 mcp->mb[2] = MSW(req_dma);
4358 mcp->mb[3] = LSW(req_dma);
4359 mcp->mb[6] = MSW(MSD(req_dma));
4360 mcp->mb[7] = LSW(MSD(req_dma));
4361 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
4362 if (IS_FWI2_CAPABLE(vha->hw)) {
4363 mcp->mb[4] = MSW(size);
4364 mcp->mb[5] = LSW(size);
4365 mcp->out_mb |= MBX_5|MBX_4;
4367 mcp->mb[4] = LSW(size);
4368 mcp->out_mb |= MBX_4;
4372 mcp->tov = MBX_TOV_SECONDS;
4374 rval = qla2x00_mailbox_command(vha, mcp);
4376 if (rval != QLA_SUCCESS) {
4377 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4378 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4380 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4381 "Done %s.\n", __func__);
4386 /* 84XX Support **************************************************************/
/*
 * DMA-able container for an ISP84xx verify-chip exchange: the request IOCB
 * is written and the response IOCB is read back through the same buffer.
 * NOTE(review): the listing drops the lines between 4388 and 4390 —
 * presumably the two members sit in a union named 'p' (code below accesses
 * mn->p.req / mn->p.rsp); confirm against upstream.
 */
4388 struct cs84xx_mgmt_cmd {
4390 struct verify_chip_entry_84xx req;
4391 struct verify_chip_rsp_84xx rsp;
/*
 * qla84xx_verify_chip()
 *	Issues a VERIFY_CHIP IOCB (120s timeout) to validate/update ISP84xx
 *	firmware. On a completion status other than CS_COMPLETE the options
 *	are adjusted to retry without a firmware update; on success the
 *	operational firmware version is cached under cs84xx->access_lock.
 *	status[0] returns the completion status, status[1] the failure code
 *	when status[0] == CS_VCS_CHIP_FAILURE.
 * NOTE(review): listing gaps — the retry loop construct around the
 *	"retry without update" path and several braces are not visible here.
 */
4396 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4399 struct cs84xx_mgmt_cmd *mn;
4402 unsigned long flags;
4403 struct qla_hw_data *ha = vha->hw;
4405 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4406 "Entered %s.\n", __func__);
4408 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4410 return QLA_MEMORY_ALLOC_FAILED;
/* Force an update only when a new firmware image is pending. */
4414 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4415 /* Diagnostic firmware? */
4416 /* options |= MENLO_DIAG_FW; */
4417 /* We update the firmware with only one data sequence. */
4418 options |= VCO_END_OF_DATA;
4422 memset(mn, 0, sizeof(*mn));
4423 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4424 mn->p.req.entry_count = 1;
4425 mn->p.req.options = cpu_to_le16(options);
4427 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4428 "Dump of Verify Request.\n");
4429 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
/* Firmware verify/update can be slow: explicit 120 second timeout. */
4432 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4433 if (rval != QLA_SUCCESS) {
4434 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4435 "Failed to issue verify IOCB (%x).\n", rval);
4439 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4440 "Dump of Verify Response.\n");
4441 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4444 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4445 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4446 le16_to_cpu(mn->p.rsp.failure_code) : 0;
4447 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4448 "cs=%x fc=%x.\n", status[0], status[1]);
4450 if (status[0] != CS_COMPLETE) {
4451 rval = QLA_FUNCTION_FAILED;
4452 if (!(options & VCO_DONT_UPDATE_FW)) {
4453 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4454 "Firmware update failed. Retrying "
4455 "without update firmware.\n");
4456 options |= VCO_DONT_UPDATE_FW;
4457 options &= ~VCO_FORCE_UPDATE;
4461 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4462 "Firmware updated to %x.\n",
4463 le32_to_cpu(mn->p.rsp.fw_ver));
4465 /* NOTE: we only update OP firmware. */
4466 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4467 ha->cs84xx->op_fw_version =
4468 le32_to_cpu(mn->p.rsp.fw_ver);
4469 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4475 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4477 if (rval != QLA_SUCCESS) {
4478 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4479 "Failed=%x.\n", rval);
4481 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4482 "Done %s.\n", __func__);
/*
 * qla25xx_init_req_que()
 *	Creates/initializes a multiqueue request queue in firmware via
 *	MBC_INITIALIZE_MULTIQ: queue DMA base, length, id, vp index, rid,
 *	qos and paired rsp queue id. Shadow registers are enabled (BIT_13)
 *	when the ISP supports them; the hardware in/out pointers are zeroed
 *	under hardware_lock before the mailbox command is issued.
 *	Returns early when firmware has not been started.
 */
4489 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4492 unsigned long flags;
4494 mbx_cmd_t *mcp = &mc;
4495 struct qla_hw_data *ha = vha->hw;
4497 if (!ha->flags.fw_started)
4500 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4501 "Entered %s.\n", __func__);
4503 if (IS_SHADOW_REG_CAPABLE(ha))
4504 req->options |= BIT_13;
4506 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4507 mcp->mb[1] = req->options;
/* 64-bit queue base address in mb[2,3,6,7]. */
4508 mcp->mb[2] = MSW(LSD(req->dma));
4509 mcp->mb[3] = LSW(LSD(req->dma));
4510 mcp->mb[6] = MSW(MSD(req->dma));
4511 mcp->mb[7] = LSW(MSD(req->dma));
4512 mcp->mb[5] = req->length;
4514 mcp->mb[10] = req->rsp->id;
4515 mcp->mb[12] = req->qos;
4516 mcp->mb[11] = req->vp_idx;
4517 mcp->mb[13] = req->rid;
4518 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4521 mcp->mb[4] = req->id;
4522 /* que in ptr index */
4524 /* que out ptr index */
4525 mcp->mb[9] = *req->out_ptr = 0;
4526 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4527 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4529 mcp->flags = MBX_DMA_OUT;
/* Queue creation is slower than a plain mailbox op: double the timeout. */
4530 mcp->tov = MBX_TOV_SECONDS * 2;
4532 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4534 mcp->in_mb |= MBX_1;
4535 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4536 mcp->out_mb |= MBX_15;
4537 /* debug q create issue in SR-IOV */
4538 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
/* Zero the hardware queue pointers unless BIT_0 (queue update) is set. */
4541 spin_lock_irqsave(&ha->hardware_lock, flags);
4542 if (!(req->options & BIT_0)) {
4543 wrt_reg_dword(req->req_q_in, 0);
4544 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4545 wrt_reg_dword(req->req_q_out, 0);
4547 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4549 rval = qla2x00_mailbox_command(vha, mcp);
4550 if (rval != QLA_SUCCESS) {
4551 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4552 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4554 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4555 "Done %s.\n", __func__);
/*
 * qla25xx_init_rsp_que()
 *	Mirror of qla25xx_init_req_que() for a response queue: issues
 *	MBC_INITIALIZE_MULTIQ with the rsp queue's DMA base, length, id,
 *	rid and MSI-X vector (mb[14]); zeroes the hardware rsp_q_out/in
 *	pointers under hardware_lock unless BIT_0 (update) is set.
 *	Returns early when firmware has not been started.
 */
4562 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4565 unsigned long flags;
4567 mbx_cmd_t *mcp = &mc;
4568 struct qla_hw_data *ha = vha->hw;
4570 if (!ha->flags.fw_started)
4573 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4574 "Entered %s.\n", __func__);
4576 if (IS_SHADOW_REG_CAPABLE(ha))
4577 rsp->options |= BIT_13;
4579 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4580 mcp->mb[1] = rsp->options;
/* 64-bit queue base address in mb[2,3,6,7]. */
4581 mcp->mb[2] = MSW(LSD(rsp->dma));
4582 mcp->mb[3] = LSW(LSD(rsp->dma));
4583 mcp->mb[6] = MSW(MSD(rsp->dma));
4584 mcp->mb[7] = LSW(MSD(rsp->dma));
4585 mcp->mb[5] = rsp->length;
4586 mcp->mb[14] = rsp->msix->entry;
4587 mcp->mb[13] = rsp->rid;
4588 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4591 mcp->mb[4] = rsp->id;
4592 /* que in ptr index */
4593 mcp->mb[8] = *rsp->in_ptr = 0;
4594 /* que out ptr index */
4596 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4597 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4599 mcp->flags = MBX_DMA_OUT;
/* Queue creation is slower than a plain mailbox op: double the timeout. */
4600 mcp->tov = MBX_TOV_SECONDS * 2;
4602 if (IS_QLA81XX(ha)) {
4603 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4604 mcp->in_mb |= MBX_1;
4605 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4606 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4607 mcp->in_mb |= MBX_1;
4608 /* debug q create issue in SR-IOV */
4609 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
/* Zero the hardware queue pointers unless BIT_0 (queue update) is set. */
4612 spin_lock_irqsave(&ha->hardware_lock, flags);
4613 if (!(rsp->options & BIT_0)) {
4614 wrt_reg_dword(rsp->rsp_q_out, 0);
4615 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4616 wrt_reg_dword(rsp->rsp_q_in, 0);
4619 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4621 rval = qla2x00_mailbox_command(vha, mcp);
4622 if (rval != QLA_SUCCESS) {
4623 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4624 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4626 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4627 "Done %s.\n", __func__);
/*
 * qla81xx_idc_ack()
 *	Acknowledges an Inter-Driver Communication (IDC) notification:
 *	issues MBC_IDC_ACK with the QLA_IDC_ACK_REGS caller-supplied
 *	mailbox words copied into mb[1..].
 */
4634 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4638 mbx_cmd_t *mcp = &mc;
4640 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4641 "Entered %s.\n", __func__);
4643 mcp->mb[0] = MBC_IDC_ACK;
4644 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4645 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4647 mcp->tov = MBX_TOV_SECONDS;
4649 rval = qla2x00_mailbox_command(vha, mcp);
4651 if (rval != QLA_SUCCESS) {
4652 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4653 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4655 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4656 "Done %s.\n", __func__);
/*
 * qla81xx_fac_get_sector_size()
 *	Flash Access Control: queries the flash sector size
 *	(FAC_OPT_CMD_GET_SECTOR_SIZE) and stores mb[1] into *sector_size.
 *	Only valid on ISP81xx/83xx/27xx/28xx.
 */
4663 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4667 mbx_cmd_t *mcp = &mc;
4669 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4670 "Entered %s.\n", __func__);
4672 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4673 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4674 return QLA_FUNCTION_FAILED;
4676 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4677 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4678 mcp->out_mb = MBX_1|MBX_0;
4679 mcp->in_mb = MBX_1|MBX_0;
4680 mcp->tov = MBX_TOV_SECONDS;
4682 rval = qla2x00_mailbox_command(vha, mcp);
4684 if (rval != QLA_SUCCESS) {
4685 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4686 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4687 rval, mcp->mb[0], mcp->mb[1]);
4689 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4690 "Done %s.\n", __func__);
/* Sector size is only returned on success. */
4691 *sector_size = mcp->mb[1];
/*
 * qla81xx_fac_do_write_enable()
 *	Flash Access Control: toggles flash write access —
 *	FAC_OPT_CMD_WRITE_ENABLE when 'enable' is nonzero, otherwise
 *	FAC_OPT_CMD_WRITE_PROTECT. Only valid on ISP81xx/83xx/27xx/28xx.
 */
4698 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4702 mbx_cmd_t *mcp = &mc;
4704 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4705 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4706 return QLA_FUNCTION_FAILED;
4708 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4709 "Entered %s.\n", __func__);
4711 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4712 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4713 FAC_OPT_CMD_WRITE_PROTECT;
4714 mcp->out_mb = MBX_1|MBX_0;
4715 mcp->in_mb = MBX_1|MBX_0;
4716 mcp->tov = MBX_TOV_SECONDS;
4718 rval = qla2x00_mailbox_command(vha, mcp);
4720 if (rval != QLA_SUCCESS) {
4721 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4722 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4723 rval, mcp->mb[0], mcp->mb[1]);
4725 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4726 "Done %s.\n", __func__);
/*
 * qla81xx_fac_erase_sector()
 *	Flash Access Control: erases flash sectors in the inclusive range
 *	[start, finish] (32-bit addresses split LSW/MSW across mb[2..5]).
 *	Only valid on ISP81xx/83xx/27xx/28xx.
 */
4733 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4737 mbx_cmd_t *mcp = &mc;
4739 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4740 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4741 return QLA_FUNCTION_FAILED;
4743 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4744 "Entered %s.\n", __func__);
4746 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4747 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4748 mcp->mb[2] = LSW(start);
4749 mcp->mb[3] = MSW(start);
4750 mcp->mb[4] = LSW(finish);
4751 mcp->mb[5] = MSW(finish);
4752 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4753 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4754 mcp->tov = MBX_TOV_SECONDS;
4756 rval = qla2x00_mailbox_command(vha, mcp);
4758 if (rval != QLA_SUCCESS) {
4759 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4760 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4761 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4763 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4764 "Done %s.\n", __func__);
/*
 * qla81xx_fac_semaphore_access()
 *	Flash Access Control: takes or releases the flash semaphore —
 *	FAC_OPT_CMD_LOCK_SEMAPHORE when 'lock' is nonzero, otherwise
 *	FAC_OPT_CMD_UNLOCK_SEMAPHORE. Only valid on ISP81xx/83xx/27xx/28xx.
 * NOTE(review): reuses debug message ids 0x10e2/0x10e3/0x10e4 already used
 *	by qla81xx_fac_erase_sector — log output from the two functions is
 *	indistinguishable by id; confirm whether that is intentional.
 */
4771 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock)
4773 int rval = QLA_SUCCESS;
4775 mbx_cmd_t *mcp = &mc;
4776 struct qla_hw_data *ha = vha->hw;
4778 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
4779 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4782 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4783 "Entered %s.\n", __func__);
4785 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4786 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE :
4787 FAC_OPT_CMD_UNLOCK_SEMAPHORE);
4788 mcp->out_mb = MBX_1|MBX_0;
4789 mcp->in_mb = MBX_1|MBX_0;
4790 mcp->tov = MBX_TOV_SECONDS;
4792 rval = qla2x00_mailbox_command(vha, mcp);
4794 if (rval != QLA_SUCCESS) {
4795 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4796 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4797 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4799 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4800 "Done %s.\n", __func__);
/*
 * qla81xx_restart_mpi_firmware()
 *	Issues MBC_RESTART_MPI_FW to restart the Management Processor
 *	Interface firmware; mb[1] carries additional status on return.
 */
4807 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4811 mbx_cmd_t *mcp = &mc;
4813 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4814 "Entered %s.\n", __func__);
4816 mcp->mb[0] = MBC_RESTART_MPI_FW;
4817 mcp->out_mb = MBX_0;
4818 mcp->in_mb = MBX_0|MBX_1;
4819 mcp->tov = MBX_TOV_SECONDS;
4821 rval = qla2x00_mailbox_command(vha, mcp);
4823 if (rval != QLA_SUCCESS) {
4824 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4825 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4826 rval, mcp->mb[0], mcp->mb[1]);
4828 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4829 "Done %s.\n", __func__);
/*
 * qla82xx_set_driver_version()
 *	P3P (ISP82xx) variant of reporting the driver version string to
 *	firmware: MBC_SET_RNID_PARAMS / RNID_TYPE_SET_VERSION with the
 *	string packed two bytes per mailbox word into mb[4..15]; unused
 *	trailing words are still flagged in out_mb.
 */
4836 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4840 mbx_cmd_t *mcp = &mc;
4844 struct qla_hw_data *ha = vha->hw;
4846 if (!IS_P3P_TYPE(ha))
4847 return QLA_FUNCTION_FAILED;
4849 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4850 "Entered %s.\n", __func__);
/* Treat the version string as a sequence of little-endian 16-bit words. */
4852 str = (__force __le16 *)version;
4853 len = strlen(version);
4855 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4856 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4857 mcp->out_mb = MBX_1|MBX_0;
4858 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4859 mcp->mb[i] = le16_to_cpup(str);
4860 mcp->out_mb |= 1<<i;
4862 for (; i < 16; i++) {
4864 mcp->out_mb |= 1<<i;
4866 mcp->in_mb = MBX_1|MBX_0;
4867 mcp->tov = MBX_TOV_SECONDS;
4869 rval = qla2x00_mailbox_command(vha, mcp);
4871 if (rval != QLA_SUCCESS) {
4872 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4873 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4875 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4876 "Done %s.\n", __func__);
/*
 * qla25xx_set_driver_version()
 *	DMA variant of reporting the driver version to firmware:
 *	MBC_SET_RNID_PARAMS / RNID_TYPE_SET_VERSION with the version string
 *	placed in a DMA-pool buffer behind a 4-byte header ("\x7\x3\x11\x0");
 *	mb[1] low byte carries the buffer length in dwords.
 * NOTE(review): listing gaps — the 'dwlen' computation and several
 *	declarations (str, str_dma, len) are among the dropped lines.
 */
4883 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4887 mbx_cmd_t *mcp = &mc;
4892 struct qla_hw_data *ha = vha->hw;
4894 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4896 return QLA_FUNCTION_FAILED;
4898 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4899 "Entered %s.\n", __func__);
4901 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4903 ql_log(ql_log_warn, vha, 0x117f,
4904 "Failed to allocate driver version param.\n");
4905 return QLA_MEMORY_ALLOC_FAILED;
/* 4-byte header precedes the version text in the DMA buffer. */
4908 memcpy(str, "\x7\x3\x11\x0", 4);
4910 len = dwlen * 4 - 4;
4911 memset(str + 4, 0, len);
/* Truncate the copy to the buffer's payload capacity. */
4912 if (len > strlen(version))
4913 len = strlen(version);
4914 memcpy(str + 4, version, len);
4916 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4917 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4918 mcp->mb[2] = MSW(LSD(str_dma));
4919 mcp->mb[3] = LSW(LSD(str_dma));
4920 mcp->mb[6] = MSW(MSD(str_dma));
4921 mcp->mb[7] = LSW(MSD(str_dma));
4922 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4923 mcp->in_mb = MBX_1|MBX_0;
4924 mcp->tov = MBX_TOV_SECONDS;
4926 rval = qla2x00_mailbox_command(vha, mcp);
4928 if (rval != QLA_SUCCESS) {
4929 ql_dbg(ql_dbg_mbx, vha, 0x1180,
4930 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4932 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4933 "Done %s.\n", __func__);
4936 dma_pool_free(ha->s_dma_pool, str, str_dma);
/*
 * qla24xx_get_port_login_templ()
 *	Reads the port-login (PLOGI) payload template from firmware via
 *	MBC_GET_RNID_PARAMS / RNID_TYPE_PORT_LOGIN into the caller's DMA
 *	buffer (size passed in dwords via mb[8]); on success the buffer is
 *	byte-swapped in place from little-endian to host order.
 */
4942 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4943 void *buf, uint16_t bufsiz)
4947 mbx_cmd_t *mcp = &mc;
4950 if (!IS_FWI2_CAPABLE(vha->hw))
4951 return QLA_FUNCTION_FAILED;
4953 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4954 "Entered %s.\n", __func__);
4956 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4957 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4958 mcp->mb[2] = MSW(buf_dma);
4959 mcp->mb[3] = LSW(buf_dma);
4960 mcp->mb[6] = MSW(MSD(buf_dma));
4961 mcp->mb[7] = LSW(MSD(buf_dma));
4962 mcp->mb[8] = bufsiz/4;
4963 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4964 mcp->in_mb = MBX_1|MBX_0;
4965 mcp->tov = MBX_TOV_SECONDS;
4967 rval = qla2x00_mailbox_command(vha, mcp);
4969 if (rval != QLA_SUCCESS) {
4970 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4971 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4973 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4974 "Done %s.\n", __func__);
/* Convert template dwords to host endianness in place. */
4975 bp = (uint32_t *) buf;
4976 for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4977 *bp = le32_to_cpu((__force __le32)*bp);
4983 #define PUREX_CMD_COUNT 4
/*
 * qla25xx_set_els_cmds_supported()
 *	Tells firmware which unsolicited (PUREX) ELS opcodes the driver
 *	wants passed up: builds a bitmap (one bit per opcode, 8 opcodes per
 *	byte) in a coherent DMA buffer and registers it via
 *	MBC_SET_RNID_PARAMS / RNID_TYPE_ELS_CMD. Opcodes are enabled
 *	conditionally: ELS_RDP (ql2xrdpenable), ELS_FPIN (scm_supported_f),
 *	ELS_AUTH_ELS (edif_enabled). Limited to 25xx/2031/27xx/28xx.
 */
4985 qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
4989 mbx_cmd_t *mcp = &mc;
4990 uint8_t *els_cmd_map;
4991 uint8_t active_cnt = 0;
4992 dma_addr_t els_cmd_map_dma;
4993 uint8_t cmd_opcode[PUREX_CMD_COUNT];
4994 uint8_t i, index, purex_bit;
4995 struct qla_hw_data *ha = vha->hw;
4997 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) &&
4998 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5001 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197,
5002 "Entered %s.\n", __func__);
5004 els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
5005 &els_cmd_map_dma, GFP_KERNEL);
5007 ql_log(ql_log_warn, vha, 0x7101,
5008 "Failed to allocate RDP els command param.\n");
5009 return QLA_MEMORY_ALLOC_FAILED;
5012 /* List of Purex ELS */
5013 if (ql2xrdpenable) {
5014 cmd_opcode[active_cnt] = ELS_RDP;
5017 if (ha->flags.scm_supported_f) {
5018 cmd_opcode[active_cnt] = ELS_FPIN;
5021 if (ha->flags.edif_enabled) {
5022 cmd_opcode[active_cnt] = ELS_AUTH_ELS;
/* Set one bit per enabled opcode: byte = opcode/8, bit = opcode%8. */
5026 for (i = 0; i < active_cnt; i++) {
5027 index = cmd_opcode[i] / 8;
5028 purex_bit = cmd_opcode[i] % 8;
5029 els_cmd_map[index] |= 1 << purex_bit;
5032 mcp->mb[0] = MBC_SET_RNID_PARAMS;
5033 mcp->mb[1] = RNID_TYPE_ELS_CMD << 8;
5034 mcp->mb[2] = MSW(LSD(els_cmd_map_dma));
5035 mcp->mb[3] = LSW(LSD(els_cmd_map_dma));
5036 mcp->mb[6] = MSW(MSD(els_cmd_map_dma));
5037 mcp->mb[7] = LSW(MSD(els_cmd_map_dma));
5038 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5039 mcp->in_mb = MBX_1|MBX_0;
5040 mcp->tov = MBX_TOV_SECONDS;
5041 mcp->flags = MBX_DMA_OUT;
5042 mcp->buf_size = ELS_CMD_MAP_SIZE;
5043 rval = qla2x00_mailbox_command(vha, mcp);
5045 if (rval != QLA_SUCCESS) {
5046 ql_dbg(ql_dbg_mbx, vha, 0x118d,
5047 "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]);
5049 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
5050 "Done %s.\n", __func__);
5053 dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
5054 els_cmd_map, els_cmd_map_dma);
/*
 * qla2x00_read_asic_temperature()
 *	Reads the ASIC temperature via MBC_GET_RNID_PARAMS /
 *	RNID_TYPE_ASIC_TEMP; the value is returned in mb[1].
 * NOTE(review): the line storing the result into *temp (original line
 *	5079/5080 region) is among the lines dropped from this listing.
 */
5060 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
5064 mbx_cmd_t *mcp = &mc;
5066 if (!IS_FWI2_CAPABLE(vha->hw))
5067 return QLA_FUNCTION_FAILED;
5069 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
5070 "Entered %s.\n", __func__);
5072 mcp->mb[0] = MBC_GET_RNID_PARAMS;
5073 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
5074 mcp->out_mb = MBX_1|MBX_0;
5075 mcp->in_mb = MBX_1|MBX_0;
5076 mcp->tov = MBX_TOV_SECONDS;
5078 rval = qla2x00_mailbox_command(vha, mcp);
5081 if (rval != QLA_SUCCESS) {
5082 ql_dbg(ql_dbg_mbx, vha, 0x115a,
5083 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
5085 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
5086 "Done %s.\n", __func__);
/*
 * qla2x00_read_sfp()
 *	Reads SFP transceiver data over the I2C sideband via MBC_READ_SFP
 *	into the caller's DMA buffer. A MBS_COMMAND_ERROR with mb[1]==0x22
 *	is mapped to QLA_INTERFACE_ERROR (no SFP module present).
 * NOTE(review): the lines loading dev/off/len/opt into mb[1,8,9,10]
 *	(between 5115 and 5119) were dropped from this listing.
 */
5093 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5094 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5098 mbx_cmd_t *mcp = &mc;
5099 struct qla_hw_data *ha = vha->hw;
5101 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
5102 "Entered %s.\n", __func__);
5104 if (!IS_FWI2_CAPABLE(ha))
5105 return QLA_FUNCTION_FAILED;
5110 mcp->mb[0] = MBC_READ_SFP;
5112 mcp->mb[2] = MSW(LSD(sfp_dma));
5113 mcp->mb[3] = LSW(LSD(sfp_dma));
5114 mcp->mb[6] = MSW(MSD(sfp_dma));
5115 mcp->mb[7] = LSW(MSD(sfp_dma));
5119 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5120 mcp->in_mb = MBX_1|MBX_0;
5121 mcp->tov = MBX_TOV_SECONDS;
5123 rval = qla2x00_mailbox_command(vha, mcp);
5128 if (rval != QLA_SUCCESS) {
5129 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
5130 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5131 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
5132 /* sfp is not there */
5133 rval = QLA_INTERFACE_ERROR;
5136 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
5137 "Done %s.\n", __func__);
/*
 * qla2x00_write_sfp()
 *	Write counterpart of qla2x00_read_sfp(): sends the caller's DMA
 *	buffer to the SFP transceiver via MBC_WRITE_SFP.
 * NOTE(review): as with read_sfp, the lines loading dev/off/len/opt into
 *	mb[1,8,9,10] (between 5169 and 5173) were dropped from this listing.
 */
5144 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5145 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5149 mbx_cmd_t *mcp = &mc;
5150 struct qla_hw_data *ha = vha->hw;
5152 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
5153 "Entered %s.\n", __func__);
5155 if (!IS_FWI2_CAPABLE(ha))
5156 return QLA_FUNCTION_FAILED;
5164 mcp->mb[0] = MBC_WRITE_SFP;
5166 mcp->mb[2] = MSW(LSD(sfp_dma));
5167 mcp->mb[3] = LSW(LSD(sfp_dma));
5168 mcp->mb[6] = MSW(MSD(sfp_dma));
5169 mcp->mb[7] = LSW(MSD(sfp_dma));
5173 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5174 mcp->in_mb = MBX_1|MBX_0;
5175 mcp->tov = MBX_TOV_SECONDS;
5177 rval = qla2x00_mailbox_command(vha, mcp);
5179 if (rval != QLA_SUCCESS) {
5180 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
5181 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5183 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
5184 "Done %s.\n", __func__);
/*
 * qla2x00_get_xgmac_stats()
 *	Fetches XGMAC (10G Ethernet MAC) statistics into the caller's DMA
 *	buffer on CNA-capable adapters. Sizes are exchanged in dwords:
 *	mb[8] = size_in_bytes >> 2 on input, and on success *actual_size is
 *	reconstructed in bytes from the returned mb[2] dword count.
 */
5191 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
5192 uint16_t size_in_bytes, uint16_t *actual_size)
5196 mbx_cmd_t *mcp = &mc;
5198 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
5199 "Entered %s.\n", __func__);
5201 if (!IS_CNA_CAPABLE(vha->hw))
5202 return QLA_FUNCTION_FAILED;
5204 mcp->mb[0] = MBC_GET_XGMAC_STATS;
5205 mcp->mb[2] = MSW(stats_dma);
5206 mcp->mb[3] = LSW(stats_dma);
5207 mcp->mb[6] = MSW(MSD(stats_dma));
5208 mcp->mb[7] = LSW(MSD(stats_dma));
5209 mcp->mb[8] = size_in_bytes >> 2;
5210 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
5211 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5212 mcp->tov = MBX_TOV_SECONDS;
5214 rval = qla2x00_mailbox_command(vha, mcp);
5216 if (rval != QLA_SUCCESS) {
5217 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
5218 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5219 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5221 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
5222 "Done %s.\n", __func__);
5225 *actual_size = mcp->mb[2] << 2;
/*
 * qla2x00_get_dcbx_params()
 *	Fetches the DCBX parameter TLV block into the caller's DMA buffer
 *	on CNA-capable adapters via MBC_GET_DCBX_PARAMS.
 */
5232 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
5237 mbx_cmd_t *mcp = &mc;
5239 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
5240 "Entered %s.\n", __func__);
5242 if (!IS_CNA_CAPABLE(vha->hw))
5243 return QLA_FUNCTION_FAILED;
5245 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
5247 mcp->mb[2] = MSW(tlv_dma);
5248 mcp->mb[3] = LSW(tlv_dma);
5249 mcp->mb[6] = MSW(MSD(tlv_dma));
5250 mcp->mb[7] = LSW(MSD(tlv_dma));
5252 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5253 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5254 mcp->tov = MBX_TOV_SECONDS;
5256 rval = qla2x00_mailbox_command(vha, mcp);
5258 if (rval != QLA_SUCCESS) {
5259 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
5260 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5261 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5263 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
5264 "Done %s.\n", __func__);
/*
 * qla2x00_read_ram_word()
 *	Reads one 32-bit word of RISC RAM via MBC_READ_RAM_EXTENDED;
 *	the word is returned as mb[3]:mb[2] (high:low) and stored in *data.
 */
5271 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
5275 mbx_cmd_t *mcp = &mc;
5277 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
5278 "Entered %s.\n", __func__);
5280 if (!IS_FWI2_CAPABLE(vha->hw))
5281 return QLA_FUNCTION_FAILED;
5283 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
5284 mcp->mb[1] = LSW(risc_addr);
5285 mcp->mb[8] = MSW(risc_addr);
5286 mcp->out_mb = MBX_8|MBX_1|MBX_0;
5287 mcp->in_mb = MBX_3|MBX_2|MBX_0;
5288 mcp->tov = MBX_TOV_SECONDS;
5290 rval = qla2x00_mailbox_command(vha, mcp);
5291 if (rval != QLA_SUCCESS) {
5292 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
5293 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5295 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
5296 "Done %s.\n", __func__);
/* Recombine the two 16-bit halves into the 32-bit word. */
5297 *data = mcp->mb[3] << 16 | mcp->mb[2];
/*
 * qla2x00_loopback_test()
 *	Runs the diagnostic loopback (MBC_DIAGNOSTIC_LOOP_BACK) with
 *	64-bit addressing (BIT_6 in mb[1]): send/receive DMA buffers,
 *	transfer size and iteration count are loaded into the mailbox set,
 *	and the raw 64-byte mailbox result is copied back to 'mresp' for
 *	the caller to interpret.
 */
5304 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5309 mbx_cmd_t *mcp = &mc;
5311 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
5312 "Entered %s.\n", __func__);
5314 memset(mcp->mb, 0 , sizeof(mcp->mb));
5315 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
5316 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing
5318 /* transfer count */
5319 mcp->mb[10] = LSW(mreq->transfer_size);
5320 mcp->mb[11] = MSW(mreq->transfer_size);
5322 /* send data address */
5323 mcp->mb[14] = LSW(mreq->send_dma);
5324 mcp->mb[15] = MSW(mreq->send_dma);
5325 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5326 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5328 /* receive data address */
5329 mcp->mb[16] = LSW(mreq->rcv_dma);
5330 mcp->mb[17] = MSW(mreq->rcv_dma);
5331 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5332 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5334 /* Iteration count */
5335 mcp->mb[18] = LSW(mreq->iteration_count);
5336 mcp->mb[19] = MSW(mreq->iteration_count);
5338 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
5339 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5340 if (IS_CNA_CAPABLE(vha->hw))
5341 mcp->out_mb |= MBX_2;
5342 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
5344 mcp->buf_size = mreq->transfer_size;
5345 mcp->tov = MBX_TOV_SECONDS;
5346 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5348 rval = qla2x00_mailbox_command(vha, mcp);
5350 if (rval != QLA_SUCCESS) {
5351 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
5352 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
5353 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
5354 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
5356 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
5357 "Done %s.\n", __func__);
5360 /* Copy mailbox information */
5361 memcpy( mresp, mcp->mb, 64);
/*
 * qla2x00_echo_test()
 *	Runs the diagnostic ECHO test (MBC_DIAGNOSTIC_ECHO) with 64-bit
 *	addressing; on CNA adapters the FCoE FCF index goes in mb[2] and
 *	the receive DMA address is loaded explicitly. The raw 64-byte
 *	mailbox result is copied back to 'mresp'.
 */
5366 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5371 mbx_cmd_t *mcp = &mc;
5372 struct qla_hw_data *ha = vha->hw;
5374 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
5375 "Entered %s.\n", __func__);
5377 memset(mcp->mb, 0 , sizeof(mcp->mb));
5378 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
5379 /* BIT_6 specifies 64bit address */
5380 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
5381 if (IS_CNA_CAPABLE(ha)) {
5382 mcp->mb[2] = vha->fcoe_fcf_idx;
5384 mcp->mb[16] = LSW(mreq->rcv_dma);
5385 mcp->mb[17] = MSW(mreq->rcv_dma);
5386 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5387 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5389 mcp->mb[10] = LSW(mreq->transfer_size);
/* Send buffer 64-bit DMA address in mb[14,15,20,21]. */
5391 mcp->mb[14] = LSW(mreq->send_dma);
5392 mcp->mb[15] = MSW(mreq->send_dma);
5393 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5394 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5396 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5397 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5398 if (IS_CNA_CAPABLE(ha))
5399 mcp->out_mb |= MBX_2;
5402 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5403 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5404 mcp->in_mb |= MBX_1;
5405 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
5407 mcp->in_mb |= MBX_3;
5409 mcp->tov = MBX_TOV_SECONDS;
5410 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5411 mcp->buf_size = mreq->transfer_size;
5413 rval = qla2x00_mailbox_command(vha, mcp);
5415 if (rval != QLA_SUCCESS) {
5416 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5417 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5418 rval, mcp->mb[0], mcp->mb[1]);
5420 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5421 "Done %s.\n", __func__);
5424 /* Copy mailbox information */
5425 memcpy(mresp, mcp->mb, 64);
/*
 * qla84xx_reset_chip - issue MBC_ISP84XX_RESET to reset the ISP84xx chip.
 * @enable_diagnostic: passed through in mb[1]; per the debug message it
 *	selects diagnostic mode after reset.
 */
5430 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5434 mbx_cmd_t *mcp = &mc;
5436 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5437 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5439 mcp->mb[0] = MBC_ISP84XX_RESET;
5440 mcp->mb[1] = enable_diagnostic;
5441 mcp->out_mb = MBX_1|MBX_0;
5442 mcp->in_mb = MBX_1|MBX_0;
5443 mcp->tov = MBX_TOV_SECONDS;
5444 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5445 rval = qla2x00_mailbox_command(vha, mcp);
5447 if (rval != QLA_SUCCESS)
5448 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5450 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5451 "Done %s.\n", __func__);
/*
 * qla2x00_write_ram_word - write one 32-bit word to RISC RAM via
 * MBC_WRITE_RAM_WORD_EXTENDED. The 32-bit address is split across
 * mb[1] (low) and mb[8] (high); data across mb[2]/mb[3].
 * Only supported on FWI2-capable (24xx+) adapters.
 */
5457 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5461 mbx_cmd_t *mcp = &mc;
5463 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5464 "Entered %s.\n", __func__);
5466 if (!IS_FWI2_CAPABLE(vha->hw))
5467 return QLA_FUNCTION_FAILED;
5469 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5470 mcp->mb[1] = LSW(risc_addr);
5471 mcp->mb[2] = LSW(data);
5472 mcp->mb[3] = MSW(data);
5473 mcp->mb[8] = MSW(risc_addr);
5474 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5475 mcp->in_mb = MBX_1|MBX_0;
5476 mcp->tov = MBX_TOV_SECONDS;
5478 rval = qla2x00_mailbox_command(vha, mcp);
5479 if (rval != QLA_SUCCESS) {
5480 ql_dbg(ql_dbg_mbx, vha, 0x1101,
5481 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5482 rval, mcp->mb[0], mcp->mb[1]);
5484 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5485 "Done %s.\n", __func__);
/*
 * qla81xx_write_mpi_register - write an MPI register by banging the
 * mailbox registers directly (no mcp/qla2x00_mailbox_command path).
 * mb[0..3] from the caller are placed in mailbox1..4, the host interrupt
 * is raised, then the routine busy-polls host_status for the RISC
 * interrupt and harvests mailbox0 as the completion status.
 * NOTE(review): the poll loop appears to spin without delay for up to
 * 6000000 iterations; timing lines may be missing from this view.
 */
5492 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5495 uint32_t stat, timer;
5497 struct qla_hw_data *ha = vha->hw;
5498 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5502 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5503 "Entered %s.\n", __func__);
5505 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5507 /* Write the MBC data to the registers */
5508 wrt_reg_word(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5509 wrt_reg_word(&reg->mailbox1, mb[0]);
5510 wrt_reg_word(&reg->mailbox2, mb[1]);
5511 wrt_reg_word(&reg->mailbox3, mb[2]);
5512 wrt_reg_word(&reg->mailbox4, mb[3]);
5514 wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
5516 /* Poll for MBC interrupt */
5517 for (timer = 6000000; timer; timer--) {
5518 /* Check for pending interrupts. */
5519 stat = rd_reg_dword(&reg->host_status);
5520 if (stat & HSRX_RISC_INT) {
/* Mailbox-completion interrupt codes: latch status and ack the RISC. */
5523 if (stat == 0x1 || stat == 0x2 ||
5524 stat == 0x10 || stat == 0x11) {
5525 set_bit(MBX_INTERRUPT,
5526 &ha->mbx_cmd_flags);
5527 mb0 = rd_reg_word(&reg->mailbox0);
5528 wrt_reg_dword(&reg->hccr,
5529 HCCRX_CLR_RISC_INT);
5530 rd_reg_dword(&reg->hccr);
/* Success only if the completion interrupt actually fired. */
5537 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5538 rval = mb0 & MBS_MASK;
5540 rval = QLA_FUNCTION_FAILED;
5542 if (rval != QLA_SUCCESS) {
5543 ql_dbg(ql_dbg_mbx, vha, 0x1104,
5544 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5546 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5547 "Done %s.\n", __func__);
5553 /* Set the specified data rate */
/*
 * qla2x00_set_data_rate - program the link speed via MBC_DATA_RATE.
 * Validates ha->set_data_rate against the supported PORT_SPEED_* values
 * and falls back to PORT_SPEED_AUTO (with a warning) for anything else.
 */
5555 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
5559 mbx_cmd_t *mcp = &mc;
5560 struct qla_hw_data *ha = vha->hw;
5563 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5564 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
5567 if (!IS_FWI2_CAPABLE(ha))
5568 return QLA_FUNCTION_FAILED;
5570 memset(mcp, 0, sizeof(*mcp));
5571 switch (ha->set_data_rate) {
5572 case PORT_SPEED_AUTO:
5573 case PORT_SPEED_4GB:
5574 case PORT_SPEED_8GB:
5575 case PORT_SPEED_16GB:
5576 case PORT_SPEED_32GB:
5577 val = ha->set_data_rate;
/* Unrecognized speed: warn and force autonegotiation. */
5580 ql_log(ql_log_warn, vha, 0x1199,
5581 "Unrecognized speed setting:%d. Setting Autoneg\n",
5583 val = ha->set_data_rate = PORT_SPEED_AUTO;
5587 mcp->mb[0] = MBC_DATA_RATE;
5591 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5592 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5593 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5594 mcp->in_mb |= MBX_4|MBX_3;
5595 mcp->tov = MBX_TOV_SECONDS;
5597 rval = qla2x00_mailbox_command(vha, mcp);
5598 if (rval != QLA_SUCCESS) {
5599 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5600 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
/* mb[1] == 0x7 indicates no usable rate was reported; skip logging it. */
5602 if (mcp->mb[1] != 0x7)
5603 ql_dbg(ql_dbg_mbx, vha, 0x1179,
5604 "Speed set:0x%x\n", mcp->mb[1]);
5606 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5607 "Done %s.\n", __func__);
/*
 * qla2x00_get_data_rate - query the current link data rate via
 * MBC_DATA_RATE/QLA_GET_DATA_RATE and cache it in ha->link_data_rate.
 * On 83xx/27xx/28xx, mb[4] BIT_0 reports FEC enabled.
 */
5614 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5618 mbx_cmd_t *mcp = &mc;
5619 struct qla_hw_data *ha = vha->hw;
5621 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5622 "Entered %s.\n", __func__);
5624 if (!IS_FWI2_CAPABLE(ha))
5625 return QLA_FUNCTION_FAILED;
5627 mcp->mb[0] = MBC_DATA_RATE;
5628 mcp->mb[1] = QLA_GET_DATA_RATE;
5629 mcp->out_mb = MBX_1|MBX_0;
5630 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5631 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5632 mcp->in_mb |= MBX_4|MBX_3;
5633 mcp->tov = MBX_TOV_SECONDS;
5635 rval = qla2x00_mailbox_command(vha, mcp);
5636 if (rval != QLA_SUCCESS) {
5637 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5638 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
/* mb[1] == 0x7 means "rate not available"; otherwise cache it. */
5640 if (mcp->mb[1] != 0x7)
5641 ha->link_data_rate = mcp->mb[1];
5643 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
5644 if (mcp->mb[4] & BIT_0)
5645 ql_log(ql_log_info, vha, 0x11a2,
5646 "FEC=enabled (data rate).\n");
5649 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5650 "Done %s.\n", __func__);
/* NOTE(review): this repeats the assignment made above and looks
 * redundant — confirm against upstream before removing. */
5651 if (mcp->mb[1] != 0x7)
5652 ha->link_data_rate = mcp->mb[1];
/*
 * qla81xx_get_port_config - read the port configuration words via
 * MBC_GET_PORT_CONFIG and copy mb[1..4] into the caller's mb[] array.
 * Supported only on 81xx/83xx/8044/27xx/28xx parts.
 */
5659 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5663 mbx_cmd_t *mcp = &mc;
5664 struct qla_hw_data *ha = vha->hw;
5666 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5667 "Entered %s.\n", __func__);
5669 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5670 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5671 return QLA_FUNCTION_FAILED;
5672 mcp->mb[0] = MBC_GET_PORT_CONFIG;
5673 mcp->out_mb = MBX_0;
5674 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5675 mcp->tov = MBX_TOV_SECONDS;
5678 rval = qla2x00_mailbox_command(vha, mcp);
5680 if (rval != QLA_SUCCESS) {
5681 ql_dbg(ql_dbg_mbx, vha, 0x110a,
5682 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5684 /* Copy all bits to preserve original value */
5685 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5687 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5688 "Done %s.\n", __func__);
/*
 * qla81xx_set_port_config - write the four port-configuration words
 * (caller's mb[0..3]) into mailbox registers 1..4 and issue
 * MBC_SET_PORT_CONFIG. Counterpart of qla81xx_get_port_config().
 */
5694 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5698 mbx_cmd_t *mcp = &mc;
5700 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5701 "Entered %s.\n", __func__);
5703 mcp->mb[0] = MBC_SET_PORT_CONFIG;
5704 /* Copy all bits to preserve original setting */
5705 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5706 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5708 mcp->tov = MBX_TOV_SECONDS;
5710 rval = qla2x00_mailbox_command(vha, mcp);
5712 if (rval != QLA_SUCCESS) {
5713 ql_dbg(ql_dbg_mbx, vha, 0x110d,
5714 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5716 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5717 "Done %s.\n", __func__);
/*
 * qla24xx_set_fcp_prio - set the FCP priority (low 4 bits of @priority)
 * for the port identified by @loop_id via MBC_PORT_PARAMS.
 * Restricted to 24xx/25xx adapters; mb[9] carries the vport index.
 */
5724 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5729 mbx_cmd_t *mcp = &mc;
5730 struct qla_hw_data *ha = vha->hw;
5732 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5733 "Entered %s.\n", __func__);
5735 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5736 return QLA_FUNCTION_FAILED;
5738 mcp->mb[0] = MBC_PORT_PARAMS;
5739 mcp->mb[1] = loop_id;
/* mb[2] setup depends on whether FCP priority is enabled (lines not
 * visible in this view). */
5740 if (ha->flags.fcp_prio_enabled)
5744 mcp->mb[4] = priority & 0xf;
5745 mcp->mb[9] = vha->vp_idx;
5746 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5747 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5748 mcp->tov = MBX_TOV_SECONDS;
5750 rval = qla2x00_mailbox_command(vha, mcp);
5758 if (rval != QLA_SUCCESS) {
5759 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5761 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5762 "Done %s.\n", __func__);
/*
 * qla2x00_get_thermal_temp - read the adapter temperature.
 * Dispatches by hardware family: 25xx variants read via SFP/thermistor
 * (subsystem-ID specific addressing), 82xx/8044 read a hardware register,
 * and other supported parts use the ASIC temperature mailbox command.
 * Unsupported cards log a debug message and fail.
 */
5769 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5771 int rval = QLA_FUNCTION_FAILED;
5772 struct qla_hw_data *ha = vha->hw;
5775 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5776 ql_dbg(ql_dbg_mbx, vha, 0x1150,
5777 "Thermal not supported by this card.\n");
5781 if (IS_QLA25XX(ha)) {
/* QLogic-branded 25xx (subdevice 0x0175): thermistor via SFP read. */
5782 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5783 ha->pdev->subsystem_device == 0x0175) {
5784 rval = qla2x00_read_sfp(vha, 0, &byte,
5785 0x98, 0x1, 1, BIT_13|BIT_0);
/* HP-branded 25xx (subdevice 0x338e): different thermistor access bits. */
5789 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5790 ha->pdev->subsystem_device == 0x338e) {
5791 rval = qla2x00_read_sfp(vha, 0, &byte,
5792 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5796 ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5797 "Thermal not supported by this card.\n");
5801 if (IS_QLA82XX(ha)) {
5802 *temp = qla82xx_read_temperature(vha);
5805 } else if (IS_QLA8044(ha)) {
5806 *temp = qla8044_read_temperature(vha);
5811 rval = qla2x00_read_asic_temperature(vha, temp);
/*
 * qla82xx_mbx_intr_enable - enable firmware interrupts on an 82xx part
 * via MBC_TOGGLE_INTERRUPT (mb[1] selects enable; that line is not
 * visible in this view). Requires an FWI2-capable adapter.
 */
5816 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5819 struct qla_hw_data *ha = vha->hw;
5821 mbx_cmd_t *mcp = &mc;
5823 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5824 "Entered %s.\n", __func__);
5826 if (!IS_FWI2_CAPABLE(ha))
5827 return QLA_FUNCTION_FAILED;
5829 memset(mcp, 0, sizeof(mbx_cmd_t));
5830 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5833 mcp->out_mb = MBX_1|MBX_0;
5835 mcp->tov = MBX_TOV_SECONDS;
5838 rval = qla2x00_mailbox_command(vha, mcp);
5839 if (rval != QLA_SUCCESS) {
5840 ql_dbg(ql_dbg_mbx, vha, 0x1016,
5841 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5843 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5844 "Done %s.\n", __func__);
/*
 * qla82xx_mbx_intr_disable - disable firmware interrupts on a P3P-type
 * (82xx-family) part via MBC_TOGGLE_INTERRUPT. Counterpart of
 * qla82xx_mbx_intr_enable(), but gated on IS_P3P_TYPE instead.
 */
5851 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5854 struct qla_hw_data *ha = vha->hw;
5856 mbx_cmd_t *mcp = &mc;
5858 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5859 "Entered %s.\n", __func__);
5861 if (!IS_P3P_TYPE(ha))
5862 return QLA_FUNCTION_FAILED;
5864 memset(mcp, 0, sizeof(mbx_cmd_t));
5865 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5868 mcp->out_mb = MBX_1|MBX_0;
5870 mcp->tov = MBX_TOV_SECONDS;
5873 rval = qla2x00_mailbox_command(vha, mcp);
5874 if (rval != QLA_SUCCESS) {
5875 ql_dbg(ql_dbg_mbx, vha, 0x100c,
5876 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5878 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5879 "Done %s.\n", __func__);
/*
 * qla82xx_md_get_template_size - query the minidump template size via
 * the 32-bit MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE subcommand RQST_TMPLT_SIZE.
 * On success, stores (mb[3]<<16 | mb[2]) into ha->md_template_size;
 * a zero size is treated as failure.
 */
5886 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5888 struct qla_hw_data *ha = vha->hw;
5890 mbx_cmd_t *mcp = &mc;
5891 int rval = QLA_FUNCTION_FAILED;
5893 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5894 "Entered %s.\n", __func__);
5896 memset(mcp->mb, 0 , sizeof(mcp->mb));
/* 32-bit command/subcode values are split across two 16-bit mailboxes. */
5897 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5898 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5899 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5900 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5902 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5903 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5904 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5906 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5907 mcp->tov = MBX_TOV_SECONDS;
5908 rval = qla2x00_mailbox_command(vha, mcp);
5910 /* Always copy back return mailbox values. */
5911 if (rval != QLA_SUCCESS) {
5912 ql_dbg(ql_dbg_mbx, vha, 0x1120,
5913 "mailbox command FAILED=0x%x, subcode=%x.\n",
5914 (mcp->mb[1] << 16) | mcp->mb[0],
5915 (mcp->mb[3] << 16) | mcp->mb[2]);
5917 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5918 "Done %s.\n", __func__);
5919 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5920 if (!ha->md_template_size) {
5921 ql_dbg(ql_dbg_mbx, vha, 0x1122,
5922 "Null template size obtained.\n");
5923 rval = QLA_FUNCTION_FAILED;
/*
 * qla82xx_md_get_template - fetch the minidump template into a DMA
 * buffer of ha->md_template_size bytes allocated here (coherent mapping
 * kept in ha->md_tmplt_hdr / ha->md_tmplt_hdr_dma). Uses the RQST_TMPLT
 * subcommand with the 64-bit buffer address in mb[4..7] and the size in
 * mb[8]/mb[9].
 */
5930 qla82xx_md_get_template(scsi_qla_host_t *vha)
5932 struct qla_hw_data *ha = vha->hw;
5934 mbx_cmd_t *mcp = &mc;
5935 int rval = QLA_FUNCTION_FAILED;
5937 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5938 "Entered %s.\n", __func__);
5940 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5941 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5942 if (!ha->md_tmplt_hdr) {
5943 ql_log(ql_log_warn, vha, 0x1124,
5944 "Unable to allocate memory for Minidump template.\n");
5948 memset(mcp->mb, 0 , sizeof(mcp->mb));
5949 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5950 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5951 mcp->mb[2] = LSW(RQST_TMPLT);
5952 mcp->mb[3] = MSW(RQST_TMPLT);
/* 64-bit DMA address of the template buffer, 16 bits per mailbox. */
5953 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5954 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5955 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5956 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5957 mcp->mb[8] = LSW(ha->md_template_size);
5958 mcp->mb[9] = MSW(ha->md_template_size);
5960 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5961 mcp->tov = MBX_TOV_SECONDS;
5962 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5963 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5964 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5965 rval = qla2x00_mailbox_command(vha, mcp);
5967 if (rval != QLA_SUCCESS) {
5968 ql_dbg(ql_dbg_mbx, vha, 0x1125,
5969 "mailbox command FAILED=0x%x, subcode=%x.\n",
5970 ((mcp->mb[1] << 16) | mcp->mb[0]),
5971 ((mcp->mb[3] << 16) | mcp->mb[2]));
5973 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5974 "Done %s.\n", __func__);
/*
 * qla8044_md_get_template - 8044 variant of template retrieval: the
 * template is fetched in MINIDUMP_SIZE_36K chunks, advancing both the
 * DMA address and the firmware-side offset (mb[10]/mb[11]) each pass
 * until ha->md_template_size bytes have been transferred.
 */
5979 qla8044_md_get_template(scsi_qla_host_t *vha)
5981 struct qla_hw_data *ha = vha->hw;
5983 mbx_cmd_t *mcp = &mc;
5984 int rval = QLA_FUNCTION_FAILED;
5985 int offset = 0, size = MINIDUMP_SIZE_36K;
5987 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5988 "Entered %s.\n", __func__);
5990 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5991 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5992 if (!ha->md_tmplt_hdr) {
5993 ql_log(ql_log_warn, vha, 0xb11b,
5994 "Unable to allocate memory for Minidump template.\n");
5998 memset(mcp->mb, 0 , sizeof(mcp->mb));
5999 while (offset < ha->md_template_size) {
6000 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
6001 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
6002 mcp->mb[2] = LSW(RQST_TMPLT);
6003 mcp->mb[3] = MSW(RQST_TMPLT);
6004 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
6005 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
6006 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
6007 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
6008 mcp->mb[8] = LSW(size);
6009 mcp->mb[9] = MSW(size);
6010 mcp->mb[10] = offset & 0x0000FFFF;
/* NOTE(review): mb[] entries are 16-bit, so "offset & 0xFFFF0000"
 * truncates to 0; MSW(offset) looks intended — confirm against the
 * firmware interface spec before changing. */
6011 mcp->mb[11] = offset & 0xFFFF0000;
6012 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
6013 mcp->tov = MBX_TOV_SECONDS;
6014 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
6015 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6016 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6017 rval = qla2x00_mailbox_command(vha, mcp);
6019 if (rval != QLA_SUCCESS) {
6020 ql_dbg(ql_dbg_mbx, vha, 0xb11c,
6021 "mailbox command FAILED=0x%x, subcode=%x.\n",
6022 ((mcp->mb[1] << 16) | mcp->mb[0]),
6023 ((mcp->mb[3] << 16) | mcp->mb[2]));
6026 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
6027 "Done %s.\n", __func__);
6028 offset = offset + size;
/*
 * qla81xx_set_led_config - program LED configuration words via
 * MBC_SET_LED_CONFIG. 81xx uses two words (mb[1..2]); 8031 takes six
 * (mb[1..6]) and extends out_mb accordingly.
 */
6034 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
6037 struct qla_hw_data *ha = vha->hw;
6039 mbx_cmd_t *mcp = &mc;
6041 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6042 return QLA_FUNCTION_FAILED;
6044 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
6045 "Entered %s.\n", __func__);
6047 memset(mcp, 0, sizeof(mbx_cmd_t));
6048 mcp->mb[0] = MBC_SET_LED_CONFIG;
6049 mcp->mb[1] = led_cfg[0];
6050 mcp->mb[2] = led_cfg[1];
6051 if (IS_QLA8031(ha)) {
6052 mcp->mb[3] = led_cfg[2];
6053 mcp->mb[4] = led_cfg[3];
6054 mcp->mb[5] = led_cfg[4];
6055 mcp->mb[6] = led_cfg[5];
6058 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6060 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6062 mcp->tov = MBX_TOV_SECONDS;
6065 rval = qla2x00_mailbox_command(vha, mcp);
6066 if (rval != QLA_SUCCESS) {
6067 ql_dbg(ql_dbg_mbx, vha, 0x1134,
6068 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6070 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
6071 "Done %s.\n", __func__);
/*
 * qla81xx_get_led_config - read LED configuration words via
 * MBC_GET_LED_CONFIG into led_cfg[]. Mirrors qla81xx_set_led_config():
 * two words on 81xx, six on 8031.
 */
6078 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
6081 struct qla_hw_data *ha = vha->hw;
6083 mbx_cmd_t *mcp = &mc;
6085 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6086 return QLA_FUNCTION_FAILED;
6088 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
6089 "Entered %s.\n", __func__);
6091 memset(mcp, 0, sizeof(mbx_cmd_t));
6092 mcp->mb[0] = MBC_GET_LED_CONFIG;
6094 mcp->out_mb = MBX_0;
6095 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6097 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6098 mcp->tov = MBX_TOV_SECONDS;
6101 rval = qla2x00_mailbox_command(vha, mcp);
6102 if (rval != QLA_SUCCESS) {
6103 ql_dbg(ql_dbg_mbx, vha, 0x1137,
6104 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6106 led_cfg[0] = mcp->mb[1];
6107 led_cfg[1] = mcp->mb[2];
6108 if (IS_QLA8031(ha)) {
6109 led_cfg[2] = mcp->mb[3];
6110 led_cfg[3] = mcp->mb[4];
6111 led_cfg[4] = mcp->mb[5];
6112 led_cfg[5] = mcp->mb[6];
6114 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
6115 "Done %s.\n", __func__);
/*
 * qla82xx_mbx_beacon_ctl - turn the beacon LED on or off on a P3P-type
 * (82xx-family) adapter via MBC_SET_LED_CONFIG. The enable/disable value
 * goes into mb[7] (assignment lines not visible in this view).
 */
6122 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
6125 struct qla_hw_data *ha = vha->hw;
6127 mbx_cmd_t *mcp = &mc;
6129 if (!IS_P3P_TYPE(ha))
6130 return QLA_FUNCTION_FAILED;
6132 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
6133 "Entered %s.\n", __func__);
6135 memset(mcp, 0, sizeof(mbx_cmd_t));
6136 mcp->mb[0] = MBC_SET_LED_CONFIG;
6142 mcp->out_mb = MBX_7|MBX_0;
6144 mcp->tov = MBX_TOV_SECONDS;
6147 rval = qla2x00_mailbox_command(vha, mcp);
6148 if (rval != QLA_SUCCESS) {
6149 ql_dbg(ql_dbg_mbx, vha, 0x1128,
6150 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6152 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
6153 "Done %s.\n", __func__);
/*
 * qla83xx_wr_reg - write a 32-bit value to a remote register via
 * MBC_WRITE_REMOTE_REG (address in mb[1]/mb[2], data in mb[3]/mb[4]).
 * Supported on 83xx/27xx/28xx only.
 */
6160 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
6163 struct qla_hw_data *ha = vha->hw;
6165 mbx_cmd_t *mcp = &mc;
6167 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6168 return QLA_FUNCTION_FAILED;
6170 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
6171 "Entered %s.\n", __func__);
6173 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6174 mcp->mb[1] = LSW(reg);
6175 mcp->mb[2] = MSW(reg);
6176 mcp->mb[3] = LSW(data);
6177 mcp->mb[4] = MSW(data);
6178 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6180 mcp->in_mb = MBX_1|MBX_0;
6181 mcp->tov = MBX_TOV_SECONDS;
6183 rval = qla2x00_mailbox_command(vha, mcp);
6185 if (rval != QLA_SUCCESS) {
6186 ql_dbg(ql_dbg_mbx, vha, 0x1131,
6187 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6189 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
6190 "Done %s.\n", __func__);
/*
 * qla2x00_port_logout - perform an implicit LOGO of the given fcport
 * via MBC_PORT_LOGOUT (mb[10] BIT_15 selects implicit logout).
 * Not supported on the oldest 2100/2200 parts.
 */
6197 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
6200 struct qla_hw_data *ha = vha->hw;
6202 mbx_cmd_t *mcp = &mc;
6204 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
6205 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
6206 "Implicit LOGO Unsupported.\n");
6207 return QLA_FUNCTION_FAILED;
6211 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
6212 "Entering %s.\n", __func__);
6214 /* Perform Implicit LOGO. */
6215 mcp->mb[0] = MBC_PORT_LOGOUT;
6216 mcp->mb[1] = fcport->loop_id;
6217 mcp->mb[10] = BIT_15;
6218 mcp->out_mb = MBX_10|MBX_1|MBX_0;
6220 mcp->tov = MBX_TOV_SECONDS;
6222 rval = qla2x00_mailbox_command(vha, mcp);
6223 if (rval != QLA_SUCCESS)
6224 ql_dbg(ql_dbg_mbx, vha, 0x113d,
6225 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6227 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
6228 "Done %s.\n", __func__);
/*
 * qla83xx_rd_reg - read a 32-bit remote register via MBC_READ_REMOTE_REG.
 * Result is assembled from mb[3] (low) and mb[4] (high). Because CAMRAM
 * reads during soft reset can return the 0xbad0bad0 poison value, the
 * read is retried for up to 2 seconds before giving up.
 */
6234 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
6238 mbx_cmd_t *mcp = &mc;
6239 struct qla_hw_data *ha = vha->hw;
6240 unsigned long retry_max_time = jiffies + (2 * HZ);
6242 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6243 return QLA_FUNCTION_FAILED;
6245 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
6248 mcp->mb[0] = MBC_READ_REMOTE_REG;
6249 mcp->mb[1] = LSW(reg);
6250 mcp->mb[2] = MSW(reg);
6251 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6252 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
6253 mcp->tov = MBX_TOV_SECONDS;
6255 rval = qla2x00_mailbox_command(vha, mcp);
6257 if (rval != QLA_SUCCESS) {
6258 ql_dbg(ql_dbg_mbx, vha, 0x114c,
6259 "Failed=%x mb[0]=%x mb[1]=%x.\n",
6260 rval, mcp->mb[0], mcp->mb[1]);
6262 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
6263 if (*data == QLA8XXX_BAD_VALUE) {
6265 * During soft-reset CAMRAM register reads might
6266 * return 0xbad0bad0. So retry for MAX of 2 sec
6267 * while reading camram registers.
6269 if (time_after(jiffies, retry_max_time)) {
6270 ql_dbg(ql_dbg_mbx, vha, 0x1141,
6271 "Failure to read CAMRAM register. "
6272 "data=0x%x.\n", *data);
6273 return QLA_FUNCTION_FAILED;
6278 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
/*
 * qla83xx_restart_nic_firmware - restart the NIC-side firmware on an
 * 83xx adapter via MBC_RESTART_NIC_FIRMWARE. On failure the firmware
 * state is dumped for post-mortem analysis.
 */
6285 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
6289 mbx_cmd_t *mcp = &mc;
6290 struct qla_hw_data *ha = vha->hw;
6292 if (!IS_QLA83XX(ha))
6293 return QLA_FUNCTION_FAILED;
6295 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
6297 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
6298 mcp->out_mb = MBX_0;
6299 mcp->in_mb = MBX_1|MBX_0;
6300 mcp->tov = MBX_TOV_SECONDS;
6302 rval = qla2x00_mailbox_command(vha, mcp);
6304 if (rval != QLA_SUCCESS) {
6305 ql_dbg(ql_dbg_mbx, vha, 0x1144,
6306 "Failed=%x mb[0]=%x mb[1]=%x.\n",
6307 rval, mcp->mb[0], mcp->mb[1]);
6308 qla2xxx_dump_fw(vha);
6310 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
/*
 * qla83xx_access_control - issue MBC_SET_ACCESS_CONTROL on an 8031 part.
 * The low byte of @options is a subcode whose bits select the operation:
 * BIT_2 sends a start/end address range, BIT_5 returns a sector size in
 * mb[1], BIT_6/BIT_7 report a driver-lock id, BIT_3/BIT_4 a flash-lock id.
 */
6317 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
6318 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
6322 mbx_cmd_t *mcp = &mc;
6323 uint8_t subcode = (uint8_t)options;
6324 struct qla_hw_data *ha = vha->hw;
6326 if (!IS_QLA8031(ha))
6327 return QLA_FUNCTION_FAILED;
6329 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
6331 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
6332 mcp->mb[1] = options;
6333 mcp->out_mb = MBX_1|MBX_0;
6334 if (subcode & BIT_2) {
6335 mcp->mb[2] = LSW(start_addr);
6336 mcp->mb[3] = MSW(start_addr);
6337 mcp->mb[4] = LSW(end_addr);
6338 mcp->mb[5] = MSW(end_addr);
6339 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
6341 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6342 if (!(subcode & (BIT_2 | BIT_5)))
6343 mcp->in_mb |= MBX_4|MBX_3;
6344 mcp->tov = MBX_TOV_SECONDS;
6346 rval = qla2x00_mailbox_command(vha, mcp);
6348 if (rval != QLA_SUCCESS) {
6349 ql_dbg(ql_dbg_mbx, vha, 0x1147,
6350 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
6351 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
6353 qla2xxx_dump_fw(vha);
6355 if (subcode & BIT_5)
6356 *sector_size = mcp->mb[1];
6357 else if (subcode & (BIT_6 | BIT_7)) {
6358 ql_dbg(ql_dbg_mbx, vha, 0x1148,
6359 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6360 } else if (subcode & (BIT_3 | BIT_4)) {
6361 ql_dbg(ql_dbg_mbx, vha, 0x1149,
6362 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6364 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
/*
 * qla2x00_dump_mctp_data - dump MCTP RAM into the caller-supplied DMA
 * buffer via MBC_DUMP_RISC_RAM_EXTENDED. mb[10] carries BIT_7 (RAM ID
 * valid) with RAM ID 0x40 for MCTP. MCTP-capable adapters only.
 */
6371 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
6376 mbx_cmd_t *mcp = &mc;
6378 if (!IS_MCTP_CAPABLE(vha->hw))
6379 return QLA_FUNCTION_FAILED;
6381 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
6382 "Entered %s.\n", __func__);
6384 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
6385 mcp->mb[1] = LSW(addr);
6386 mcp->mb[2] = MSW(req_dma);
6387 mcp->mb[3] = LSW(req_dma);
6388 mcp->mb[4] = MSW(size);
6389 mcp->mb[5] = LSW(size);
6390 mcp->mb[6] = MSW(MSD(req_dma));
6391 mcp->mb[7] = LSW(MSD(req_dma));
6392 mcp->mb[8] = MSW(addr);
6393 /* Setting RAM ID to valid */
6394 /* For MCTP RAM ID is 0x40 */
6395 mcp->mb[10] = BIT_7 | 0x40;
6397 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6401 mcp->tov = MBX_TOV_SECONDS;
6403 rval = qla2x00_mailbox_command(vha, mcp);
6405 if (rval != QLA_SUCCESS) {
6406 ql_dbg(ql_dbg_mbx, vha, 0x114e,
6407 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6409 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6410 "Done %s.\n", __func__);
/*
 * qla26xx_dport_diagnostics - run D-port diagnostics and DMA the results
 * into dd_buf. The buffer is streaming-mapped (dma_map_single) for the
 * duration of the MBC_DPORT_DIAGNOSTICS command and unmapped afterwards
 * on both success and failure paths. Extended timeout (4x) is used.
 */
6417 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6418 void *dd_buf, uint size, uint options)
6422 mbx_cmd_t *mcp = &mc;
6425 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
6426 !IS_QLA28XX(vha->hw))
6427 return QLA_FUNCTION_FAILED;
6429 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6430 "Entered %s.\n", __func__);
6432 dd_dma = dma_map_single(&vha->hw->pdev->dev,
6433 dd_buf, size, DMA_FROM_DEVICE);
6434 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6435 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6436 return QLA_MEMORY_ALLOC_FAILED;
6439 memset(dd_buf, 0, size);
6441 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6442 mcp->mb[1] = options;
6443 mcp->mb[2] = MSW(LSD(dd_dma));
6444 mcp->mb[3] = LSW(LSD(dd_dma));
6445 mcp->mb[6] = MSW(MSD(dd_dma));
6446 mcp->mb[7] = LSW(MSD(dd_dma));
6448 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6449 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6450 mcp->buf_size = size;
6451 mcp->flags = MBX_DMA_IN;
6452 mcp->tov = MBX_TOV_SECONDS * 4;
6453 rval = qla2x00_mailbox_command(vha, mcp);
6455 if (rval != QLA_SUCCESS) {
6456 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6458 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6459 "Done %s.\n", __func__);
6462 dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6463 size, DMA_FROM_DEVICE);
/*
 * qla2x00_async_mb_sp_done - SRB completion callback for async mailbox
 * commands: records the result and wakes the waiter in
 * qla24xx_send_mb_cmd(), which owns and frees the sp.
 */
6468 static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
6470 sp->u.iocb_cmd.u.mbx.rc = res;
6472 complete(&sp->u.iocb_cmd.u.mbx.comp);
6473 /* don't free sp here. Let the caller do the free */
6477 * This mailbox uses the iocb interface to send MB command.
6478 * This allows non-critial (non chip setup) command to go
/*
 * qla24xx_send_mb_cmd - send a mailbox command through the IOCB path.
 * Allocates an SRB, copies mcp->mb out, submits it, blocks until the
 * completion callback fires, then copies the returned mailbox registers
 * back into mcp->mb. Requires firmware to be started. Must not be
 * called from a context that cannot sleep (wait_for_completion).
 */
6481 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6483 int rval = QLA_FUNCTION_FAILED;
6487 if (!vha->hw->flags.fw_started)
6491 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6495 c = &sp->u.iocb_cmd;
6496 init_completion(&c->u.mbx.comp);
6498 sp->type = SRB_MB_IOCB;
6499 sp->name = mb_to_str(mcp->mb[0]);
6500 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
6501 qla2x00_async_mb_sp_done);
6503 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6505 rval = qla2x00_start_sp(sp);
6506 if (rval != QLA_SUCCESS) {
6507 ql_dbg(ql_dbg_mbx, vha, 0x1018,
6508 "%s: %s Failed submission. %x.\n",
6509 __func__, sp->name, rval);
6513 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6514 sp->name, sp->handle);
6516 wait_for_completion(&c->u.mbx.comp);
6517 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6521 case QLA_FUNCTION_TIMEOUT:
6522 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6523 __func__, sp->name, rval);
6526 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6527 __func__, sp->name);
6530 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6531 __func__, sp->name, rval);
/* Drop our reference; sp is freed when the last ref goes away. */
6537 kref_put(&sp->cmd_kref, qla2x00_sp_release);
6544 * NOTE: Do not call this routine from DPC thread
/*
 * qla24xx_gpdb_wait - synchronously fetch the 24xx port database for
 * @fcport using the IOCB mailbox path (blocks; hence the DPC warning).
 * The DMA buffer comes from ha->s_dma_pool and is parsed by
 * __qla24xx_parse_gpdb() on success.
 */
6546 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6548 int rval = QLA_FUNCTION_FAILED;
6550 struct port_database_24xx *pd;
6551 struct qla_hw_data *ha = vha->hw;
6554 if (!vha->hw->flags.fw_started)
6557 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6559 ql_log(ql_log_warn, vha, 0xd047,
6560 "Failed to allocate port database structure.\n");
6564 memset(&mc, 0, sizeof(mc));
6565 mc.mb[0] = MBC_GET_PORT_DATABASE;
6566 mc.mb[1] = fcport->loop_id;
6567 mc.mb[2] = MSW(pd_dma);
6568 mc.mb[3] = LSW(pd_dma);
6569 mc.mb[6] = MSW(MSD(pd_dma));
6570 mc.mb[7] = LSW(MSD(pd_dma));
6571 mc.mb[9] = vha->vp_idx;
6574 rval = qla24xx_send_mb_cmd(vha, &mc);
6575 if (rval != QLA_SUCCESS) {
6576 ql_dbg(ql_dbg_mbx, vha, 0x1193,
6577 "%s: %8phC fail\n", __func__, fcport->port_name);
6581 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6583 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6584 __func__, fcport->port_name);
6588 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
/*
 * __qla24xx_parse_gpdb - interpret a 24xx port-database entry and update
 * @fcport: login state check, WWN copy, port_id, port type (NVMe bits
 * from PRLI service parameter word 3), class-of-service and confirmed
 * completion support. Fails if the port is not in PRLI-complete state
 * or the WWPN no longer matches (device lost mid-way).
 */
6593 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6594 struct port_database_24xx *pd)
6596 int rval = QLA_SUCCESS;
6598 u8 current_login_state, last_login_state;
/* NVMe targets report login state in the high nibble, FCP in the low. */
6600 if (NVME_TARGET(vha->hw, fcport)) {
6601 current_login_state = pd->current_login_state >> 4;
6602 last_login_state = pd->last_login_state >> 4;
6604 current_login_state = pd->current_login_state & 0xf;
6605 last_login_state = pd->last_login_state & 0xf;
6608 /* Check for logged in state. */
6609 if (current_login_state != PDS_PRLI_COMPLETE) {
6610 ql_dbg(ql_dbg_mbx, vha, 0x119a,
6611 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6612 current_login_state, last_login_state, fcport->loop_id);
6613 rval = QLA_FUNCTION_FAILED;
6617 if (fcport->loop_id == FC_NO_LOOP_ID ||
6618 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6619 memcmp(fcport->port_name, pd->port_name, 8))) {
6620 /* We lost the device mid way. */
6621 rval = QLA_NOT_LOGGED_IN;
6625 /* Names are little-endian. */
6626 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6627 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6629 /* Get port_id of device. */
6630 fcport->d_id.b.domain = pd->port_id[0];
6631 fcport->d_id.b.area = pd->port_id[1];
6632 fcport->d_id.b.al_pa = pd->port_id[2];
6633 fcport->d_id.b.rsvd_1 = 0;
6635 ql_dbg(ql_dbg_disc, vha, 0x2062,
6636 "%8phC SVC Param w3 %02x%02x",
6638 pd->prli_svc_param_word_3[1],
6639 pd->prli_svc_param_word_3[0]);
/* NVMe role bits are active-low in PRLI service param word 3. */
6641 if (NVME_TARGET(vha->hw, fcport)) {
6642 fcport->port_type = FCT_NVME;
6643 if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
6644 fcport->port_type |= FCT_NVME_INITIATOR;
6645 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6646 fcport->port_type |= FCT_NVME_TARGET;
6647 if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
6648 fcport->port_type |= FCT_NVME_DISCOVERY;
6650 /* If not target must be initiator or unknown type. */
6651 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6652 fcport->port_type = FCT_INITIATOR;
6654 fcport->port_type = FCT_TARGET;
6656 /* Passback COS information. */
6657 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6658 FC_COS_CLASS2 : FC_COS_CLASS3;
6660 if (pd->prli_svc_param_word_3[0] & BIT_7) {
6661 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6662 fcport->conf_compl_supported = 1;
6670 * qla24xx_gidlist__wait
6671 * NOTE: don't call this routine from DPC thread.
/*
 * qla24xx_gidlist_wait - synchronously fetch the ID list via
 * MBC_GET_ID_LIST over the (blocking) IOCB mailbox path into the
 * caller-provided DMA buffer; the entry count is returned in *entries.
 */
6673 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6674 void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6676 int rval = QLA_FUNCTION_FAILED;
6679 if (!vha->hw->flags.fw_started)
6682 memset(&mc, 0, sizeof(mc));
6683 mc.mb[0] = MBC_GET_ID_LIST;
6684 mc.mb[2] = MSW(id_list_dma);
6685 mc.mb[3] = LSW(id_list_dma);
6686 mc.mb[6] = MSW(MSD(id_list_dma));
6687 mc.mb[7] = LSW(MSD(id_list_dma));
6689 mc.mb[9] = vha->vp_idx;
6691 rval = qla24xx_send_mb_cmd(vha, &mc);
6692 if (rval != QLA_SUCCESS) {
6693 ql_dbg(ql_dbg_mbx, vha, 0x119b,
6694 "%s: fail\n", __func__);
6696 *entries = mc.mb[1];
6697 ql_dbg(ql_dbg_mbx, vha, 0x119c,
6698 "%s: done\n", __func__);
/*
 * qla27xx_set_zio_threshold - program the ZIO threshold via
 * MBC_GET_SET_ZIO_THRESHOLD (set subcommand; mb[1]/mb[2] value lines
 * not visible in this view).
 */
6704 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6708 mbx_cmd_t *mcp = &mc;
6710 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6711 "Entered %s\n", __func__);
6713 memset(mcp->mb, 0 , sizeof(mcp->mb));
6714 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6717 mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6718 mcp->in_mb = MBX_2 | MBX_0;
6719 mcp->tov = MBX_TOV_SECONDS;
6722 rval = qla2x00_mailbox_command(vha, mcp);
6724 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6725 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6730 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6734 mbx_cmd_t *mcp = &mc;
6736 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6737 "Entered %s\n", __func__);
6739 memset(mcp->mb, 0, sizeof(mcp->mb));
6740 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6742 mcp->out_mb = MBX_1 | MBX_0;
6743 mcp->in_mb = MBX_2 | MBX_0;
6744 mcp->tov = MBX_TOV_SECONDS;
6747 rval = qla2x00_mailbox_command(vha, mcp);
6748 if (rval == QLA_SUCCESS)
6751 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6752 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6758 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6760 struct qla_hw_data *ha = vha->hw;
6761 uint16_t iter, addr, offset;
6762 dma_addr_t phys_addr;
6766 memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6768 phys_addr = ha->sfp_data_dma;
6769 sfp_data = ha->sfp_data;
6772 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6774 /* Skip to next device address. */
6779 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6780 addr, offset, SFP_BLOCK_SIZE, BIT_1);
6781 if (rval != QLA_SUCCESS) {
6782 ql_log(ql_log_warn, vha, 0x706d,
6783 "Unable to read SFP data (%x/%x/%x).\n", rval,
6789 if (buf && (c < count)) {
6792 if ((count - c) >= SFP_BLOCK_SIZE)
6793 sz = SFP_BLOCK_SIZE;
6797 memcpy(buf, sfp_data, sz);
6798 buf += SFP_BLOCK_SIZE;
6801 phys_addr += SFP_BLOCK_SIZE;
6802 sfp_data += SFP_BLOCK_SIZE;
6803 offset += SFP_BLOCK_SIZE;
6809 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6810 uint16_t *out_mb, int out_mb_sz)
6812 int rval = QLA_FUNCTION_FAILED;
6815 if (!vha->hw->flags.fw_started)
6818 memset(&mc, 0, sizeof(mc));
6819 mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6821 rval = qla24xx_send_mb_cmd(vha, &mc);
6822 if (rval != QLA_SUCCESS) {
6823 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6824 "%s: fail\n", __func__);
6826 if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6827 memcpy(out_mb, mc.mb, out_mb_sz);
6829 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6831 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6832 "%s: done\n", __func__);
6838 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
6839 uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
6844 mbx_cmd_t *mcp = &mc;
6846 mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
6848 mcp->mb[2] = region;
6849 mcp->mb[3] = MSW(len);
6850 mcp->mb[4] = LSW(len);
6851 mcp->mb[5] = MSW(sfub_dma_addr);
6852 mcp->mb[6] = LSW(sfub_dma_addr);
6853 mcp->mb[7] = MSW(MSD(sfub_dma_addr));
6854 mcp->mb[8] = LSW(MSD(sfub_dma_addr));
6855 mcp->mb[9] = sfub_len;
6857 MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6858 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6859 mcp->tov = MBX_TOV_SECONDS;
6861 rval = qla2x00_mailbox_command(vha, mcp);
6863 if (rval != QLA_SUCCESS) {
6864 ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
6865 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
6872 int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6877 mbx_cmd_t *mcp = &mc;
6879 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6880 "Entered %s.\n", __func__);
6882 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6883 mcp->mb[1] = LSW(addr);
6884 mcp->mb[2] = MSW(addr);
6885 mcp->mb[3] = LSW(data);
6886 mcp->mb[4] = MSW(data);
6887 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6888 mcp->in_mb = MBX_1|MBX_0;
6889 mcp->tov = MBX_TOV_SECONDS;
6891 rval = qla2x00_mailbox_command(vha, mcp);
6893 if (rval != QLA_SUCCESS) {
6894 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6895 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6897 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6898 "Done %s.\n", __func__);
6904 int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6909 mbx_cmd_t *mcp = &mc;
6911 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6912 "Entered %s.\n", __func__);
6914 mcp->mb[0] = MBC_READ_REMOTE_REG;
6915 mcp->mb[1] = LSW(addr);
6916 mcp->mb[2] = MSW(addr);
6917 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6918 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6919 mcp->tov = MBX_TOV_SECONDS;
6921 rval = qla2x00_mailbox_command(vha, mcp);
6923 *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);
6925 if (rval != QLA_SUCCESS) {
6926 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6927 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6929 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6930 "Done %s.\n", __func__);
6937 ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
6939 struct qla_hw_data *ha = vha->hw;
6941 mbx_cmd_t *mcp = &mc;
6944 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6945 return QLA_FUNCTION_FAILED;
6947 ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
6950 mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
6951 mcp->mb[1] = options;
6952 mcp->out_mb = MBX_1|MBX_0;
6953 mcp->in_mb = MBX_1|MBX_0;
6954 if (options & BIT_0) {
6955 if (options & BIT_1) {
6956 mcp->mb[2] = led[2];
6957 mcp->out_mb |= MBX_2;
6959 if (options & BIT_2) {
6960 mcp->mb[3] = led[0];
6961 mcp->out_mb |= MBX_3;
6963 if (options & BIT_3) {
6964 mcp->mb[4] = led[1];
6965 mcp->out_mb |= MBX_4;
6968 mcp->in_mb |= MBX_4|MBX_3|MBX_2;
6970 mcp->tov = MBX_TOV_SECONDS;
6972 rval = qla2x00_mailbox_command(vha, mcp);
6974 ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
6975 __func__, rval, mcp->mb[0], mcp->mb[1]);
6979 if (options & BIT_0) {
6980 ha->beacon_blink_led = 0;
6981 ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
6983 led[2] = mcp->mb[2];
6984 led[0] = mcp->mb[3];
6985 led[1] = mcp->mb[4];
6986 ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
6987 __func__, led[0], led[1], led[2]);
6994 * qla_no_op_mb(): This MB is used to check if FW is still alive and
6995 * able to generate an interrupt. Otherwise, a timeout will trigger
 * a firmware dump and reset.
6997 * @vha: host adapter pointer
7000 void qla_no_op_mb(struct scsi_qla_host *vha)
7003 mbx_cmd_t *mcp = &mc;
7006 memset(&mc, 0, sizeof(mc));
7007 mcp->mb[0] = 0; // noop cmd= 0
7008 mcp->out_mb = MBX_0;
7012 rval = qla2x00_mailbox_command(vha, mcp);
7015 ql_dbg(ql_dbg_async, vha, 0x7071,
7016 "Failed %s %x\n", __func__, rval);