 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014    QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/gfp.h>
/*
 * IS_PPCARCH is true only on PowerPC hosts; qla2x00_execute_fw() uses it
 * to set BIT_4 of mailbox register 11.  The two defines are mutually
 * exclusive alternatives selected by the architecture guard — without the
 * #ifdef/#else the second define would clash with the first.
 */
#ifdef CONFIG_PPC
#define IS_PPCARCH	true
#else
#define IS_PPCARCH	false
#endif
19 static struct mb_cmd_name {
23 {MBC_GET_PORT_DATABASE, "GPDB"},
24 {MBC_GET_ID_LIST, "GIDList"},
25 {MBC_GET_LINK_PRIV_STATS, "Stats"},
26 {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
29 static const char *mb_to_str(uint16_t cmd)
32 struct mb_cmd_name *e;
34 for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
42 static struct rom_cmd {
46 { MBC_EXECUTE_FIRMWARE },
47 { MBC_READ_RAM_WORD },
48 { MBC_MAILBOX_REGISTER_TEST },
49 { MBC_VERIFY_CHECKSUM },
50 { MBC_GET_FIRMWARE_VERSION },
51 { MBC_LOAD_RISC_RAM },
52 { MBC_DUMP_RISC_RAM },
53 { MBC_LOAD_RISC_RAM_EXTENDED },
54 { MBC_DUMP_RISC_RAM_EXTENDED },
55 { MBC_WRITE_RAM_WORD_EXTENDED },
56 { MBC_READ_RAM_EXTENDED },
57 { MBC_GET_RESOURCE_COUNTS },
58 { MBC_SET_FIRMWARE_OPTION },
59 { MBC_MID_INITIALIZE_FIRMWARE },
60 { MBC_GET_FIRMWARE_STATE },
61 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
62 { MBC_GET_RETRY_COUNT },
63 { MBC_TRACE_CONTROL },
64 { MBC_INITIALIZE_MULTIQ },
65 { MBC_IOCB_COMMAND_A64 },
66 { MBC_GET_ADAPTER_LOOP_ID },
68 { MBC_GET_RNID_PARAMS },
69 { MBC_GET_SET_ZIO_THRESHOLD },
72 static int is_rom_cmd(uint16_t cmd)
77 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
87 * qla2x00_mailbox_command
88 * Issue mailbox command and waits for completion.
91 * ha = adapter block pointer.
92 * mcp = driver internal mbx struct pointer.
95 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
98 * 0 : QLA_SUCCESS = cmd performed success
99 * 1 : QLA_FUNCTION_FAILED (error encountered)
100 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
106 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
109 unsigned long flags = 0;
111 uint8_t abort_active;
113 uint16_t command = 0;
115 uint16_t __iomem *optr;
118 unsigned long wait_time;
119 struct qla_hw_data *ha = vha->hw;
120 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
124 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
126 if (ha->pdev->error_state > pci_channel_io_frozen) {
127 ql_log(ql_log_warn, vha, 0x1001,
128 "error_state is greater than pci_channel_io_frozen, "
130 return QLA_FUNCTION_TIMEOUT;
133 if (vha->device_flags & DFLG_DEV_FAILED) {
134 ql_log(ql_log_warn, vha, 0x1002,
135 "Device in failed state, exiting.\n");
136 return QLA_FUNCTION_TIMEOUT;
139 /* if PCI error, then avoid mbx processing.*/
140 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
141 test_bit(UNLOADING, &base_vha->dpc_flags)) {
142 ql_log(ql_log_warn, vha, 0xd04e,
143 "PCI error, exiting.\n");
144 return QLA_FUNCTION_TIMEOUT;
148 io_lock_on = base_vha->flags.init_done;
151 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
152 chip_reset = ha->chip_reset;
154 if (ha->flags.pci_channel_io_perm_failure) {
155 ql_log(ql_log_warn, vha, 0x1003,
156 "Perm failure on EEH timeout MBX, exiting.\n");
157 return QLA_FUNCTION_TIMEOUT;
160 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
161 /* Setting Link-Down error */
162 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
163 ql_log(ql_log_warn, vha, 0x1004,
164 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
165 return QLA_FUNCTION_TIMEOUT;
168 /* check if ISP abort is active and return cmd with timeout */
169 if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
170 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
171 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
172 !is_rom_cmd(mcp->mb[0])) {
173 ql_log(ql_log_info, vha, 0x1005,
174 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
176 return QLA_FUNCTION_TIMEOUT;
179 atomic_inc(&ha->num_pend_mbx_stage1);
181 * Wait for active mailbox commands to finish by waiting at most tov
182 * seconds. This is to serialize actual issuing of mailbox cmds during
183 * non ISP abort time.
185 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
186 /* Timeout occurred. Return error. */
187 ql_log(ql_log_warn, vha, 0xd035,
188 "Cmd access timeout, cmd=0x%x, Exiting.\n",
190 atomic_dec(&ha->num_pend_mbx_stage1);
191 return QLA_FUNCTION_TIMEOUT;
193 atomic_dec(&ha->num_pend_mbx_stage1);
194 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
200 /* Save mailbox command for debug */
203 ql_dbg(ql_dbg_mbx, vha, 0x1006,
204 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
206 spin_lock_irqsave(&ha->hardware_lock, flags);
208 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
209 ha->flags.mbox_busy) {
211 spin_unlock_irqrestore(&ha->hardware_lock, flags);
214 ha->flags.mbox_busy = 1;
216 /* Load mailbox registers. */
218 optr = (uint16_t __iomem *)®->isp82.mailbox_in[0];
219 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
220 optr = (uint16_t __iomem *)®->isp24.mailbox0;
222 optr = (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 0);
225 command = mcp->mb[0];
226 mboxes = mcp->out_mb;
228 ql_dbg(ql_dbg_mbx, vha, 0x1111,
229 "Mailbox registers (OUT):\n");
230 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
231 if (IS_QLA2200(ha) && cnt == 8)
233 (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 8);
234 if (mboxes & BIT_0) {
235 ql_dbg(ql_dbg_mbx, vha, 0x1112,
236 "mbox[%d]<-0x%04x\n", cnt, *iptr);
237 WRT_REG_WORD(optr, *iptr);
245 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
246 "I/O Address = %p.\n", optr);
248 /* Issue set host interrupt command to send cmd out. */
249 ha->flags.mbox_int = 0;
250 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
252 /* Unlock mbx registers and wait for interrupt */
253 ql_dbg(ql_dbg_mbx, vha, 0x100f,
254 "Going to unlock irq & waiting for interrupts. "
255 "jiffies=%lx.\n", jiffies);
257 /* Wait for mbx cmd completion until timeout */
258 atomic_inc(&ha->num_pend_mbx_stage2);
259 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
260 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
263 WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING);
264 else if (IS_FWI2_CAPABLE(ha))
265 WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT);
267 WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT);
268 spin_unlock_irqrestore(&ha->hardware_lock, flags);
271 atomic_inc(&ha->num_pend_mbx_stage3);
272 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
274 if (chip_reset != ha->chip_reset) {
275 spin_lock_irqsave(&ha->hardware_lock, flags);
276 ha->flags.mbox_busy = 0;
277 spin_unlock_irqrestore(&ha->hardware_lock,
279 atomic_dec(&ha->num_pend_mbx_stage2);
280 atomic_dec(&ha->num_pend_mbx_stage3);
284 ql_dbg(ql_dbg_mbx, vha, 0x117a,
285 "cmd=%x Timeout.\n", command);
286 spin_lock_irqsave(&ha->hardware_lock, flags);
287 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
288 spin_unlock_irqrestore(&ha->hardware_lock, flags);
290 } else if (ha->flags.purge_mbox ||
291 chip_reset != ha->chip_reset) {
292 spin_lock_irqsave(&ha->hardware_lock, flags);
293 ha->flags.mbox_busy = 0;
294 spin_unlock_irqrestore(&ha->hardware_lock, flags);
295 atomic_dec(&ha->num_pend_mbx_stage2);
296 atomic_dec(&ha->num_pend_mbx_stage3);
300 atomic_dec(&ha->num_pend_mbx_stage3);
302 if (time_after(jiffies, wait_time + 5 * HZ))
303 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
304 command, jiffies_to_msecs(jiffies - wait_time));
306 ql_dbg(ql_dbg_mbx, vha, 0x1011,
307 "Cmd=%x Polling Mode.\n", command);
309 if (IS_P3P_TYPE(ha)) {
310 if (RD_REG_DWORD(®->isp82.hint) &
311 HINT_MBX_INT_PENDING) {
312 ha->flags.mbox_busy = 0;
313 spin_unlock_irqrestore(&ha->hardware_lock,
315 atomic_dec(&ha->num_pend_mbx_stage2);
316 ql_dbg(ql_dbg_mbx, vha, 0x1012,
317 "Pending mailbox timeout, exiting.\n");
318 rval = QLA_FUNCTION_TIMEOUT;
321 WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING);
322 } else if (IS_FWI2_CAPABLE(ha))
323 WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT);
325 WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT);
326 spin_unlock_irqrestore(&ha->hardware_lock, flags);
328 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
329 while (!ha->flags.mbox_int) {
330 if (ha->flags.purge_mbox ||
331 chip_reset != ha->chip_reset) {
332 spin_lock_irqsave(&ha->hardware_lock, flags);
333 ha->flags.mbox_busy = 0;
334 spin_unlock_irqrestore(&ha->hardware_lock,
336 atomic_dec(&ha->num_pend_mbx_stage2);
341 if (time_after(jiffies, wait_time))
344 /* Check for pending interrupts. */
345 qla2x00_poll(ha->rsp_q_map[0]);
347 if (!ha->flags.mbox_int &&
349 command == MBC_LOAD_RISC_RAM_EXTENDED))
352 ql_dbg(ql_dbg_mbx, vha, 0x1013,
354 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
356 atomic_dec(&ha->num_pend_mbx_stage2);
358 /* Check whether we timed out */
359 if (ha->flags.mbox_int) {
362 ql_dbg(ql_dbg_mbx, vha, 0x1014,
363 "Cmd=%x completed.\n", command);
365 /* Got interrupt. Clear the flag. */
366 ha->flags.mbox_int = 0;
367 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
369 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
370 spin_lock_irqsave(&ha->hardware_lock, flags);
371 ha->flags.mbox_busy = 0;
372 spin_unlock_irqrestore(&ha->hardware_lock, flags);
374 /* Setting Link-Down error */
375 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
377 rval = QLA_FUNCTION_FAILED;
378 ql_log(ql_log_warn, vha, 0xd048,
379 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
383 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
384 ql_dbg(ql_dbg_mbx, vha, 0x11ff,
385 "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
386 MBS_COMMAND_COMPLETE);
387 rval = QLA_FUNCTION_FAILED;
390 /* Load return mailbox registers. */
392 iptr = (uint16_t *)&ha->mailbox_out[0];
395 ql_dbg(ql_dbg_mbx, vha, 0x1113,
396 "Mailbox registers (IN):\n");
397 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
398 if (mboxes & BIT_0) {
400 ql_dbg(ql_dbg_mbx, vha, 0x1114,
401 "mbox[%d]->0x%04x\n", cnt, *iptr2);
411 uint32_t ictrl, host_status, hccr;
414 if (IS_FWI2_CAPABLE(ha)) {
415 mb[0] = RD_REG_WORD(®->isp24.mailbox0);
416 mb[1] = RD_REG_WORD(®->isp24.mailbox1);
417 mb[2] = RD_REG_WORD(®->isp24.mailbox2);
418 mb[3] = RD_REG_WORD(®->isp24.mailbox3);
419 mb[7] = RD_REG_WORD(®->isp24.mailbox7);
420 ictrl = RD_REG_DWORD(®->isp24.ictrl);
421 host_status = RD_REG_DWORD(®->isp24.host_status);
422 hccr = RD_REG_DWORD(®->isp24.hccr);
424 ql_log(ql_log_warn, vha, 0xd04c,
425 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
426 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
427 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
428 mb[7], host_status, hccr);
431 mb[0] = RD_MAILBOX_REG(ha, ®->isp, 0);
432 ictrl = RD_REG_WORD(®->isp.ictrl);
433 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
434 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
435 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
437 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
439 /* Capture FW dump only, if PCI device active */
440 if (!pci_channel_offline(vha->hw->pdev)) {
441 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
442 if (w == 0xffff || ictrl == 0xffffffff ||
443 (chip_reset != ha->chip_reset)) {
444 /* This is special case if there is unload
445 * of driver happening and if PCI device go
446 * into bad state due to PCI error condition
447 * then only PCI ERR flag would be set.
448 * we will do premature exit for above case.
450 spin_lock_irqsave(&ha->hardware_lock, flags);
451 ha->flags.mbox_busy = 0;
452 spin_unlock_irqrestore(&ha->hardware_lock,
454 rval = QLA_FUNCTION_TIMEOUT;
458 /* Attempt to capture firmware dump for further
459 * anallysis of the current formware state. we do not
460 * need to do this if we are intentionally generating
463 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
464 ha->isp_ops->fw_dump(vha, 0);
465 rval = QLA_FUNCTION_TIMEOUT;
468 spin_lock_irqsave(&ha->hardware_lock, flags);
469 ha->flags.mbox_busy = 0;
470 spin_unlock_irqrestore(&ha->hardware_lock, flags);
475 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
476 ql_dbg(ql_dbg_mbx, vha, 0x101a,
477 "Checking for additional resp interrupt.\n");
479 /* polling mode for non isp_abort commands. */
480 qla2x00_poll(ha->rsp_q_map[0]);
483 if (rval == QLA_FUNCTION_TIMEOUT &&
484 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
485 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
486 ha->flags.eeh_busy) {
487 /* not in dpc. schedule it for dpc to take over. */
488 ql_dbg(ql_dbg_mbx, vha, 0x101b,
489 "Timeout, schedule isp_abort_needed.\n");
491 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
492 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
493 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
494 if (IS_QLA82XX(ha)) {
495 ql_dbg(ql_dbg_mbx, vha, 0x112a,
496 "disabling pause transmit on port "
499 QLA82XX_CRB_NIU + 0x98,
500 CRB_NIU_XG_PAUSE_CTL_P0|
501 CRB_NIU_XG_PAUSE_CTL_P1);
503 ql_log(ql_log_info, base_vha, 0x101c,
504 "Mailbox cmd timeout occurred, cmd=0x%x, "
505 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
506 "abort.\n", command, mcp->mb[0],
508 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
509 qla2xxx_wake_dpc(vha);
511 } else if (current == ha->dpc_thread) {
512 /* call abort directly since we are in the DPC thread */
513 ql_dbg(ql_dbg_mbx, vha, 0x101d,
514 "Timeout, calling abort_isp.\n");
516 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
517 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
518 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
519 if (IS_QLA82XX(ha)) {
520 ql_dbg(ql_dbg_mbx, vha, 0x112b,
521 "disabling pause transmit on port "
524 QLA82XX_CRB_NIU + 0x98,
525 CRB_NIU_XG_PAUSE_CTL_P0|
526 CRB_NIU_XG_PAUSE_CTL_P1);
528 ql_log(ql_log_info, base_vha, 0x101e,
529 "Mailbox cmd timeout occurred, cmd=0x%x, "
530 "mb[0]=0x%x. Scheduling ISP abort ",
531 command, mcp->mb[0]);
532 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
533 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
534 /* Allow next mbx cmd to come in. */
535 complete(&ha->mbx_cmd_comp);
536 if (ha->isp_ops->abort_isp(vha)) {
537 /* Failed. retry later. */
538 set_bit(ISP_ABORT_NEEDED,
541 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
542 ql_dbg(ql_dbg_mbx, vha, 0x101f,
543 "Finished abort_isp.\n");
550 /* Allow next mbx cmd to come in. */
551 complete(&ha->mbx_cmd_comp);
554 if (rval == QLA_ABORTED) {
555 ql_log(ql_log_info, vha, 0xd035,
556 "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
559 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
560 pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
561 dev_name(&ha->pdev->dev), 0x1020+0x800,
565 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
566 if (mboxes & BIT_0) {
567 printk(" mb[%u]=%x", i, mcp->mb[i]);
570 pr_warn(" cmd=%x ****\n", command);
572 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
573 ql_dbg(ql_dbg_mbx, vha, 0x1198,
574 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
575 RD_REG_DWORD(®->isp24.host_status),
576 RD_REG_DWORD(®->isp24.ictrl),
577 RD_REG_DWORD(®->isp24.istatus));
579 ql_dbg(ql_dbg_mbx, vha, 0x1206,
580 "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
581 RD_REG_WORD(®->isp.ctrl_status),
582 RD_REG_WORD(®->isp.ictrl),
583 RD_REG_WORD(®->isp.istatus));
586 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
593 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
594 uint32_t risc_code_size)
597 struct qla_hw_data *ha = vha->hw;
599 mbx_cmd_t *mcp = &mc;
601 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
602 "Entered %s.\n", __func__);
604 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
605 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
606 mcp->mb[8] = MSW(risc_addr);
607 mcp->out_mb = MBX_8|MBX_0;
609 mcp->mb[0] = MBC_LOAD_RISC_RAM;
612 mcp->mb[1] = LSW(risc_addr);
613 mcp->mb[2] = MSW(req_dma);
614 mcp->mb[3] = LSW(req_dma);
615 mcp->mb[6] = MSW(MSD(req_dma));
616 mcp->mb[7] = LSW(MSD(req_dma));
617 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
618 if (IS_FWI2_CAPABLE(ha)) {
619 mcp->mb[4] = MSW(risc_code_size);
620 mcp->mb[5] = LSW(risc_code_size);
621 mcp->out_mb |= MBX_5|MBX_4;
623 mcp->mb[4] = LSW(risc_code_size);
624 mcp->out_mb |= MBX_4;
627 mcp->in_mb = MBX_1|MBX_0;
628 mcp->tov = MBX_TOV_SECONDS;
630 rval = qla2x00_mailbox_command(vha, mcp);
632 if (rval != QLA_SUCCESS) {
633 ql_dbg(ql_dbg_mbx, vha, 0x1023,
634 "Failed=%x mb[0]=%x mb[1]=%x.\n",
635 rval, mcp->mb[0], mcp->mb[1]);
637 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
638 "Done %s.\n", __func__);
644 #define EXTENDED_BB_CREDITS BIT_0
645 #define NVME_ENABLE_FLAG BIT_3
646 static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
648 uint16_t mb4 = BIT_0;
650 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
651 mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
656 static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
658 uint16_t mb4 = BIT_0;
660 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
661 struct nvram_81xx *nv = ha->nvram;
663 mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
671 * Start adapter firmware.
674 * ha = adapter block pointer.
675 * TARGET_QUEUE_LOCK must be released.
676 * ADAPTER_STATE_LOCK must be released.
679 * qla2x00 local function return status code.
685 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
688 struct qla_hw_data *ha = vha->hw;
690 mbx_cmd_t *mcp = &mc;
692 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
693 "Entered %s.\n", __func__);
695 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
698 if (IS_FWI2_CAPABLE(ha)) {
699 mcp->mb[1] = MSW(risc_addr);
700 mcp->mb[2] = LSW(risc_addr);
704 ha->flags.using_lr_setting = 0;
705 if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
706 IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
707 if (ql2xautodetectsfp) {
708 if (ha->flags.detected_lr_sfp) {
710 qla25xx_set_sfp_lr_dist(ha);
711 ha->flags.using_lr_setting = 1;
714 struct nvram_81xx *nv = ha->nvram;
715 /* set LR distance if specified in nvram */
716 if (nv->enhanced_features &
717 NEF_LR_DIST_ENABLE) {
719 qla25xx_set_nvr_lr_dist(ha);
720 ha->flags.using_lr_setting = 1;
725 if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
726 mcp->mb[4] |= NVME_ENABLE_FLAG;
728 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
729 struct nvram_81xx *nv = ha->nvram;
730 /* set minimum speed if specified in nvram */
731 if (nv->min_supported_speed >= 2 &&
732 nv->min_supported_speed <= 5) {
734 mcp->mb[11] |= nv->min_supported_speed & 0xF;
735 mcp->out_mb |= MBX_11;
737 vha->min_supported_speed =
738 nv->min_supported_speed;
742 mcp->mb[11] |= BIT_4;
745 if (ha->flags.exlogins_enabled)
746 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
748 if (ha->flags.exchoffld_enabled)
749 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
751 mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
752 mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
754 mcp->mb[1] = LSW(risc_addr);
755 mcp->out_mb |= MBX_1;
756 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
758 mcp->out_mb |= MBX_2;
762 mcp->tov = MBX_TOV_SECONDS;
764 rval = qla2x00_mailbox_command(vha, mcp);
766 if (rval != QLA_SUCCESS) {
767 ql_dbg(ql_dbg_mbx, vha, 0x1026,
768 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
772 if (!IS_FWI2_CAPABLE(ha))
775 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
776 ql_dbg(ql_dbg_mbx, vha, 0x119a,
777 "fw_ability_mask=%x.\n", ha->fw_ability_mask);
778 ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
779 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
780 ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
781 ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
782 ha->max_supported_speed == 0 ? "16Gps" :
783 ha->max_supported_speed == 1 ? "32Gps" :
784 ha->max_supported_speed == 2 ? "64Gps" : "unknown");
785 if (vha->min_supported_speed) {
786 ha->min_supported_speed = mcp->mb[5] &
787 (BIT_0 | BIT_1 | BIT_2);
788 ql_dbg(ql_dbg_mbx, vha, 0x119c,
789 "min_supported_speed=%s.\n",
790 ha->min_supported_speed == 6 ? "64Gps" :
791 ha->min_supported_speed == 5 ? "32Gps" :
792 ha->min_supported_speed == 4 ? "16Gps" :
793 ha->min_supported_speed == 3 ? "8Gps" :
794 ha->min_supported_speed == 2 ? "4Gps" : "unknown");
799 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
800 "Done %s.\n", __func__);
806 * qla_get_exlogin_status
807 * Get extended login status
808 * uses the memory offload control/status Mailbox
811 * ha: adapter state pointer.
812 * fwopt: firmware options
815 * qla2x00 local function status
820 #define FETCH_XLOGINS_STAT 0x8
822 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
823 uint16_t *ex_logins_cnt)
827 mbx_cmd_t *mcp = &mc;
829 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
830 "Entered %s\n", __func__);
832 memset(mcp->mb, 0 , sizeof(mcp->mb));
833 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
834 mcp->mb[1] = FETCH_XLOGINS_STAT;
835 mcp->out_mb = MBX_1|MBX_0;
836 mcp->in_mb = MBX_10|MBX_4|MBX_0;
837 mcp->tov = MBX_TOV_SECONDS;
840 rval = qla2x00_mailbox_command(vha, mcp);
841 if (rval != QLA_SUCCESS) {
842 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
844 *buf_sz = mcp->mb[4];
845 *ex_logins_cnt = mcp->mb[10];
847 ql_log(ql_log_info, vha, 0x1190,
848 "buffer size 0x%x, exchange login count=%d\n",
849 mcp->mb[4], mcp->mb[10]);
851 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
852 "Done %s.\n", __func__);
859 * qla_set_exlogin_mem_cfg
860 * set extended login memory configuration
861 * Mbx needs to be issues before init_cb is set
864 * ha: adapter state pointer.
865 * buffer: buffer pointer
866 * phys_addr: physical address of buffer
867 * size: size of buffer
868 * TARGET_QUEUE_LOCK must be released
869 * ADAPTER_STATE_LOCK must be release
872 * qla2x00 local funxtion status code.
877 #define CONFIG_XLOGINS_MEM 0x3
879 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
883 mbx_cmd_t *mcp = &mc;
884 struct qla_hw_data *ha = vha->hw;
886 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
887 "Entered %s.\n", __func__);
889 memset(mcp->mb, 0 , sizeof(mcp->mb));
890 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
891 mcp->mb[1] = CONFIG_XLOGINS_MEM;
892 mcp->mb[2] = MSW(phys_addr);
893 mcp->mb[3] = LSW(phys_addr);
894 mcp->mb[6] = MSW(MSD(phys_addr));
895 mcp->mb[7] = LSW(MSD(phys_addr));
896 mcp->mb[8] = MSW(ha->exlogin_size);
897 mcp->mb[9] = LSW(ha->exlogin_size);
898 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
899 mcp->in_mb = MBX_11|MBX_0;
900 mcp->tov = MBX_TOV_SECONDS;
902 rval = qla2x00_mailbox_command(vha, mcp);
903 if (rval != QLA_SUCCESS) {
905 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
907 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
908 "Done %s.\n", __func__);
915 * qla_get_exchoffld_status
916 * Get exchange offload status
917 * uses the memory offload control/status Mailbox
920 * ha: adapter state pointer.
921 * fwopt: firmware options
924 * qla2x00 local function status
929 #define FETCH_XCHOFFLD_STAT 0x2
931 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
932 uint16_t *ex_logins_cnt)
936 mbx_cmd_t *mcp = &mc;
938 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
939 "Entered %s\n", __func__);
941 memset(mcp->mb, 0 , sizeof(mcp->mb));
942 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
943 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
944 mcp->out_mb = MBX_1|MBX_0;
945 mcp->in_mb = MBX_10|MBX_4|MBX_0;
946 mcp->tov = MBX_TOV_SECONDS;
949 rval = qla2x00_mailbox_command(vha, mcp);
950 if (rval != QLA_SUCCESS) {
951 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
953 *buf_sz = mcp->mb[4];
954 *ex_logins_cnt = mcp->mb[10];
956 ql_log(ql_log_info, vha, 0x118e,
957 "buffer size 0x%x, exchange offload count=%d\n",
958 mcp->mb[4], mcp->mb[10]);
960 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
961 "Done %s.\n", __func__);
968 * qla_set_exchoffld_mem_cfg
969 * Set exchange offload memory configuration
970 * Mbx needs to be issues before init_cb is set
973 * ha: adapter state pointer.
974 * buffer: buffer pointer
975 * phys_addr: physical address of buffer
976 * size: size of buffer
977 * TARGET_QUEUE_LOCK must be released
978 * ADAPTER_STATE_LOCK must be release
981 * qla2x00 local funxtion status code.
986 #define CONFIG_XCHOFFLD_MEM 0x3
988 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
992 mbx_cmd_t *mcp = &mc;
993 struct qla_hw_data *ha = vha->hw;
995 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
996 "Entered %s.\n", __func__);
998 memset(mcp->mb, 0 , sizeof(mcp->mb));
999 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
1000 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
1001 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
1002 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
1003 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
1004 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
1005 mcp->mb[8] = MSW(ha->exchoffld_size);
1006 mcp->mb[9] = LSW(ha->exchoffld_size);
1007 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1008 mcp->in_mb = MBX_11|MBX_0;
1009 mcp->tov = MBX_TOV_SECONDS;
1011 rval = qla2x00_mailbox_command(vha, mcp);
1012 if (rval != QLA_SUCCESS) {
1014 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
1016 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
1017 "Done %s.\n", __func__);
1024 * qla2x00_get_fw_version
1025 * Get firmware version.
1028 * ha: adapter state pointer.
1029 * major: pointer for major number.
1030 * minor: pointer for minor number.
1031 * subminor: pointer for subminor number.
1034 * qla2x00 local function return status code.
1040 qla2x00_get_fw_version(scsi_qla_host_t *vha)
1044 mbx_cmd_t *mcp = &mc;
1045 struct qla_hw_data *ha = vha->hw;
1047 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
1048 "Entered %s.\n", __func__);
1050 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
1051 mcp->out_mb = MBX_0;
1052 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1053 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
1054 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
1055 if (IS_FWI2_CAPABLE(ha))
1056 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
1057 if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1059 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
1060 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;
1063 mcp->tov = MBX_TOV_SECONDS;
1064 rval = qla2x00_mailbox_command(vha, mcp);
1065 if (rval != QLA_SUCCESS)
1068 /* Return mailbox data. */
1069 ha->fw_major_version = mcp->mb[1];
1070 ha->fw_minor_version = mcp->mb[2];
1071 ha->fw_subminor_version = mcp->mb[3];
1072 ha->fw_attributes = mcp->mb[6];
1073 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
1074 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
1076 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1078 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1079 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1080 ha->mpi_version[1] = mcp->mb[11] >> 8;
1081 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1082 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1083 ha->phy_version[0] = mcp->mb[8] & 0xff;
1084 ha->phy_version[1] = mcp->mb[9] >> 8;
1085 ha->phy_version[2] = mcp->mb[9] & 0xff;
1088 if (IS_FWI2_CAPABLE(ha)) {
1089 ha->fw_attributes_h = mcp->mb[15];
1090 ha->fw_attributes_ext[0] = mcp->mb[16];
1091 ha->fw_attributes_ext[1] = mcp->mb[17];
1092 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1093 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1094 __func__, mcp->mb[15], mcp->mb[6]);
1095 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1096 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1097 __func__, mcp->mb[17], mcp->mb[16]);
1099 if (ha->fw_attributes_h & 0x4)
1100 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1101 "%s: Firmware supports Extended Login 0x%x\n",
1102 __func__, ha->fw_attributes_h);
1104 if (ha->fw_attributes_h & 0x8)
1105 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1106 "%s: Firmware supports Exchange Offload 0x%x\n",
1107 __func__, ha->fw_attributes_h);
1110 * FW supports nvme and driver load parameter requested nvme.
1111 * BIT 26 of fw_attributes indicates NVMe support.
1113 if ((ha->fw_attributes_h &
1114 (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
1116 if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
1117 vha->flags.nvme_first_burst = 1;
1119 vha->flags.nvme_enabled = 1;
1120 ql_log(ql_log_info, vha, 0xd302,
1121 "%s: FC-NVMe is Enabled (0x%x)\n",
1122 __func__, ha->fw_attributes_h);
1126 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1127 ha->serdes_version[0] = mcp->mb[7] & 0xff;
1128 ha->serdes_version[1] = mcp->mb[8] >> 8;
1129 ha->serdes_version[2] = mcp->mb[8] & 0xff;
1130 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1131 ha->mpi_version[1] = mcp->mb[11] >> 8;
1132 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1133 ha->pep_version[0] = mcp->mb[13] & 0xff;
1134 ha->pep_version[1] = mcp->mb[14] >> 8;
1135 ha->pep_version[2] = mcp->mb[14] & 0xff;
1136 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1137 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1138 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1139 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1140 if (IS_QLA28XX(ha)) {
1141 if (mcp->mb[16] & BIT_10) {
1142 ql_log(ql_log_info, vha, 0xffff,
1143 "FW support secure flash updates\n");
1144 ha->flags.secure_fw = 1;
1150 if (rval != QLA_SUCCESS) {
1152 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1155 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1156 "Done %s.\n", __func__);
1162 * qla2x00_get_fw_options
1163 * Set firmware options.
1166 * ha = adapter block pointer.
1167 * fwopt = pointer for firmware options.
1170 * qla2x00 local function return status code.
1176 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1180 mbx_cmd_t *mcp = &mc;
1182 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1183 "Entered %s.\n", __func__);
1185 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1186 mcp->out_mb = MBX_0;
1187 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1188 mcp->tov = MBX_TOV_SECONDS;
1190 rval = qla2x00_mailbox_command(vha, mcp);
1192 if (rval != QLA_SUCCESS) {
1194 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1196 fwopts[0] = mcp->mb[0];
1197 fwopts[1] = mcp->mb[1];
1198 fwopts[2] = mcp->mb[2];
1199 fwopts[3] = mcp->mb[3];
1201 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1202 "Done %s.\n", __func__);
1210 * qla2x00_set_fw_options
1211 * Set firmware options.
1214 * ha = adapter block pointer.
1215 * fwopt = pointer for firmware options.
1218 * qla2x00 local function return status code.
1224 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1228 mbx_cmd_t *mcp = &mc;
1230 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1231 "Entered %s.\n", __func__);
1233 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1234 mcp->mb[1] = fwopts[1];
1235 mcp->mb[2] = fwopts[2];
1236 mcp->mb[3] = fwopts[3];
1237 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1239 if (IS_FWI2_CAPABLE(vha->hw)) {
1240 mcp->in_mb |= MBX_1;
1241 mcp->mb[10] = fwopts[10];
1242 mcp->out_mb |= MBX_10;
1244 mcp->mb[10] = fwopts[10];
1245 mcp->mb[11] = fwopts[11];
1246 mcp->mb[12] = 0; /* Undocumented, but used */
1247 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1249 mcp->tov = MBX_TOV_SECONDS;
1251 rval = qla2x00_mailbox_command(vha, mcp);
1253 fwopts[0] = mcp->mb[0];
1255 if (rval != QLA_SUCCESS) {
1257 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1258 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1261 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1262 "Done %s.\n", __func__);
1269 * qla2x00_mbx_reg_test
1270 * Mailbox register wrap test.
1273 * ha = adapter block pointer.
1274 * TARGET_QUEUE_LOCK must be released.
1275 * ADAPTER_STATE_LOCK must be released.
1278 * qla2x00 local function return status code.
1284 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1288 mbx_cmd_t *mcp = &mc;
1290 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1291 "Entered %s.\n", __func__);
/* Load fixed test patterns into mb1-mb7; firmware must echo each one. */
1293 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1294 mcp->mb[1] = 0xAAAA;
1295 mcp->mb[2] = 0x5555;
1296 mcp->mb[3] = 0xAA55;
1297 mcp->mb[4] = 0x55AA;
1298 mcp->mb[5] = 0xA5A5;
1299 mcp->mb[6] = 0x5A5A;
1300 mcp->mb[7] = 0x2525;
1301 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1302 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1303 mcp->tov = MBX_TOV_SECONDS;
1305 rval = qla2x00_mailbox_command(vha, mcp);
/* Any register that does not come back unchanged fails the wrap test. */
1307 if (rval == QLA_SUCCESS) {
1308 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1309 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1310 rval = QLA_FUNCTION_FAILED;
1311 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1312 mcp->mb[7] != 0x2525)
1313 rval = QLA_FUNCTION_FAILED;
1316 if (rval != QLA_SUCCESS) {
1318 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1321 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1322 "Done %s.\n", __func__);
1329 * qla2x00_verify_checksum
1330 * Verify firmware checksum.
1333 * ha = adapter block pointer.
1334 * TARGET_QUEUE_LOCK must be released.
1335 * ADAPTER_STATE_LOCK must be released.
1338 * qla2x00 local function return status code.
1344 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1348 mbx_cmd_t *mcp = &mc;
1350 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1351 "Entered %s.\n", __func__);
1353 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1354 mcp->out_mb = MBX_0;
/* FWI2 parts take a 32-bit RISC address split across mb1 (MSW) and
 * mb2 (LSW); legacy ISPs take a 16-bit address in mb1 only. */
1356 if (IS_FWI2_CAPABLE(vha->hw)) {
1357 mcp->mb[1] = MSW(risc_addr);
1358 mcp->mb[2] = LSW(risc_addr);
1359 mcp->out_mb |= MBX_2|MBX_1;
1360 mcp->in_mb |= MBX_2|MBX_1;
1362 mcp->mb[1] = LSW(risc_addr);
1363 mcp->out_mb |= MBX_1;
1364 mcp->in_mb |= MBX_1;
1367 mcp->tov = MBX_TOV_SECONDS;
1369 rval = qla2x00_mailbox_command(vha, mcp);
1371 if (rval != QLA_SUCCESS) {
/* On failure, log the checksum value returned by firmware (32-bit on
 * FWI2 parts, 16-bit on legacy parts). */
1372 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1373 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1374 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1376 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1377 "Done %s.\n", __func__);
1384 * qla2x00_issue_iocb
1385 * Issue IOCB using mailbox command
1388 * ha = adapter state pointer.
1389 * buffer = buffer pointer.
1390 * phys_addr = physical address of buffer.
1391 * size = size of buffer.
1392 * TARGET_QUEUE_LOCK must be released.
1393 * ADAPTER_STATE_LOCK must be released.
1396 * qla2x00 local function return status code.
1402 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1403 dma_addr_t phys_addr, size_t size, uint32_t tov)
1407 mbx_cmd_t *mcp = &mc;
1409 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1410 "Entered %s.\n", __func__);
/* Hand firmware the 64-bit DMA address of the caller-built IOCB:
 * mb2/mb3 carry the low dword, mb6/mb7 the high dword. */
1412 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1414 mcp->mb[2] = MSW(phys_addr);
1415 mcp->mb[3] = LSW(phys_addr);
1416 mcp->mb[6] = MSW(MSD(phys_addr));
1417 mcp->mb[7] = LSW(MSD(phys_addr));
1418 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1419 mcp->in_mb = MBX_2|MBX_0;
1422 rval = qla2x00_mailbox_command(vha, mcp);
1424 if (rval != QLA_SUCCESS) {
1426 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
/* On success, treat the buffer as a completed status entry and mask
 * off reserved entry-status bits before the caller inspects it. */
1428 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
1430 /* Mask reserved bits. */
1431 sts_entry->entry_status &=
1432 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1433 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1434 "Done %s.\n", __func__);
/*
 * qla2x00_issue_iocb
 *	Convenience wrapper around qla2x00_issue_iocb_timeout().
 *	NOTE(review): the trailing timeout argument is on a continuation
 *	line not visible here — presumably the driver's default IOCB
 *	timeout constant; confirm against the full source.
 */
1441 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1444 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1449 * qla2x00_abort_command
1450 * Abort command aborts a specified IOCB.
1453 * ha = adapter block pointer.
1454 * sp = SRB structure pointer.
1457 * qla2x00 local function return status code.
1463 qla2x00_abort_command(srb_t *sp)
1465 unsigned long flags = 0;
1467 uint32_t handle = 0;
1469 mbx_cmd_t *mcp = &mc;
1470 fc_port_t *fcport = sp->fcport;
1471 scsi_qla_host_t *vha = fcport->vha;
1472 struct qla_hw_data *ha = vha->hw;
1473 struct req_que *req;
1474 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1476 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1477 "Entered %s.\n", __func__);
/* Pick the request queue owning this srb (qpair's queue if present). */
1479 if (vha->flags.qpairs_available && sp->qpair)
1480 req = sp->qpair->req;
/* Scan the outstanding-command table under hardware_lock to find the
 * firmware handle for this srb; slot 0 is never used. */
1484 spin_lock_irqsave(&ha->hardware_lock, flags);
1485 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1486 if (req->outstanding_cmds[handle] == sp)
1489 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1491 if (handle == req->num_outstanding_cmds) {
1492 /* command not found */
1493 return QLA_FUNCTION_FAILED;
1496 mcp->mb[0] = MBC_ABORT_COMMAND;
/* Loop ID goes in mb1: full 16 bits with extended IDs, else high byte. */
1497 if (HAS_EXTENDED_IDS(ha))
1498 mcp->mb[1] = fcport->loop_id;
1500 mcp->mb[1] = fcport->loop_id << 8;
/* 32-bit handle is split across mb2 (low word) and mb3 (high word). */
1501 mcp->mb[2] = (uint16_t)handle;
1502 mcp->mb[3] = (uint16_t)(handle >> 16);
1503 mcp->mb[6] = (uint16_t)cmd->device->lun;
1504 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1506 mcp->tov = MBX_TOV_SECONDS;
1508 rval = qla2x00_mailbox_command(vha, mcp);
1510 if (rval != QLA_SUCCESS) {
1511 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1513 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1514 "Done %s.\n", __func__);
/*
 * qla2x00_abort_target
 *	Issue MBC_ABORT_TARGET for the given remote port, then post a
 *	marker IOCB so firmware resynchronizes command ordering.
 */
1521 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1525 mbx_cmd_t *mcp = &mc;
1526 scsi_qla_host_t *vha;
1530 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1531 "Entered %s.\n", __func__);
1533 mcp->mb[0] = MBC_ABORT_TARGET;
1534 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
/* Loop ID placement depends on extended-ID support (see mb1 below). */
1535 if (HAS_EXTENDED_IDS(vha->hw)) {
1536 mcp->mb[1] = fcport->loop_id;
1538 mcp->out_mb |= MBX_10;
1540 mcp->mb[1] = fcport->loop_id << 8;
1542 mcp->mb[2] = vha->hw->loop_reset_delay;
1543 mcp->mb[9] = vha->vp_idx;
1546 mcp->tov = MBX_TOV_SECONDS;
1548 rval = qla2x00_mailbox_command(vha, mcp);
1549 if (rval != QLA_SUCCESS) {
1550 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1551 "Failed=%x.\n", rval);
1554 /* Issue marker IOCB. */
1555 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
/* Marker failure is logged but does not override the abort's rval. */
1557 if (rval2 != QLA_SUCCESS) {
1558 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1559 "Failed to issue marker IOCB (%x).\n", rval2);
1561 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1562 "Done %s.\n", __func__);
/*
 * qla2x00_lun_reset
 *	Issue MBC_LUN_RESET for LUN 'l' on the given remote port, then
 *	post a marker IOCB for that LUN.
 */
1569 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1573 mbx_cmd_t *mcp = &mc;
1574 scsi_qla_host_t *vha;
1578 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1579 "Entered %s.\n", __func__);
1581 mcp->mb[0] = MBC_LUN_RESET;
1582 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1583 if (HAS_EXTENDED_IDS(vha->hw))
1584 mcp->mb[1] = fcport->loop_id;
1586 mcp->mb[1] = fcport->loop_id << 8;
/* LUN is truncated to 32 bits for the mailbox interface. */
1587 mcp->mb[2] = (u32)l;
1589 mcp->mb[9] = vha->vp_idx;
1592 mcp->tov = MBX_TOV_SECONDS;
1594 rval = qla2x00_mailbox_command(vha, mcp);
1595 if (rval != QLA_SUCCESS) {
1596 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1599 /* Issue marker IOCB. */
1600 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
/* Marker failure is logged but does not override the reset's rval. */
1602 if (rval2 != QLA_SUCCESS) {
1603 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1604 "Failed to issue marker IOCB (%x).\n", rval2);
1606 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1607 "Done %s.\n", __func__);
1614 * qla2x00_get_adapter_id
1615 * Get adapter ID and topology.
1618 * ha = adapter block pointer.
1619 * id = pointer for loop ID.
1620 * al_pa = pointer for AL_PA.
1621 * area = pointer for area.
1622 * domain = pointer for domain.
1623 * top = pointer for topology.
1624 * TARGET_QUEUE_LOCK must be released.
1625 * ADAPTER_STATE_LOCK must be released.
1628 * qla2x00 local function return status code.
1634 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1635 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1639 mbx_cmd_t *mcp = &mc;
1641 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1642 "Entered %s.\n", __func__);
1644 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1645 mcp->mb[9] = vha->vp_idx;
1646 mcp->out_mb = MBX_9|MBX_0;
1647 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
/* CNA (FCoE) parts also return VLAN/FCF/VN-port MAC data in mb10-13. */
1648 if (IS_CNA_CAPABLE(vha->hw))
1649 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
/* FWI2 parts may return an FA-WWN port name in mb16-19. */
1650 if (IS_FWI2_CAPABLE(vha->hw))
1651 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1652 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
1653 mcp->in_mb |= MBX_15;
1654 mcp->tov = MBX_TOV_SECONDS;
1656 rval = qla2x00_mailbox_command(vha, mcp);
/* Map specific mailbox completion codes onto driver status codes. */
1657 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1658 rval = QLA_COMMAND_ERROR;
1659 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1660 rval = QLA_INVALID_COMMAND;
/* Unpack the port address: AL_PA/area from mb2, domain from mb3. */
1664 *al_pa = LSB(mcp->mb[2]);
1665 *area = MSB(mcp->mb[2]);
1666 *domain = LSB(mcp->mb[3]);
1668 *sw_cap = mcp->mb[7];
1670 if (rval != QLA_SUCCESS) {
1672 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1674 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1675 "Done %s.\n", __func__);
/* FCoE: cache VLAN ID (12 bits of mb9), FCF index and the VN-port
 * MAC address (bytes packed big-end-first across mb11-mb13). */
1677 if (IS_CNA_CAPABLE(vha->hw)) {
1678 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1679 vha->fcoe_fcf_idx = mcp->mb[10];
1680 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1681 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1682 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1683 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1684 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1685 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1687 /* If FA-WWN supported */
1688 if (IS_FAWWN_CAPABLE(vha->hw)) {
/* BIT_14 of mb7 indicates a fabric-assigned WWN is present. */
1689 if (mcp->mb[7] & BIT_14) {
1690 vha->port_name[0] = MSB(mcp->mb[16]);
1691 vha->port_name[1] = LSB(mcp->mb[16]);
1692 vha->port_name[2] = MSB(mcp->mb[17]);
1693 vha->port_name[3] = LSB(mcp->mb[17]);
1694 vha->port_name[4] = MSB(mcp->mb[18]);
1695 vha->port_name[5] = LSB(mcp->mb[18]);
1696 vha->port_name[6] = MSB(mcp->mb[19]);
1697 vha->port_name[7] = LSB(mcp->mb[19]);
1698 fc_host_port_name(vha->host) =
1699 wwn_to_u64(vha->port_name);
1700 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1701 "FA-WWN acquired %016llx\n",
1702 wwn_to_u64(vha->port_name));
/* 27xx/28xx: mb15 carries buffer-to-buffer credit recovery info. */
1706 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
1707 vha->bbcr = mcp->mb[15];
1714 * qla2x00_get_retry_cnt
1715 * Get current firmware login retry count and delay.
1718 * ha = adapter block pointer.
1719 * retry_cnt = pointer to login retry count.
1720 * tov = pointer to login timeout value.
1723 * qla2x00 local function return status code.
1729 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1735 mbx_cmd_t *mcp = &mc;
1737 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1738 "Entered %s.\n", __func__);
1740 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1741 mcp->out_mb = MBX_0;
1742 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1743 mcp->tov = MBX_TOV_SECONDS;
1745 rval = qla2x00_mailbox_command(vha, mcp);
1747 if (rval != QLA_SUCCESS) {
1749 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1750 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1752 /* Convert returned data and check our values. */
/* mb3 is R_A_TOV in 100ms units: halve it for the caller, then scale
 * to seconds for the comparison below. */
1753 *r_a_tov = mcp->mb[3] / 2;
1754 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
/* Only adopt the firmware's retry count if its total retry window
 * (count * timeout) exceeds the caller's current one. */
1755 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1756 /* Update to the larger values */
1757 *retry_cnt = (uint8_t)mcp->mb[1];
1761 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1762 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1769 * qla2x00_init_firmware
1770 * Initialize adapter firmware.
1773 * ha = adapter block pointer.
1774 * dptr = Initialization control block pointer.
1775 * size = size of initialization control block.
1776 * TARGET_QUEUE_LOCK must be released.
1777 * ADAPTER_STATE_LOCK must be released.
1780 * qla2x00 local function return status code.
1786 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1790 mbx_cmd_t *mcp = &mc;
1791 struct qla_hw_data *ha = vha->hw;
1793 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1794 "Entered %s.\n", __func__);
/* P3P (82xx) with doorbell writes enabled: kick the doorbell register
 * before initializing firmware. */
1796 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1797 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1798 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
/* NPIV-capable firmware uses the MID (multi-ID) variant of the command. */
1800 if (ha->flags.npiv_supported)
1801 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1803 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
/* 64-bit DMA address of the init control block in mb2/mb3 (low dword)
 * and mb6/mb7 (high dword). */
1806 mcp->mb[2] = MSW(ha->init_cb_dma);
1807 mcp->mb[3] = LSW(ha->init_cb_dma);
1808 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1809 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1810 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
/* Optional extended init control block goes out in mb10-mb14. */
1811 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1813 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1814 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1815 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1816 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1817 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1818 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1820 /* 1 and 2 should normally be captured. */
1821 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1822 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
1823 /* mb3 is additional info about the installed SFP. */
1824 mcp->in_mb |= MBX_3;
1825 mcp->buf_size = size;
1826 mcp->flags = MBX_DMA_OUT;
1827 mcp->tov = MBX_TOV_SECONDS;
1828 rval = qla2x00_mailbox_command(vha, mcp);
1830 if (rval != QLA_SUCCESS) {
1832 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1833 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
1834 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
/* Dump the control block(s) to aid firmware-init failure triage. */
1836 ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
1837 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1838 0x0104d, ha->init_cb, sizeof(*ha->init_cb));
1840 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1841 ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
1842 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1843 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
/* 27xx/28xx report SFP validation problems via mb2/mb3 magic values. */
1846 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1847 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1848 ql_dbg(ql_dbg_mbx, vha, 0x119d,
1849 "Invalid SFP/Validation Failed\n");
1851 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1852 "Done %s.\n", __func__);
1860 * qla2x00_get_port_database
1861 * Issue normal/enhanced get port database mailbox command
1862 * and copy device name as necessary.
1865 * ha = adapter state pointer.
1866 * dev = structure pointer.
1867 * opt = enhanced cmd option byte.
1870 * qla2x00 local function return status code.
1876 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1880 mbx_cmd_t *mcp = &mc;
1881 port_database_t *pd;
1882 struct port_database_24xx *pd24;
1884 struct qla_hw_data *ha = vha->hw;
1886 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1887 "Entered %s.\n", __func__);
/* DMA buffer the firmware fills with the port database entry. */
1890 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1892 ql_log(ql_log_warn, vha, 0x1050,
1893 "Failed to allocate port database structure.\n");
1895 return QLA_MEMORY_ALLOC_FAILED;
1898 mcp->mb[0] = MBC_GET_PORT_DATABASE;
/* Non-zero opt on legacy (non-FWI2) parts selects the enhanced form. */
1899 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1900 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1901 mcp->mb[2] = MSW(pd_dma);
1902 mcp->mb[3] = LSW(pd_dma);
1903 mcp->mb[6] = MSW(MSD(pd_dma));
1904 mcp->mb[7] = LSW(MSD(pd_dma));
1905 mcp->mb[9] = vha->vp_idx;
1906 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
/* Loop-ID encoding in mb1 differs by chip family (see each branch). */
1908 if (IS_FWI2_CAPABLE(ha)) {
1909 mcp->mb[1] = fcport->loop_id;
1911 mcp->out_mb |= MBX_10|MBX_1;
1912 mcp->in_mb |= MBX_1;
1913 } else if (HAS_EXTENDED_IDS(ha)) {
1914 mcp->mb[1] = fcport->loop_id;
1916 mcp->out_mb |= MBX_10|MBX_1;
1918 mcp->mb[1] = fcport->loop_id << 8 | opt;
1919 mcp->out_mb |= MBX_1;
1921 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1922 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1923 mcp->flags = MBX_DMA_IN;
/* Scale the timeout from the configured login timeout (2.5x). */
1924 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1925 rval = qla2x00_mailbox_command(vha, mcp);
1926 if (rval != QLA_SUCCESS)
/* FWI2 path: parse the 24xx-format port database. */
1929 if (IS_FWI2_CAPABLE(ha)) {
1931 u8 current_login_state, last_login_state;
1933 pd24 = (struct port_database_24xx *) pd;
1935 /* Check for logged in state. */
/* NVMe targets keep their login state in the high nibble. */
1936 if (NVME_TARGET(ha, fcport)) {
1937 current_login_state = pd24->current_login_state >> 4;
1938 last_login_state = pd24->last_login_state >> 4;
1940 current_login_state = pd24->current_login_state & 0xf;
1941 last_login_state = pd24->last_login_state & 0xf;
1943 fcport->current_login_state = pd24->current_login_state;
1944 fcport->last_login_state = pd24->last_login_state;
1946 /* Check for logged in state. */
1947 if (current_login_state != PDS_PRLI_COMPLETE &&
1948 last_login_state != PDS_PRLI_COMPLETE) {
1949 ql_dbg(ql_dbg_mbx, vha, 0x119a,
1950 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
1951 current_login_state, last_login_state,
1953 rval = QLA_FUNCTION_FAILED;
/* Detect a port-name change mid-flight: known name that no longer
 * matches means the device was swapped/lost behind this loop ID. */
1959 if (fcport->loop_id == FC_NO_LOOP_ID ||
1960 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1961 memcmp(fcport->port_name, pd24->port_name, 8))) {
1962 /* We lost the device mid way. */
1963 rval = QLA_NOT_LOGGED_IN;
1967 /* Names are little-endian. */
1968 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1969 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1971 /* Get port_id of device. */
1972 fcport->d_id.b.domain = pd24->port_id[0];
1973 fcport->d_id.b.area = pd24->port_id[1];
1974 fcport->d_id.b.al_pa = pd24->port_id[2];
1975 fcport->d_id.b.rsvd_1 = 0;
1977 /* If not target must be initiator or unknown type. */
1978 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1979 fcport->port_type = FCT_INITIATOR;
1981 fcport->port_type = FCT_TARGET;
1983 /* Passback COS information. */
1984 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1985 FC_COS_CLASS2 : FC_COS_CLASS3;
1987 if (pd24->prli_svc_param_word_3[0] & BIT_7)
1988 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
/* Legacy path: parse the pre-24xx port database layout. */
1992 /* Check for logged in state. */
1993 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1994 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1995 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1996 "Unable to verify login-state (%x/%x) - "
1997 "portid=%02x%02x%02x.\n", pd->master_state,
1998 pd->slave_state, fcport->d_id.b.domain,
1999 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2000 rval = QLA_FUNCTION_FAILED;
2004 if (fcport->loop_id == FC_NO_LOOP_ID ||
2005 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2006 memcmp(fcport->port_name, pd->port_name, 8))) {
2007 /* We lost the device mid way. */
2008 rval = QLA_NOT_LOGGED_IN;
2012 /* Names are little-endian. */
2013 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
2014 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
2016 /* Get port_id of device. */
/* NOTE(review): area comes from port_id[3] here vs [1] in the 24xx
 * path — this follows the legacy layout's byte order; confirm against
 * the legacy port_database_t definition. */
2017 fcport->d_id.b.domain = pd->port_id[0];
2018 fcport->d_id.b.area = pd->port_id[3];
2019 fcport->d_id.b.al_pa = pd->port_id[2];
2020 fcport->d_id.b.rsvd_1 = 0;
2022 /* If not target must be initiator or unknown type. */
2023 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
2024 fcport->port_type = FCT_INITIATOR;
2026 fcport->port_type = FCT_TARGET;
2028 /* Passback COS information. */
2029 fcport->supported_classes = (pd->options & BIT_4) ?
2030 FC_COS_CLASS2 : FC_COS_CLASS3;
2034 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
2037 if (rval != QLA_SUCCESS) {
2038 ql_dbg(ql_dbg_mbx, vha, 0x1052,
2039 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
2040 mcp->mb[0], mcp->mb[1]);
2042 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
2043 "Done %s.\n", __func__);
2050 * qla2x00_get_firmware_state
2051 * Get adapter firmware state.
2054 * ha = adapter block pointer.
2055 * dptr = pointer for firmware state.
2056 * TARGET_QUEUE_LOCK must be released.
2057 * ADAPTER_STATE_LOCK must be released.
2060 * qla2x00 local function return status code.
2066 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
2070 mbx_cmd_t *mcp = &mc;
2071 struct qla_hw_data *ha = vha->hw;
2073 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
2074 "Entered %s.\n", __func__);
2076 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
2077 mcp->out_mb = MBX_0;
/* FWI2 parts report six state words (mb1-mb6); legacy parts only mb1. */
2078 if (IS_FWI2_CAPABLE(vha->hw))
2079 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2081 mcp->in_mb = MBX_1|MBX_0;
2082 mcp->tov = MBX_TOV_SECONDS;
2084 rval = qla2x00_mailbox_command(vha, mcp);
2086 /* Return firmware states. */
2087 states[0] = mcp->mb[1];
2088 if (IS_FWI2_CAPABLE(vha->hw)) {
2089 states[1] = mcp->mb[2];
2090 states[2] = mcp->mb[3]; /* SFP info */
2091 states[3] = mcp->mb[4];
2092 states[4] = mcp->mb[5];
2093 states[5] = mcp->mb[6]; /* DPORT status */
2096 if (rval != QLA_SUCCESS) {
2098 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
/* 27xx/28xx flag SFP validation problems via mb2/mb3 magic values. */
2100 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2101 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2102 ql_dbg(ql_dbg_mbx, vha, 0x119e,
2103 "Invalid SFP/Validation Failed\n");
2105 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2106 "Done %s.\n", __func__);
2113 * qla2x00_get_port_name
2114 * Issue get port name mailbox command.
2115 * Returned name is in big endian format.
2118 * ha = adapter block pointer.
2119 * loop_id = loop ID of device.
2120 * name = pointer for name.
2121 * TARGET_QUEUE_LOCK must be released.
2122 * ADAPTER_STATE_LOCK must be released.
2125 * qla2x00 local function return status code.
2131 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2136 mbx_cmd_t *mcp = &mc;
2138 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2139 "Entered %s.\n", __func__);
2141 mcp->mb[0] = MBC_GET_PORT_NAME;
2142 mcp->mb[9] = vha->vp_idx;
2143 mcp->out_mb = MBX_9|MBX_1|MBX_0;
/* Extended-ID parts take the full loop ID in mb1; legacy parts pack
 * loop ID into the high byte with the option in the low byte. */
2144 if (HAS_EXTENDED_IDS(vha->hw)) {
2145 mcp->mb[1] = loop_id;
2147 mcp->out_mb |= MBX_10;
2149 mcp->mb[1] = loop_id << 8 | opt;
2152 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2153 mcp->tov = MBX_TOV_SECONDS;
2155 rval = qla2x00_mailbox_command(vha, mcp);
2157 if (rval != QLA_SUCCESS) {
2159 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2162 /* This function returns name in big endian. */
2163 name[0] = MSB(mcp->mb[2]);
2164 name[1] = LSB(mcp->mb[2]);
2165 name[2] = MSB(mcp->mb[3]);
2166 name[3] = LSB(mcp->mb[3]);
2167 name[4] = MSB(mcp->mb[6]);
2168 name[5] = LSB(mcp->mb[6]);
2169 name[6] = MSB(mcp->mb[7]);
2170 name[7] = LSB(mcp->mb[7]);
2173 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2174 "Done %s.\n", __func__);
2181 * qla24xx_link_initialization
2182 * Issue link initialization mailbox command.
2185 * ha = adapter block pointer.
2186 * TARGET_QUEUE_LOCK must be released.
2187 * ADAPTER_STATE_LOCK must be released.
2190 * qla2x00 local function return status code.
2196 qla24xx_link_initialize(scsi_qla_host_t *vha)
2200 mbx_cmd_t *mcp = &mc;
2202 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2203 "Entered %s.\n", __func__);
/* Link initialization is only valid on FWI2 FC parts (not CNAs). */
2205 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2206 return QLA_FUNCTION_FAILED;
2208 mcp->mb[0] = MBC_LINK_INITIALIZATION;
/* Select init mode in mb1: BIT_6 for loop topology, BIT_5 otherwise. */
2210 if (vha->hw->operating_mode == LOOP)
2211 mcp->mb[1] |= BIT_6;
2213 mcp->mb[1] |= BIT_5;
2216 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2218 mcp->tov = MBX_TOV_SECONDS;
2220 rval = qla2x00_mailbox_command(vha, mcp);
2222 if (rval != QLA_SUCCESS) {
2223 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2225 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2226 "Done %s.\n", __func__);
2234 * Issue LIP reset mailbox command.
2237 * ha = adapter block pointer.
2238 * TARGET_QUEUE_LOCK must be released.
2239 * ADAPTER_STATE_LOCK must be released.
2242 * qla2x00 local function return status code.
2248 qla2x00_lip_reset(scsi_qla_host_t *vha)
2252 mbx_cmd_t *mcp = &mc;
2254 ql_dbg(ql_dbg_disc, vha, 0x105a,
2255 "Entered %s.\n", __func__);
/* Three chip-family variants: CNA uses full login across all FCFs,
 * FWI2 uses full login with a reset delay, legacy uses LIP reset. */
2257 if (IS_CNA_CAPABLE(vha->hw)) {
2258 /* Logout across all FCFs. */
2259 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2262 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2263 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2264 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2267 mcp->mb[3] = vha->hw->loop_reset_delay;
2268 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2270 mcp->mb[0] = MBC_LIP_RESET;
2271 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
/* Legacy: mb1 carries the AL_PA mask; byte position depends on
 * extended-ID support. */
2272 if (HAS_EXTENDED_IDS(vha->hw)) {
2273 mcp->mb[1] = 0x00ff;
2275 mcp->out_mb |= MBX_10;
2277 mcp->mb[1] = 0xff00;
2279 mcp->mb[2] = vha->hw->loop_reset_delay;
2283 mcp->tov = MBX_TOV_SECONDS;
2285 rval = qla2x00_mailbox_command(vha, mcp);
2287 if (rval != QLA_SUCCESS) {
2289 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2292 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2293 "Done %s.\n", __func__);
2304 * ha = adapter block pointer.
2305 * sns = pointer for command.
2306 * cmd_size = command size.
2307 * buf_size = response/command size.
2308 * TARGET_QUEUE_LOCK must be released.
2309 * ADAPTER_STATE_LOCK must be released.
2312 * qla2x00 local function return status code.
2318 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2319 uint16_t cmd_size, size_t buf_size)
2323 mbx_cmd_t *mcp = &mc;
2325 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2326 "Entered %s.\n", __func__);
2328 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2329 "Retry cnt=%d ratov=%d total tov=%d.\n",
2330 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
/* SNS command buffer: size in mb1, 64-bit DMA address across
 * mb2/mb3 (low dword) and mb6/mb7 (high dword). */
2332 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2333 mcp->mb[1] = cmd_size;
2334 mcp->mb[2] = MSW(sns_phys_address);
2335 mcp->mb[3] = LSW(sns_phys_address);
2336 mcp->mb[6] = MSW(MSD(sns_phys_address));
2337 mcp->mb[7] = LSW(MSD(sns_phys_address));
2338 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2339 mcp->in_mb = MBX_0|MBX_1;
2340 mcp->buf_size = buf_size;
/* Same buffer is DMA'd out (command) and back in (response). */
2341 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2342 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2343 rval = qla2x00_mailbox_command(vha, mcp);
2345 if (rval != QLA_SUCCESS) {
2347 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2348 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2349 rval, mcp->mb[0], mcp->mb[1]);
2352 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2353 "Done %s.\n", __func__);
/*
 * qla24xx_login_fabric
 *	FWI2 fabric login: builds a LOGINOUT_PORT IOCB (PLOGI) and issues
 *	it via the mailbox IOCB path, then translates the IOCB completion
 *	into legacy mailbox-style status values in mb[].
 */
2360 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2361 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2365 struct logio_entry_24xx *lg;
2368 struct qla_hw_data *ha = vha->hw;
2369 struct req_que *req;
2371 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2372 "Entered %s.\n", __func__);
/* Virtual ports use their own qpair's request queue. */
2374 if (vha->vp_idx && vha->qpair)
2375 req = vha->qpair->req;
2377 req = ha->req_q_map[0];
2379 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2381 ql_log(ql_log_warn, vha, 0x1062,
2382 "Failed to allocate login IOCB.\n");
2383 return QLA_MEMORY_ALLOC_FAILED;
2386 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2387 lg->entry_count = 1;
2388 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2389 lg->nport_handle = cpu_to_le16(loop_id);
2390 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
/* Caller-supplied options select conditional PLOGI / PRLI skip. */
2392 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2394 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2395 lg->port_id[0] = al_pa;
2396 lg->port_id[1] = area;
2397 lg->port_id[2] = domain;
2398 lg->vp_index = vha->vp_idx;
/* Timeout scaled from R_A_TOV (stored in 100ms units): 2*R_A_TOV+2s. */
2399 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2400 (ha->r_a_tov / 10 * 2) + 2);
2401 if (rval != QLA_SUCCESS) {
2402 ql_dbg(ql_dbg_mbx, vha, 0x1063,
2403 "Failed to issue login IOCB (%x).\n", rval);
2404 } else if (lg->entry_status != 0) {
2405 ql_dbg(ql_dbg_mbx, vha, 0x1064,
2406 "Failed to complete IOCB -- error status (%x).\n",
2408 rval = QLA_FUNCTION_FAILED;
2409 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2410 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2411 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2413 ql_dbg(ql_dbg_mbx, vha, 0x1065,
2414 "Failed to complete IOCB -- completion status (%x) "
2415 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
/* Map IOCB sub-status codes onto legacy mailbox status values so
 * existing callers need no FWI2-specific handling. */
2419 case LSC_SCODE_PORTID_USED:
2420 mb[0] = MBS_PORT_ID_USED;
2421 mb[1] = LSW(iop[1]);
2423 case LSC_SCODE_NPORT_USED:
2424 mb[0] = MBS_LOOP_ID_USED;
2426 case LSC_SCODE_NOLINK:
2427 case LSC_SCODE_NOIOCB:
2428 case LSC_SCODE_NOXCB:
2429 case LSC_SCODE_CMD_FAILED:
2430 case LSC_SCODE_NOFABRIC:
2431 case LSC_SCODE_FW_NOT_READY:
2432 case LSC_SCODE_NOT_LOGGED_IN:
2433 case LSC_SCODE_NOPCB:
2434 case LSC_SCODE_ELS_REJECT:
2435 case LSC_SCODE_CMD_PARAM_ERR:
2436 case LSC_SCODE_NONPORT:
2437 case LSC_SCODE_LOGGED_IN:
2438 case LSC_SCODE_NOFLOGI_ACC:
2440 mb[0] = MBS_COMMAND_ERROR;
2444 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2445 "Done %s.\n", __func__);
/* Success: report completion and derive COS bits from io params. */
2447 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2449 mb[0] = MBS_COMMAND_COMPLETE;
2451 if (iop[0] & BIT_4) {
2457 /* Passback COS information. */
2459 if (lg->io_parameter[7] || lg->io_parameter[8])
2460 mb[10] |= BIT_0; /* Class 2. */
2461 if (lg->io_parameter[9] || lg->io_parameter[10])
2462 mb[10] |= BIT_1; /* Class 3. */
2463 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2464 mb[10] |= BIT_7; /* Confirmed Completion
2469 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2475 * qla2x00_login_fabric
2476 * Issue login fabric port mailbox command.
2479 * ha = adapter block pointer.
2480 * loop_id = device loop ID.
2481 * domain = device domain.
2482 * area = device area.
2483 * al_pa = device AL_PA.
2484 * status = pointer for return status.
2485 * opt = command options.
2486 * TARGET_QUEUE_LOCK must be released.
2487 * ADAPTER_STATE_LOCK must be released.
2490 * qla2x00 local function return status code.
2496 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2497 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2501 mbx_cmd_t *mcp = &mc;
2502 struct qla_hw_data *ha = vha->hw;
2504 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2505 "Entered %s.\n", __func__);
2507 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2508 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
/* Extended-ID parts take the loop ID alone in mb1; legacy parts pack
 * loop ID into the high byte with the option in the low byte. */
2509 if (HAS_EXTENDED_IDS(ha)) {
2510 mcp->mb[1] = loop_id;
2512 mcp->out_mb |= MBX_10;
2514 mcp->mb[1] = (loop_id << 8) | opt;
/* Destination port ID: domain in mb2, area/AL_PA packed in mb3. */
2516 mcp->mb[2] = domain;
2517 mcp->mb[3] = area << 8 | al_pa;
2519 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2520 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2522 rval = qla2x00_mailbox_command(vha, mcp);
2524 /* Return mailbox statuses. */
2531 /* COS retrieved from Get-Port-Database mailbox command. */
2535 if (rval != QLA_SUCCESS) {
2536 /* RLU tmp code: need to change main mailbox_command function to
2537 * return ok even when the mailbox completion value is not
2538 * SUCCESS. The caller needs to be responsible to interpret
2539 * the return values of this mailbox command if we're not
2540 * to change too much of the existing code.
/* These completion codes are "soft" failures the caller interprets. */
2542 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2543 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2544 mcp->mb[0] == 0x4006)
2548 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2549 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2550 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2553 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2554 "Done %s.\n", __func__);
2561 * qla2x00_login_local_device
2562 * Issue login loop port mailbox command.
2565 * ha = adapter block pointer.
2566 * loop_id = device loop ID.
2567 * opt = command options.
2570 * Return status code.
2577 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2578 uint16_t *mb_ret, uint8_t opt)
2582 mbx_cmd_t *mcp = &mc;
2583 struct qla_hw_data *ha = vha->hw;
2585 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2586 "Entered %s.\n", __func__);
/* FWI2 parts have no loop-port login mailbox; delegate to the IOCB-
 * based fabric login which handles both cases. */
2588 if (IS_FWI2_CAPABLE(ha))
2589 return qla24xx_login_fabric(vha, fcport->loop_id,
2590 fcport->d_id.b.domain, fcport->d_id.b.area,
2591 fcport->d_id.b.al_pa, mb_ret, opt);
2593 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2594 if (HAS_EXTENDED_IDS(ha))
2595 mcp->mb[1] = fcport->loop_id;
2597 mcp->mb[1] = fcport->loop_id << 8;
2599 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2600 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2601 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2603 rval = qla2x00_mailbox_command(vha, mcp);
2605 /* Return mailbox statuses. */
/* mb_ret is optional; only fill it when the caller wants statuses. */
2606 if (mb_ret != NULL) {
2607 mb_ret[0] = mcp->mb[0];
2608 mb_ret[1] = mcp->mb[1];
2609 mb_ret[6] = mcp->mb[6];
2610 mb_ret[7] = mcp->mb[7];
2613 if (rval != QLA_SUCCESS) {
2614 /* AV tmp code: need to change main mailbox_command function to
2615 * return ok even when the mailbox completion value is not
2616 * SUCCESS. The caller needs to be responsible to interpret
2617 * the return values of this mailbox command if we're not
2618 * to change too much of the existing code.
2620 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2623 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2624 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2625 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2628 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2629 "Done %s.\n", __func__);
/*
 * qla24xx_fabric_logout
 *	FWI2 fabric logout: builds a LOGINOUT_PORT IOCB (implicit LOGO)
 *	and issues it via the mailbox IOCB path.
 */
2636 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2637 uint8_t area, uint8_t al_pa)
2640 struct logio_entry_24xx *lg;
2642 struct qla_hw_data *ha = vha->hw;
2643 struct req_que *req;
2645 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2646 "Entered %s.\n", __func__);
2648 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2650 ql_log(ql_log_warn, vha, 0x106e,
2651 "Failed to allocate logout IOCB.\n");
2652 return QLA_MEMORY_ALLOC_FAILED;
2656 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2657 lg->entry_count = 1;
2658 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2659 lg->nport_handle = cpu_to_le16(loop_id);
2661 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2663 lg->port_id[0] = al_pa;
2664 lg->port_id[1] = area;
2665 lg->port_id[2] = domain;
2666 lg->vp_index = vha->vp_idx;
/* Timeout scaled from R_A_TOV (stored in 100ms units): 2*R_A_TOV+2s. */
2667 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2668 (ha->r_a_tov / 10 * 2) + 2);
2669 if (rval != QLA_SUCCESS) {
2670 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2671 "Failed to issue logout IOCB (%x).\n", rval);
2672 } else if (lg->entry_status != 0) {
2673 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2674 "Failed to complete IOCB -- error status (%x).\n",
2676 rval = QLA_FUNCTION_FAILED;
2677 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2678 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2679 "Failed to complete IOCB -- completion status (%x) "
2680 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2681 le32_to_cpu(lg->io_parameter[0]),
2682 le32_to_cpu(lg->io_parameter[1]));
2685 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2686 "Done %s.\n", __func__);
2689 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2695 * qla2x00_fabric_logout
2696 * Issue logout fabric port mailbox command.
2699 * ha = adapter block pointer.
2700 * loop_id = device loop ID.
2701 * TARGET_QUEUE_LOCK must be released.
2702 * ADAPTER_STATE_LOCK must be released.
2705 * qla2x00 local function return status code.
/*
 * qla2x00_fabric_logout - Issue the MBC_LOGOUT_FABRIC_PORT mailbox command
 * on pre-FWI2 (mailbox-based) adapters.
 *
 * loop_id is placed in mb[1]; on non-extended-ID adapters it is shifted
 * into the upper byte. domain/area/al_pa are accepted for interface parity
 * with qla24xx_fabric_logout but are not used by this mailbox command.
 */
2711 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2712     uint8_t area, uint8_t al_pa)
2716 	mbx_cmd_t *mcp = &mc;
2718 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2719 	    "Entered %s.\n", __func__);
2721 	mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2722 	mcp->out_mb = MBX_1|MBX_0;
2723 	if (HAS_EXTENDED_IDS(vha->hw)) {
2724 		mcp->mb[1] = loop_id;
2726 		mcp->out_mb |= MBX_10;
	/* Legacy (non-extended) IDs live in the upper byte of mb[1]. */
2728 		mcp->mb[1] = loop_id << 8;
2731 	mcp->in_mb = MBX_1|MBX_0;
2732 	mcp->tov = MBX_TOV_SECONDS;
2734 	rval = qla2x00_mailbox_command(vha, mcp);
2736 	if (rval != QLA_SUCCESS) {
2738 		ql_dbg(ql_dbg_mbx, vha, 0x1074,
2739 		    "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2742 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2743 		    "Done %s.\n", __func__);
2750 * qla2x00_full_login_lip
2751 * Issue full login LIP mailbox command.
2754 * ha = adapter block pointer.
2755 * TARGET_QUEUE_LOCK must be released.
2756 * ADAPTER_STATE_LOCK must be released.
2759 * qla2x00 local function return status code.
/*
 * qla2x00_full_login_lip - Issue the MBC_LIP_FULL_LOGIN mailbox command,
 * forcing a LIP (Loop Initialization Primitive) with full login.
 *
 * On FWI2-capable adapters BIT_4 in mb[1] selects the newer LIP variant.
 */
2765 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2769 	mbx_cmd_t *mcp = &mc;
2771 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2772 	    "Entered %s.\n", __func__);
2774 	mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2775 	mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
2778 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2780 	mcp->tov = MBX_TOV_SECONDS;
2782 	rval = qla2x00_mailbox_command(vha, mcp);
2784 	if (rval != QLA_SUCCESS) {
2786 		ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2789 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2790 		    "Done %s.\n", __func__);
2797 * qla2x00_get_id_list
2800 * ha = adapter block pointer.
2803 * qla2x00 local function return status code.
/*
 * qla2x00_get_id_list - Issue MBC_GET_ID_LIST to fetch the firmware's
 * loop-ID/port-ID list into a caller-provided DMA buffer.
 *
 * The 64-bit DMA address is split across different mailbox registers
 * depending on adapter generation (FWI2 vs legacy). On success the number
 * of entries returned by firmware (mb[1]) is stored through *entries.
 *
 * NOTE(review): the *entries output parameter is declared on a line elided
 * from this listing.
 */
2809 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2814 	mbx_cmd_t *mcp = &mc;
2816 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2817 	    "Entered %s.\n", __func__);
2819 	if (id_list == NULL)
2820 		return QLA_FUNCTION_FAILED;
2822 	mcp->mb[0] = MBC_GET_ID_LIST;
2823 	mcp->out_mb = MBX_0;
2824 	if (IS_FWI2_CAPABLE(vha->hw)) {
	/* FWI2 layout: address in mb[2,3,6,7], vp index in mb[9]. */
2825 		mcp->mb[2] = MSW(id_list_dma);
2826 		mcp->mb[3] = LSW(id_list_dma);
2827 		mcp->mb[6] = MSW(MSD(id_list_dma));
2828 		mcp->mb[7] = LSW(MSD(id_list_dma));
2830 		mcp->mb[9] = vha->vp_idx;
2831 		mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
	/* Legacy layout: address in mb[1,2,3,6]. */
2833 		mcp->mb[1] = MSW(id_list_dma);
2834 		mcp->mb[2] = LSW(id_list_dma);
2835 		mcp->mb[3] = MSW(MSD(id_list_dma));
2836 		mcp->mb[6] = LSW(MSD(id_list_dma));
2837 		mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2839 	mcp->in_mb = MBX_1|MBX_0;
2840 	mcp->tov = MBX_TOV_SECONDS;
2842 	rval = qla2x00_mailbox_command(vha, mcp);
2844 	if (rval != QLA_SUCCESS) {
2846 		ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2848 		*entries = mcp->mb[1];
2849 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2850 		    "Done %s.\n", __func__);
2857 * qla2x00_get_resource_cnts
2858 * Get current firmware resource counts.
2861 * ha = adapter block pointer.
2864 * qla2x00 local function return status code.
/*
 * qla2x00_get_resource_cnts - Issue MBC_GET_RESOURCE_COUNTS and cache the
 * firmware's exchange/IOCB resource counters in qla_hw_data.
 *
 * mb[1,2] = original/current target exchange counts, mb[3,6] = current/
 * original initiator exchange counts, mb[7,10] = current/original IOCB
 * counts, mb[11] = max NPIV vports (when supported), mb[12] = max FCF
 * count on 81xx/83xx.
 */
2870 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
2872 	struct qla_hw_data *ha = vha->hw;
2875 	mbx_cmd_t *mcp = &mc;
2877 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2878 	    "Entered %s.\n", __func__);
2880 	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2881 	mcp->out_mb = MBX_0;
2882 	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2883 	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
2884 	    IS_QLA27XX(ha) || IS_QLA28XX(ha))
2885 		mcp->in_mb |= MBX_12;
2886 	mcp->tov = MBX_TOV_SECONDS;
2888 	rval = qla2x00_mailbox_command(vha, mcp);
2890 	if (rval != QLA_SUCCESS) {
2892 		ql_dbg(ql_dbg_mbx, vha, 0x107d,
2893 		    "Failed mb[0]=%x.\n", mcp->mb[0]);
2895 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2896 		    "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2897 		    "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2898 		    mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2899 		    mcp->mb[11], mcp->mb[12]);
2901 		ha->orig_fw_tgt_xcb_count = mcp->mb[1];
2902 		ha->cur_fw_tgt_xcb_count = mcp->mb[2];
2903 		ha->cur_fw_xcb_count = mcp->mb[3];
2904 		ha->orig_fw_xcb_count = mcp->mb[6];
2905 		ha->cur_fw_iocb_count = mcp->mb[7];
2906 		ha->orig_fw_iocb_count = mcp->mb[10];
2907 		if (ha->flags.npiv_supported)
2908 			ha->max_npiv_vports = mcp->mb[11];
2909 		if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
2910 			ha->fw_max_fcf_count = mcp->mb[12];
2917 * qla2x00_get_fcal_position_map
2918 * Get FCAL (LILP) position map using mailbox command
2921 * ha = adapter state pointer.
2922 * pos_map = buffer pointer (can be NULL).
2925 * qla2x00 local function return status code.
/*
 * qla2x00_get_fcal_position_map - Fetch the FC-AL (LILP) position map via
 * MBC_GET_FC_AL_POSITION_MAP into a temporary DMA buffer, optionally
 * copying it out to pos_map (FCAL_MAP_SIZE bytes) on success.
 *
 * NOTE(review): the NULL check after dma_pool_zalloc and the
 * "if (pos_map)" guard before the memcpy are on lines elided from this
 * listing.
 */
2931 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2935 	mbx_cmd_t *mcp = &mc;
2937 	dma_addr_t pmap_dma;
2938 	struct qla_hw_data *ha = vha->hw;
2940 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2941 	    "Entered %s.\n", __func__);
2943 	pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2945 		ql_log(ql_log_warn, vha, 0x1080,
2946 		    "Memory alloc failed.\n");
2947 		return QLA_MEMORY_ALLOC_FAILED;
2950 	mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2951 	mcp->mb[2] = MSW(pmap_dma);
2952 	mcp->mb[3] = LSW(pmap_dma);
2953 	mcp->mb[6] = MSW(MSD(pmap_dma));
2954 	mcp->mb[7] = LSW(MSD(pmap_dma));
2955 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2956 	mcp->in_mb = MBX_1|MBX_0;
2957 	mcp->buf_size = FCAL_MAP_SIZE;
2958 	mcp->flags = MBX_DMA_IN;
	/* Longer timeout: scaled from login_timeout (2.5x). */
2959 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2960 	rval = qla2x00_mailbox_command(vha, mcp);
2962 	if (rval == QLA_SUCCESS) {
2963 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2964 		    "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2965 		    mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2966 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2970 			memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2972 	dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2974 	if (rval != QLA_SUCCESS) {
2975 		ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2977 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2978 		    "Done %s.\n", __func__);
2985 * qla2x00_get_link_status
2988 * ha = adapter block pointer.
2989 * loop_id = device loop ID.
2990 * ret_buf = pointer to link status return buffer.
2994 * BIT_0 = mem alloc error.
2995 * BIT_1 = mailbox error.
/*
 * qla2x00_get_link_status - Issue MBC_GET_LINK_STATUS to DMA a
 * struct link_statistics for the given loop id, then convert the
 * returned words from firmware little-endian to CPU order in place.
 *
 * Only the leading dwords up to link_up_cnt are byte-swapped; the swap
 * loop body itself is on a line elided from this listing.
 */
2998 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2999     struct link_statistics *stats, dma_addr_t stats_dma)
3003 	mbx_cmd_t *mcp = &mc;
3004 	uint32_t *iter = (void *)stats;
	/* Number of le32 words before the link_up_cnt field. */
3005 	ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
3006 	struct qla_hw_data *ha = vha->hw;
3008 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
3009 	    "Entered %s.\n", __func__);
3011 	mcp->mb[0] = MBC_GET_LINK_STATUS;
3012 	mcp->mb[2] = MSW(LSD(stats_dma));
3013 	mcp->mb[3] = LSW(LSD(stats_dma));
3014 	mcp->mb[6] = MSW(MSD(stats_dma));
3015 	mcp->mb[7] = LSW(MSD(stats_dma));
3016 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3018 	if (IS_FWI2_CAPABLE(ha)) {
3019 		mcp->mb[1] = loop_id;
3022 		mcp->out_mb |= MBX_10|MBX_4|MBX_1;
3023 		mcp->in_mb |= MBX_1;
3024 	} else if (HAS_EXTENDED_IDS(ha)) {
3025 		mcp->mb[1] = loop_id;
3027 		mcp->out_mb |= MBX_10|MBX_1;
	/* Legacy adapters carry the loop id in the upper byte of mb[1]. */
3029 		mcp->mb[1] = loop_id << 8;
3030 		mcp->out_mb |= MBX_1;
3032 	mcp->tov = MBX_TOV_SECONDS;
3033 	mcp->flags = IOCTL_CMD;
3034 	rval = qla2x00_mailbox_command(vha, mcp);
3036 	if (rval == QLA_SUCCESS) {
3037 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3038 			ql_dbg(ql_dbg_mbx, vha, 0x1085,
3039 			    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3040 			rval = QLA_FUNCTION_FAILED;
3042 			/* Re-endianize - firmware data is le32. */
3043 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
3044 			    "Done %s.\n", __func__);
3045 			for ( ; dwords--; iter++)
3050 		ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
/*
 * qla24xx_get_isp_stats - Fetch private link statistics via
 * MBC_GET_LINK_PRIV_STATS, dispatched through qla24xx_send_mb_cmd
 * (mailbox-over-IOCB path), then byte-swap the le32 payload in place.
 *
 * NOTE(review): mb[9]/mb[10] are u16 mailbox registers but are assigned
 * cpu_to_le16() values here — on big-endian this double-converts; verify
 * against the firmware interface before relying on these fields.
 */
3057 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
3058     dma_addr_t stats_dma, uint16_t options)
3062 	mbx_cmd_t *mcp = &mc;
3063 	uint32_t *iter, dwords;
3065 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
3066 	    "Entered %s.\n", __func__);
3068 	memset(&mc, 0, sizeof(mc));
3069 	mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
3070 	mc.mb[2] = MSW(stats_dma);
3071 	mc.mb[3] = LSW(stats_dma);
3072 	mc.mb[6] = MSW(MSD(stats_dma));
3073 	mc.mb[7] = LSW(MSD(stats_dma));
	/* Buffer length in dwords. */
3074 	mc.mb[8] = sizeof(struct link_statistics) / 4;
3075 	mc.mb[9] = cpu_to_le16(vha->vp_idx);
3076 	mc.mb[10] = cpu_to_le16(options);
3078 	rval = qla24xx_send_mb_cmd(vha, &mc);
3080 	if (rval == QLA_SUCCESS) {
	/* mcp aliases &mc, so mb[] holds the completed mailbox values. */
3081 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3082 			ql_dbg(ql_dbg_mbx, vha, 0x1089,
3083 			    "Failed mb[0]=%x.\n", mcp->mb[0]);
3084 			rval = QLA_FUNCTION_FAILED;
3086 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
3087 			    "Done %s.\n", __func__);
3088 			/* Re-endianize - firmware data is le32. */
3089 			dwords = sizeof(struct link_statistics) / 4;
3090 			iter = &stats->link_fail_cnt;
3091 			for ( ; dwords--; iter++)
3096 		ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
/*
 * qla24xx_abort_command - Abort an outstanding command (srb) by locating
 * its handle in the request queue's outstanding_cmds[] and issuing an
 * abort IOCB for it.
 *
 * Defers to the async abort path when ql2xasynctmfenable is set. The
 * firmware reports abort completion status by overwriting nport_handle
 * in the returned IOCB: non-zero means failure.
 */
3103 qla24xx_abort_command(srb_t *sp)
3106 	unsigned long flags = 0;
3108 	struct abort_entry_24xx *abt;
3111 	fc_port_t *fcport = sp->fcport;
3112 	struct scsi_qla_host *vha = fcport->vha;
3113 	struct qla_hw_data *ha = vha->hw;
3114 	struct req_que *req = vha->req;
3115 	struct qla_qpair *qpair = sp->qpair;
3117 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3118 	    "Entered %s.\n", __func__);
	/* Prefer the command's own qpair request queue when present. */
3121 		req = sp->qpair->req;
3123 		return QLA_FUNCTION_FAILED;
3125 	if (ql2xasynctmfenable)
3126 		return qla24xx_async_abort_command(sp);
	/* Find the handle of this srb under the qpair lock. */
3128 	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3129 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3130 		if (req->outstanding_cmds[handle] == sp)
3133 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3134 	if (handle == req->num_outstanding_cmds) {
3135 		/* Command not found. */
3136 		return QLA_FUNCTION_FAILED;
3139 	abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3141 		ql_log(ql_log_warn, vha, 0x108d,
3142 		    "Failed to allocate abort IOCB.\n");
3143 		return QLA_MEMORY_ALLOC_FAILED;
3146 	abt->entry_type = ABORT_IOCB_TYPE;
3147 	abt->entry_count = 1;
3148 	abt->handle = MAKE_HANDLE(req->id, abt->handle);
3149 	abt->nport_handle = cpu_to_le16(fcport->loop_id);
3150 	abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
3151 	abt->port_id[0] = fcport->d_id.b.al_pa;
3152 	abt->port_id[1] = fcport->d_id.b.area;
3153 	abt->port_id[2] = fcport->d_id.b.domain;
3154 	abt->vp_index = fcport->vha->vp_idx;
3156 	abt->req_que_no = cpu_to_le16(req->id);
3158 	rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3159 	if (rval != QLA_SUCCESS) {
3160 		ql_dbg(ql_dbg_mbx, vha, 0x108e,
3161 		    "Failed to issue IOCB (%x).\n", rval);
3162 	} else if (abt->entry_status != 0) {
3163 		ql_dbg(ql_dbg_mbx, vha, 0x108f,
3164 		    "Failed to complete IOCB -- error status (%x).\n",
3166 		rval = QLA_FUNCTION_FAILED;
	/* Firmware writes the completion status back into nport_handle. */
3167 	} else if (abt->nport_handle != cpu_to_le16(0)) {
3168 		ql_dbg(ql_dbg_mbx, vha, 0x1090,
3169 		    "Failed to complete IOCB -- completion status (%x).\n",
3170 		    le16_to_cpu(abt->nport_handle));
	/* NOTE(review): compares le16 field to host-order CS_IOCB_ERROR
	 * without cpu_to_le16() — only correct on little-endian; verify. */
3171 		if (abt->nport_handle == CS_IOCB_ERROR)
3172 			rval = QLA_FUNCTION_PARAMETER_ERROR;
3174 			rval = QLA_FUNCTION_FAILED;
3176 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3177 		    "Done %s.\n", __func__);
3180 	dma_pool_free(ha->s_dma_pool, abt, abt_dma);
/*
 * Scratch DMA buffer for task-management: the request (tsk) and the
 * firmware's response (sts) share the same storage. The enclosing union
 * member lines are elided from this listing.
 */
3185 struct tsk_mgmt_cmd {
3187 		struct tsk_mgmt_entry tsk;
3188 		struct sts_entry_24xx sts;
/*
 * __qla24xx_issue_tmf - Issue a task-management IOCB (target reset or LUN
 * reset, selected by @type) for @fcport, then post a marker IOCB so the
 * firmware flushes commands for the affected nexus.
 *
 * @name: label used only in log messages ("Target"/"Lun").
 * @type: TCF_TARGET_RESET or TCF_LUN_RESET.
 * @l:    LUN, used only for TCF_LUN_RESET.
 * @tag:  unused in the visible code — presumably kept for interface
 *        symmetry with the async TMF path; confirm against callers.
 *
 * NOTE(review): vha/ha/req/qpair initialization, the sts = &tsk->p.sts
 * assignment, and the NULL check after dma_pool_zalloc are on lines
 * elided from this listing.
 */
3193 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3194     uint64_t l, int tag)
3197 	struct tsk_mgmt_cmd *tsk;
3198 	struct sts_entry_24xx *sts;
3200 	scsi_qla_host_t *vha;
3201 	struct qla_hw_data *ha;
3202 	struct req_que *req;
3203 	struct qla_qpair *qpair;
3209 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3210 	    "Entered %s.\n", __func__);
3212 	if (vha->vp_idx && vha->qpair) {
3218 	tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3220 		ql_log(ql_log_warn, vha, 0x1093,
3221 		    "Failed to allocate task management IOCB.\n");
3222 		return QLA_MEMORY_ALLOC_FAILED;
3225 	tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3226 	tsk->p.tsk.entry_count = 1;
3227 	tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
3228 	tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
	/* Timeout: 2 * R_A_TOV (r_a_tov is in 100ms units). */
3229 	tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3230 	tsk->p.tsk.control_flags = cpu_to_le32(type);
3231 	tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3232 	tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3233 	tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3234 	tsk->p.tsk.vp_index = fcport->vha->vp_idx;
3235 	if (type == TCF_LUN_RESET) {
3236 		int_to_scsilun(l, &tsk->p.tsk.lun);
	/* LUN field is carried in FCP (big-endian) byte order. */
3237 		host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3238 		    sizeof(tsk->p.tsk.lun));
3242 	rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3243 	if (rval != QLA_SUCCESS) {
3244 		ql_dbg(ql_dbg_mbx, vha, 0x1094,
3245 		    "Failed to issue %s reset IOCB (%x).\n", name, rval);
3246 	} else if (sts->entry_status != 0) {
3247 		ql_dbg(ql_dbg_mbx, vha, 0x1095,
3248 		    "Failed to complete IOCB -- error status (%x).\n",
3250 		rval = QLA_FUNCTION_FAILED;
3251 	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3252 		ql_dbg(ql_dbg_mbx, vha, 0x1096,
3253 		    "Failed to complete IOCB -- completion status (%x).\n",
3254 		    le16_to_cpu(sts->comp_status));
3255 		rval = QLA_FUNCTION_FAILED;
3256 	} else if (le16_to_cpu(sts->scsi_status) &
3257 	    SS_RESPONSE_INFO_LEN_VALID) {
3258 		if (le32_to_cpu(sts->rsp_data_len) < 4) {
3259 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3260 			    "Ignoring inconsistent data length -- not enough "
3261 			    "response info (%d).\n",
3262 			    le32_to_cpu(sts->rsp_data_len));
	/* data[3] is the FCP response code; non-zero means TMF failed. */
3263 		} else if (sts->data[3]) {
3264 			ql_dbg(ql_dbg_mbx, vha, 0x1098,
3265 			    "Failed to complete IOCB -- response (%x).\n",
3267 			rval = QLA_FUNCTION_FAILED;
3271 	/* Issue marker IOCB. */
3272 	rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
3273 	    type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
3274 	if (rval2 != QLA_SUCCESS) {
3275 		ql_dbg(ql_dbg_mbx, vha, 0x1099,
3276 		    "Failed to issue marker IOCB (%x).\n", rval2);
3278 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3279 		    "Done %s.\n", __func__);
3282 	dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
/*
 * qla24xx_abort_target - Target reset entry point; uses the async TMF
 * path when enabled, otherwise the synchronous IOCB path.
 */
3288 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3290 	struct qla_hw_data *ha = fcport->vha->hw;
3292 	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3293 		return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3295 	return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
/*
 * qla24xx_lun_reset - LUN reset entry point; mirrors qla24xx_abort_target
 * but with TCF_LUN_RESET.
 */
3299 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3301 	struct qla_hw_data *ha = fcport->vha->hw;
3303 	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3304 		return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3306 	return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
/*
 * qla2x00_system_error - Deliberately trigger a firmware system error
 * (MBC_GEN_SYSTEM_ERROR), typically used to force a firmware dump.
 * Only supported on ISP23xx and FWI2-capable adapters.
 */
3310 qla2x00_system_error(scsi_qla_host_t *vha)
3314 	mbx_cmd_t *mcp = &mc;
3315 	struct qla_hw_data *ha = vha->hw;
3317 	if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3318 		return QLA_FUNCTION_FAILED;
3320 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3321 	    "Entered %s.\n", __func__);
3323 	mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3324 	mcp->out_mb = MBX_0;
3328 	rval = qla2x00_mailbox_command(vha, mcp);
3330 	if (rval != QLA_SUCCESS) {
3331 		ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3333 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3334 		    "Done %s.\n", __func__);
/*
 * qla2x00_write_serdes_word - Write one SerDes register via
 * MBC_WRITE_SERDES. Supported on 25xx/2031/27xx/28xx only; on 2031 the
 * data is restricted to 8 bits.
 */
3341 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3345 	mbx_cmd_t *mcp = &mc;
3347 	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3348 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3349 		return QLA_FUNCTION_FAILED;
3351 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3352 	    "Entered %s.\n", __func__);
3354 	mcp->mb[0] = MBC_WRITE_SERDES;
3356 	if (IS_QLA2031(vha->hw))
	/* 2031 SerDes registers are only 8 bits wide. */
3357 		mcp->mb[2] = data & 0xff;
3362 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3364 	mcp->tov = MBX_TOV_SECONDS;
3366 	rval = qla2x00_mailbox_command(vha, mcp);
3368 	if (rval != QLA_SUCCESS) {
3369 		ql_dbg(ql_dbg_mbx, vha, 0x1183,
3370 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3372 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3373 		    "Done %s.\n", __func__);
/*
 * qla2x00_read_serdes_word - Read one SerDes register via
 * MBC_READ_SERDES into *data. Supported on 25xx/2031/27xx/28xx;
 * on 2031 only the low 8 bits of mb[1] are valid.
 */
3380 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3384 	mbx_cmd_t *mcp = &mc;
3386 	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3387 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3388 		return QLA_FUNCTION_FAILED;
3390 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3391 	    "Entered %s.\n", __func__);
3393 	mcp->mb[0] = MBC_READ_SERDES;
3396 	mcp->out_mb = MBX_3|MBX_1|MBX_0;
3397 	mcp->in_mb = MBX_1|MBX_0;
3398 	mcp->tov = MBX_TOV_SECONDS;
3400 	rval = qla2x00_mailbox_command(vha, mcp);
	/* *data is populated even on failure — callers must check rval. */
3402 	if (IS_QLA2031(vha->hw))
3403 		*data = mcp->mb[1] & 0xff;
3407 	if (rval != QLA_SUCCESS) {
3408 		ql_dbg(ql_dbg_mbx, vha, 0x1186,
3409 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3411 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3412 		    "Done %s.\n", __func__);
/*
 * qla8044_write_serdes_word - Write a 32-bit Ethernet SerDes register on
 * ISP8044 via MBC_SET_GET_ETH_SERDES_REG with the HCS_WRITE_SERDES
 * sub-operation; address and data are split LSW/MSW across mb[3..6].
 */
3419 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3423 	mbx_cmd_t *mcp = &mc;
3425 	if (!IS_QLA8044(vha->hw))
3426 		return QLA_FUNCTION_FAILED;
3428 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3429 	    "Entered %s.\n", __func__);
3431 	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3432 	mcp->mb[1] = HCS_WRITE_SERDES;
3433 	mcp->mb[3] = LSW(addr);
3434 	mcp->mb[4] = MSW(addr);
3435 	mcp->mb[5] = LSW(data);
3436 	mcp->mb[6] = MSW(data);
3437 	mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3439 	mcp->tov = MBX_TOV_SECONDS;
3441 	rval = qla2x00_mailbox_command(vha, mcp);
3443 	if (rval != QLA_SUCCESS) {
3444 		ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3445 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3447 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3448 		    "Done %s.\n", __func__);
/*
 * qla8044_read_serdes_word - Read a 32-bit Ethernet SerDes register on
 * ISP8044 (HCS_READ_SERDES sub-op); the value comes back split across
 * mb[1] (low word) and mb[2] (high word).
 */
3455 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3459 	mbx_cmd_t *mcp = &mc;
3461 	if (!IS_QLA8044(vha->hw))
3462 		return QLA_FUNCTION_FAILED;
3464 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3465 	    "Entered %s.\n", __func__);
3467 	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3468 	mcp->mb[1] = HCS_READ_SERDES;
3469 	mcp->mb[3] = LSW(addr);
3470 	mcp->mb[4] = MSW(addr);
3471 	mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3472 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
3473 	mcp->tov = MBX_TOV_SECONDS;
3475 	rval = qla2x00_mailbox_command(vha, mcp);
	/* *data is assembled even on failure — callers must check rval. */
3477 	*data = mcp->mb[2] << 16 | mcp->mb[1];
3479 	if (rval != QLA_SUCCESS) {
3480 		ql_dbg(ql_dbg_mbx, vha, 0x118a,
3481 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3483 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3484 		    "Done %s.\n", __func__);
3491 * qla2x00_set_serdes_params() -
3493 * @sw_em_1g: serial link options
3494 * @sw_em_2g: serial link options
3495 * @sw_em_4g: serial link options
/*
 * qla2x00_set_serdes_params - Program serial-link (SerDes) emphasis
 * options for 1G/2G/4G rates via MBC_SERDES_PARAMS. BIT_15 in each word
 * marks the value as valid to the firmware.
 */
3500 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3501     uint16_t sw_em_2g, uint16_t sw_em_4g)
3505 	mbx_cmd_t *mcp = &mc;
3507 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3508 	    "Entered %s.\n", __func__);
3510 	mcp->mb[0] = MBC_SERDES_PARAMS;
3512 	mcp->mb[2] = sw_em_1g | BIT_15;
3513 	mcp->mb[3] = sw_em_2g | BIT_15;
3514 	mcp->mb[4] = sw_em_4g | BIT_15;
3515 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3517 	mcp->tov = MBX_TOV_SECONDS;
3519 	rval = qla2x00_mailbox_command(vha, mcp);
3521 	if (rval != QLA_SUCCESS) {
3523 		ql_dbg(ql_dbg_mbx, vha, 0x109f,
3524 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3527 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3528 		    "Done %s.\n", __func__);
/*
 * qla2x00_stop_firmware - Issue MBC_STOP_FIRMWARE (FWI2-capable adapters
 * only). Maps a firmware "invalid command" reply to QLA_INVALID_COMMAND
 * so callers can distinguish unsupported from failed.
 */
3535 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3539 	mbx_cmd_t *mcp = &mc;
3541 	if (!IS_FWI2_CAPABLE(vha->hw))
3542 		return QLA_FUNCTION_FAILED;
3544 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3545 	    "Entered %s.\n", __func__);
3547 	mcp->mb[0] = MBC_STOP_FIRMWARE;
3549 	mcp->out_mb = MBX_1|MBX_0;
3553 	rval = qla2x00_mailbox_command(vha, mcp);
3555 	if (rval != QLA_SUCCESS) {
3556 		ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3557 		if (mcp->mb[0] == MBS_INVALID_COMMAND)
3558 			rval = QLA_INVALID_COMMAND;
3560 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3561 		    "Done %s.\n", __func__);
/*
 * qla2x00_enable_eft_trace - Enable Extended Firmware Trace (EFT) via
 * MBC_TRACE_CONTROL/TC_EFT_ENABLE, pointing firmware at a caller-provided
 * DMA buffer of @buffers trace buffers. AEN tracing is left disabled.
 *
 * Bails out early if the adapter is not FWI2-capable or the PCI channel
 * is offline (e.g. during EEH recovery).
 */
3568 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3573 	mbx_cmd_t *mcp = &mc;
3575 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3576 	    "Entered %s.\n", __func__);
3578 	if (!IS_FWI2_CAPABLE(vha->hw))
3579 		return QLA_FUNCTION_FAILED;
3581 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3582 		return QLA_FUNCTION_FAILED;
3584 	mcp->mb[0] = MBC_TRACE_CONTROL;
3585 	mcp->mb[1] = TC_EFT_ENABLE;
3586 	mcp->mb[2] = LSW(eft_dma);
3587 	mcp->mb[3] = MSW(eft_dma);
3588 	mcp->mb[4] = LSW(MSD(eft_dma));
3589 	mcp->mb[5] = MSW(MSD(eft_dma));
3590 	mcp->mb[6] = buffers;
3591 	mcp->mb[7] = TC_AEN_DISABLE;
3592 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3593 	mcp->in_mb = MBX_1|MBX_0;
3594 	mcp->tov = MBX_TOV_SECONDS;
3596 	rval = qla2x00_mailbox_command(vha, mcp);
3597 	if (rval != QLA_SUCCESS) {
3598 		ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3599 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3600 		    rval, mcp->mb[0], mcp->mb[1]);
3602 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3603 		    "Done %s.\n", __func__);
/*
 * qla2x00_disable_eft_trace - Turn off Extended Firmware Trace via
 * MBC_TRACE_CONTROL/TC_EFT_DISABLE. Same early-exit guards as enable.
 */
3610 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3614 	mbx_cmd_t *mcp = &mc;
3616 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3617 	    "Entered %s.\n", __func__);
3619 	if (!IS_FWI2_CAPABLE(vha->hw))
3620 		return QLA_FUNCTION_FAILED;
3622 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3623 		return QLA_FUNCTION_FAILED;
3625 	mcp->mb[0] = MBC_TRACE_CONTROL;
3626 	mcp->mb[1] = TC_EFT_DISABLE;
3627 	mcp->out_mb = MBX_1|MBX_0;
3628 	mcp->in_mb = MBX_1|MBX_0;
3629 	mcp->tov = MBX_TOV_SECONDS;
3631 	rval = qla2x00_mailbox_command(vha, mcp);
3632 	if (rval != QLA_SUCCESS) {
3633 		ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3634 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3635 		    rval, mcp->mb[0], mcp->mb[1]);
3637 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3638 		    "Done %s.\n", __func__);
/*
 * qla2x00_enable_fce_trace - Enable Fibre Channel Event (FCE) tracing via
 * MBC_TRACE_CONTROL/TC_FCE_ENABLE. On success the first 8 returned
 * mailbox words are copied out through @mb; @dwords is presumably filled
 * from mb[6] on an elided line — confirm against the full source.
 */
3645 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3646     uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3650 	mbx_cmd_t *mcp = &mc;
3652 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3653 	    "Entered %s.\n", __func__);
3655 	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3656 	    !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
3657 	    !IS_QLA28XX(vha->hw))
3658 		return QLA_FUNCTION_FAILED;
3660 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3661 		return QLA_FUNCTION_FAILED;
3663 	mcp->mb[0] = MBC_TRACE_CONTROL;
3664 	mcp->mb[1] = TC_FCE_ENABLE;
3665 	mcp->mb[2] = LSW(fce_dma);
3666 	mcp->mb[3] = MSW(fce_dma);
3667 	mcp->mb[4] = LSW(MSD(fce_dma));
3668 	mcp->mb[5] = MSW(MSD(fce_dma));
3669 	mcp->mb[6] = buffers;
3670 	mcp->mb[7] = TC_AEN_DISABLE;
3672 	mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3673 	mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3674 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3676 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3677 	mcp->tov = MBX_TOV_SECONDS;
3679 	rval = qla2x00_mailbox_command(vha, mcp);
3680 	if (rval != QLA_SUCCESS) {
3681 		ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3682 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3683 		    rval, mcp->mb[0], mcp->mb[1]);
3685 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3686 		    "Done %s.\n", __func__);
3689 		memcpy(mb, mcp->mb, 8 * sizeof(*mb));
/*
 * qla2x00_disable_fce_trace - Disable FCE tracing via MBC_TRACE_CONTROL/
 * TC_FCE_DISABLE. On success, returns the firmware's 64-bit write and
 * read trace pointers through *wr and *rd, each assembled from four
 * 16-bit mailbox words (mb[2..5] and mb[6..9] respectively).
 */
3698 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3702 	mbx_cmd_t *mcp = &mc;
3704 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3705 	    "Entered %s.\n", __func__);
3707 	if (!IS_FWI2_CAPABLE(vha->hw))
3708 		return QLA_FUNCTION_FAILED;
3710 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3711 		return QLA_FUNCTION_FAILED;
3713 	mcp->mb[0] = MBC_TRACE_CONTROL;
3714 	mcp->mb[1] = TC_FCE_DISABLE;
3715 	mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3716 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
3717 	mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3719 	mcp->tov = MBX_TOV_SECONDS;
3721 	rval = qla2x00_mailbox_command(vha, mcp);
3722 	if (rval != QLA_SUCCESS) {
3723 		ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3724 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3725 		    rval, mcp->mb[0], mcp->mb[1]);
3727 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3728 		    "Done %s.\n", __func__);
3731 		*wr = (uint64_t) mcp->mb[5] << 48 |
3732 		    (uint64_t) mcp->mb[4] << 32 |
3733 		    (uint64_t) mcp->mb[3] << 16 |
3734 		    (uint64_t) mcp->mb[2];
3736 		*rd = (uint64_t) mcp->mb[9] << 48 |
3737 		    (uint64_t) mcp->mb[8] << 32 |
3738 		    (uint64_t) mcp->mb[7] << 16 |
3739 		    (uint64_t) mcp->mb[6];
/*
 * qla2x00_get_idma_speed - Query the iIDMA port speed for @loop_id via
 * MBC_PORT_PARAMS (mb[2]=mb[3]=0 selects the "get" form). The speed comes
 * back in mb[3]; raw mailbox words are also copied out through @mb on a
 * line elided from this listing.
 */
3746 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3747     uint16_t *port_speed, uint16_t *mb)
3751 	mbx_cmd_t *mcp = &mc;
3753 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3754 	    "Entered %s.\n", __func__);
3756 	if (!IS_IIDMA_CAPABLE(vha->hw))
3757 		return QLA_FUNCTION_FAILED;
3759 	mcp->mb[0] = MBC_PORT_PARAMS;
3760 	mcp->mb[1] = loop_id;
3761 	mcp->mb[2] = mcp->mb[3] = 0;
3762 	mcp->mb[9] = vha->vp_idx;
3763 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3764 	mcp->in_mb = MBX_3|MBX_1|MBX_0;
3765 	mcp->tov = MBX_TOV_SECONDS;
3767 	rval = qla2x00_mailbox_command(vha, mcp);
3769 	/* Return mailbox statuses. */
3776 	if (rval != QLA_SUCCESS) {
3777 		ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3779 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3780 		    "Done %s.\n", __func__);
3782 		*port_speed = mcp->mb[3];
/*
 * qla2x00_set_idma_speed - Set the iIDMA port speed for @loop_id via
 * MBC_PORT_PARAMS. Only the low 6 bits of @port_speed are sent (mb[3]).
 * Raw mailbox words are copied out through @mb on an elided line.
 */
3789 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3790     uint16_t port_speed, uint16_t *mb)
3794 	mbx_cmd_t *mcp = &mc;
3796 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3797 	    "Entered %s.\n", __func__);
3799 	if (!IS_IIDMA_CAPABLE(vha->hw))
3800 		return QLA_FUNCTION_FAILED;
3802 	mcp->mb[0] = MBC_PORT_PARAMS;
3803 	mcp->mb[1] = loop_id;
3805 	mcp->mb[3] = port_speed & 0x3F;
3806 	mcp->mb[9] = vha->vp_idx;
3807 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3808 	mcp->in_mb = MBX_3|MBX_1|MBX_0;
3809 	mcp->tov = MBX_TOV_SECONDS;
3811 	rval = qla2x00_mailbox_command(vha, mcp);
3813 	/* Return mailbox statuses. */
3820 	if (rval != QLA_SUCCESS) {
3821 		ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3822 		    "Failed=%x.\n", rval);
3824 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3825 		    "Done %s.\n", __func__);
/*
 * qla24xx_report_id_acquisition - Handle a REPORT-ID-ACQUISITION
 * (vp_rpt_id_entry_24xx) async IOCB from the firmware.
 *
 * The entry's format field selects the topology case:
 *   format 0 - loop (NL) topology: update the host map with the primary
 *              port id.
 *   format 1 - VP enabled / point-to-point or fabric: decode topology
 *              from u.f1.flags; for N2N decide initiator/responder role
 *              by comparing WWPNs; for vp_idx != 0 locate the matching
 *              vport and defer configuration to DPC context.
 *   format 2 - N2N (RIDA format 2): record local and remote port ids and
 *              mark the matching fcport for login.
 *
 * Runs from response-queue processing context — heavier work is deferred
 * via dpc flags. Many lines (returns, else branches, closing braces) are
 * elided from this listing; do not treat the visible text as complete.
 */
3832 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3833     struct vp_rpt_id_entry_24xx *rptid_entry)
3835 	struct qla_hw_data *ha = vha->hw;
3836 	scsi_qla_host_t *vp = NULL;
3837 	unsigned long flags;
3840 	struct fc_port *fcport;
3842 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3843 	    "Entered %s.\n", __func__);
3845 	if (rptid_entry->entry_status != 0)
	/* Assemble the 24-bit port id from the wire (al_pa first). */
3848 	id.b.domain = rptid_entry->port_id[2];
3849 	id.b.area = rptid_entry->port_id[1];
3850 	id.b.al_pa = rptid_entry->port_id[0];
3852 	ha->flags.n2n_ae = 0;
3854 	if (rptid_entry->format == 0) {
3856 		ql_dbg(ql_dbg_async, vha, 0x10b7,
3857 		    "Format 0 : Number of VPs setup %d, number of "
3858 		    "VPs acquired %d.\n", rptid_entry->vp_setup,
3859 		    rptid_entry->vp_acquired);
3860 		ql_dbg(ql_dbg_async, vha, 0x10b8,
3861 		    "Primary port id %02x%02x%02x.\n",
3862 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
3863 		    rptid_entry->port_id[0]);
3864 		ha->current_topology = ISP_CFG_NL;
3865 		qlt_update_host_map(vha, id);
3867 	} else if (rptid_entry->format == 1) {
3869 		ql_dbg(ql_dbg_async, vha, 0x10b9,
3870 		    "Format 1: VP[%d] enabled - status %d - with "
3871 		    "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
3872 		    rptid_entry->vp_status,
3873 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
3874 		    rptid_entry->port_id[0]);
3875 		ql_dbg(ql_dbg_async, vha, 0x5075,
3876 		    "Format 1: Remote WWPN %8phC.\n",
3877 		    rptid_entry->u.f1.port_name);
3879 		ql_dbg(ql_dbg_async, vha, 0x5075,
3880 		    "Format 1: WWPN %8phC.\n",
	/* Topology bits from the format-1 flags field. */
3883 		switch (rptid_entry->u.f1.flags & TOPO_MASK) {
3885 			ha->current_topology = ISP_CFG_N;
	/* N2N: rescan fcports and look up the peer by WWPN. */
3886 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3887 			list_for_each_entry(fcport, &vha->vp_fcports, list) {
3888 				fcport->scan_state = QLA_FCPORT_SCAN;
3889 				fcport->n2n_flag = 0;
3892 			fcport = qla2x00_find_fcport_by_wwpn(vha,
3893 			    rptid_entry->u.f1.port_name, 1);
3894 			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3897 				fcport->plogi_nack_done_deadline = jiffies + HZ;
3898 				fcport->dm_login_expire = jiffies + 2*HZ;
3899 				fcport->scan_state = QLA_FCPORT_FOUND;
3900 				fcport->n2n_flag = 1;
3901 				fcport->keep_nport_handle = 1;
3902 				fcport->fc4_type = FS_FC4TYPE_FCP;
3903 				if (vha->flags.nvme_enabled)
3904 					fcport->fc4_type |= FS_FC4TYPE_NVME;
3906 				switch (fcport->disc_state) {
3908 					set_bit(RELOGIN_NEEDED,
3911 				case DSC_DELETE_PEND:
3914 					qlt_schedule_sess_for_deletion(fcport);
	/* Higher WWPN becomes al_pa 1 and initiates the N2N login. */
3919 				if (wwn_to_u64(vha->port_name) >
3920 				    wwn_to_u64(rptid_entry->u.f1.port_name)) {
3922 					vha->d_id.b.al_pa = 1;
3923 					ha->flags.n2n_bigger = 1;
3924 					ha->flags.n2n_ae = 0;
3927 					ql_dbg(ql_dbg_async, vha, 0x5075,
3928 					    "Format 1: assign local id %x remote id %x\n",
3929 					    vha->d_id.b24, id.b24);
3931 					ql_dbg(ql_dbg_async, vha, 0x5075,
3932 					    "Format 1: Remote login - Waiting for WWPN %8phC.\n",
3933 					    rptid_entry->u.f1.port_name);
3934 					ha->flags.n2n_bigger = 0;
3935 					ha->flags.n2n_ae = 1;
3937 				qla24xx_post_newsess_work(vha, &id,
3938 				    rptid_entry->u.f1.port_name,
3939 				    rptid_entry->u.f1.node_name,
3944 			/* if our portname is higher then initiate N2N login */
3946 			set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
3950 			ha->current_topology = ISP_CFG_FL;
3953 			ha->current_topology = ISP_CFG_F;
3959 			ha->flags.gpsc_supported = 1;
3960 			ha->current_topology = ISP_CFG_F;
3961 		/* buffer to buffer credit flag */
3962 		vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
3964 		if (rptid_entry->vp_idx == 0) {
3965 			if (rptid_entry->vp_status == VP_STAT_COMPL) {
3966 				/* FA-WWN is only for physical port */
3967 				if (qla_ini_mode_enabled(vha) &&
3968 				    ha->flags.fawwpn_enabled &&
3969 				    (rptid_entry->u.f1.flags &
3971 					memcpy(vha->port_name,
3972 					    rptid_entry->u.f1.port_name,
3976 				qlt_update_host_map(vha, id);
3979 			set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3980 			set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3982 			if (rptid_entry->vp_status != VP_STAT_COMPL &&
3983 				rptid_entry->vp_status != VP_STAT_ID_CHG) {
3984 				ql_dbg(ql_dbg_mbx, vha, 0x10ba,
3985 				    "Could not acquire ID for VP[%d].\n",
3986 				    rptid_entry->vp_idx);
	/* Find the vport matching this vp_idx under the vport lock. */
3991 			spin_lock_irqsave(&ha->vport_slock, flags);
3992 			list_for_each_entry(vp, &ha->vp_list, list) {
3993 				if (rptid_entry->vp_idx == vp->vp_idx) {
3998 			spin_unlock_irqrestore(&ha->vport_slock, flags);
4003 			qlt_update_host_map(vp, id);
4006 			 * Cannot configure here as we are still sitting on the
4007 			 * response queue. Handle it in dpc context.
4009 			set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
4010 			set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
4011 			set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
4013 		set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
4014 		qla2xxx_wake_dpc(vha);
4015 	} else if (rptid_entry->format == 2) {
4016 		ql_dbg(ql_dbg_async, vha, 0x505f,
4017 		    "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
4018 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
4019 		    rptid_entry->port_id[0]);
4021 		ql_dbg(ql_dbg_async, vha, 0x5075,
4022 		    "N2N: Remote WWPN %8phC.\n",
4023 		    rptid_entry->u.f2.port_name);
4025 		/* N2N.  direct connect */
4026 		ha->current_topology = ISP_CFG_N;
4027 		ha->flags.rida_fmt2 = 1;
4028 		vha->d_id.b.domain = rptid_entry->port_id[2];
4029 		vha->d_id.b.area = rptid_entry->port_id[1];
4030 		vha->d_id.b.al_pa = rptid_entry->port_id[0];
4032 		ha->flags.n2n_ae = 1;
4033 		spin_lock_irqsave(&ha->vport_slock, flags);
4034 		qlt_update_vp_map(vha, SET_AL_PA);
4035 		spin_unlock_irqrestore(&ha->vport_slock, flags);
4037 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
4038 			fcport->scan_state = QLA_FCPORT_SCAN;
4039 			fcport->n2n_flag = 0;
4042 		fcport = qla2x00_find_fcport_by_wwpn(vha,
4043 		    rptid_entry->u.f2.port_name, 1);
4046 			fcport->login_retry = vha->hw->login_retry_count;
4047 			fcport->plogi_nack_done_deadline = jiffies + HZ;
4048 			fcport->scan_state = QLA_FCPORT_FOUND;
4049 			fcport->keep_nport_handle = 1;
4050 			fcport->n2n_flag = 1;
4051 			fcport->d_id.b.domain =
4052 				rptid_entry->u.f2.remote_nport_id[2];
4053 			fcport->d_id.b.area =
4054 				rptid_entry->u.f2.remote_nport_id[1];
4055 			fcport->d_id.b.al_pa =
4056 				rptid_entry->u.f2.remote_nport_id[0];
4062 * qla24xx_modify_vp_config
4063 * Change VP configuration for vha
4066 * vha = adapter block pointer.
4069 * qla2xxx local function return status code.
4075 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
4078 struct vp_config_entry_24xx *vpmod;
4079 dma_addr_t vpmod_dma;
4080 struct qla_hw_data *ha = vha->hw;
4081 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4083 /* This can be called by the parent */
4085 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4086 "Entered %s.\n", __func__);
/* IOCB comes zeroed from the DMA pool; allocation failure is fatal here. */
4088 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4090 ql_log(ql_log_warn, vha, 0x10bc,
4091 "Failed to allocate modify VP IOCB.\n");
4092 return QLA_MEMORY_ALLOC_FAILED;
/* Build the VP-config IOCB for this vha's VP index. */
4095 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4096 vpmod->entry_count = 1;
4097 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4098 vpmod->vp_count = 1;
4099 vpmod->vp_index1 = vha->vp_idx;
4100 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
/* Let target mode adjust the IOCB before the WWNs are copied in. */
4102 qlt_modify_vp_config(vha, vpmod);
4104 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4105 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4106 vpmod->entry_count = 1;
/* IOCB is issued via the physical (base) host, not the vport. */
4108 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
4109 if (rval != QLA_SUCCESS) {
4110 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4111 "Failed to issue VP config IOCB (%x).\n", rval);
4112 } else if (vpmod->comp_status != 0) {
4113 ql_dbg(ql_dbg_mbx, vha, 0x10be,
4114 "Failed to complete IOCB -- error status (%x).\n",
4115 vpmod->comp_status);
4116 rval = QLA_FUNCTION_FAILED;
4117 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4118 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4119 "Failed to complete IOCB -- completion status (%x).\n",
4120 le16_to_cpu(vpmod->comp_status));
4121 rval = QLA_FUNCTION_FAILED;
4124 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4125 "Done %s.\n", __func__);
4126 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
4128 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4134 * qla2x00_send_change_request
4135 * Receive or disable RSCN request from fabric controller
4138 * ha = adapter block pointer
4139 * format = registration format:
4141 * 1 - Fabric detected registration
4142 * 2 - N_port detected registration
4143 * 3 - Full registration
4144 * FF - clear registration
4145 * vp_idx = Virtual port index
4148 * qla2x00 local function return status code.
4155 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4160 mbx_cmd_t *mcp = &mc;
4162 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4163 "Entered %s.\n", __func__);
/* mb[1] carries the registration format, mb[9] the target VP index. */
4165 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4166 mcp->mb[1] = format;
4167 mcp->mb[9] = vp_idx;
4168 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4169 mcp->in_mb = MBX_0|MBX_1;
4170 mcp->tov = MBX_TOV_SECONDS;
4172 rval = qla2x00_mailbox_command(vha, mcp);
/* Mailbox transport success still requires a COMMAND_COMPLETE status. */
4174 if (rval == QLA_SUCCESS) {
4175 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
/*
 * qla2x00_dump_ram
 * Dump RISC RAM starting at 'addr' into the host buffer at 'req_dma'.
 * Uses the extended mailbox form when the address needs >16 bits or the
 * ISP is FWI2 capable; otherwise falls back to the legacy command.
 */
4185 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4190 mbx_cmd_t *mcp = &mc;
4192 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4193 "Entered %s.\n", __func__);
4195 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4196 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4197 mcp->mb[8] = MSW(addr);
4198 mcp->out_mb = MBX_8|MBX_0;
4200 mcp->mb[0] = MBC_DUMP_RISC_RAM;
4201 mcp->out_mb = MBX_0;
/* Common registers: low address word plus 64-bit DMA destination. */
4203 mcp->mb[1] = LSW(addr);
4204 mcp->mb[2] = MSW(req_dma);
4205 mcp->mb[3] = LSW(req_dma);
4206 mcp->mb[6] = MSW(MSD(req_dma));
4207 mcp->mb[7] = LSW(MSD(req_dma));
4208 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
/* FWI2 parts take a 32-bit size split across mb[4]/mb[5]. */
4209 if (IS_FWI2_CAPABLE(vha->hw)) {
4210 mcp->mb[4] = MSW(size);
4211 mcp->mb[5] = LSW(size);
4212 mcp->out_mb |= MBX_5|MBX_4;
4214 mcp->mb[4] = LSW(size);
4215 mcp->out_mb |= MBX_4;
4219 mcp->tov = MBX_TOV_SECONDS;
4221 rval = qla2x00_mailbox_command(vha, mcp);
4223 if (rval != QLA_SUCCESS) {
4224 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4225 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4227 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4228 "Done %s.\n", __func__);
4233 /* 84XX Support **************************************************************/
/* Request/response overlay for the 84xx verify-chip IOCB exchange. */
4235 struct cs84xx_mgmt_cmd {
4237 struct verify_chip_entry_84xx req;
4238 struct verify_chip_rsp_84xx rsp;
/*
 * qla84xx_verify_chip
 * Issue the 84xx VERIFY CHIP IOCB, optionally updating its firmware.
 * On failure with an update requested, retries once without the update.
 * status[0]/status[1] receive completion status and failure code.
 */
4243 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4246 struct cs84xx_mgmt_cmd *mn;
4249 unsigned long flags;
4250 struct qla_hw_data *ha = vha->hw;
4252 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4253 "Entered %s.\n", __func__);
4255 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4257 return QLA_MEMORY_ALLOC_FAILED;
/* Force the update only when a new firmware image is staged. */
4261 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4262 /* Diagnostic firmware? */
4263 /* options |= MENLO_DIAG_FW; */
4264 /* We update the firmware with only one data sequence. */
4265 options |= VCO_END_OF_DATA;
4269 memset(mn, 0, sizeof(*mn));
4270 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4271 mn->p.req.entry_count = 1;
4272 mn->p.req.options = cpu_to_le16(options);
4274 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4275 "Dump of Verify Request.\n");
4276 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
/* Long 120s timeout: a firmware flash can take considerable time. */
4279 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4280 if (rval != QLA_SUCCESS) {
4281 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4282 "Failed to issue verify IOCB (%x).\n", rval);
4286 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4287 "Dump of Verify Response.\n");
4288 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4291 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4292 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4293 le16_to_cpu(mn->p.rsp.failure_code) : 0;
4294 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4295 "cs=%x fc=%x.\n", status[0], status[1]);
4297 if (status[0] != CS_COMPLETE) {
4298 rval = QLA_FUNCTION_FAILED;
4299 if (!(options & VCO_DONT_UPDATE_FW)) {
4300 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4301 "Firmware update failed. Retrying "
4302 "without update firmware.\n");
4303 options |= VCO_DONT_UPDATE_FW;
4304 options &= ~VCO_FORCE_UPDATE;
4308 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4309 "Firmware updated to %x.\n",
4310 le32_to_cpu(mn->p.rsp.fw_ver));
4312 /* NOTE: we only update OP firmware. */
4313 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4314 ha->cs84xx->op_fw_version =
4315 le32_to_cpu(mn->p.rsp.fw_ver);
4316 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4322 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4324 if (rval != QLA_SUCCESS) {
4325 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4326 "Failed=%x.\n", rval);
4328 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4329 "Done %s.\n", __func__);
/*
 * qla25xx_init_req_que
 * Initialize a multiqueue request queue via MBC_INITIALIZE_MULTIQ.
 * No-op (caller's responsibility) unless firmware has been started.
 */
4336 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4339 unsigned long flags;
4341 mbx_cmd_t *mcp = &mc;
4342 struct qla_hw_data *ha = vha->hw;
4344 if (!ha->flags.fw_started)
4347 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4348 "Entered %s.\n", __func__);
/* BIT_13 requests shadow registers on capable adapters. */
4350 if (IS_SHADOW_REG_CAPABLE(ha))
4351 req->options |= BIT_13;
4353 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4354 mcp->mb[1] = req->options;
/* 64-bit queue DMA base split over mb[2,3,6,7]. */
4355 mcp->mb[2] = MSW(LSD(req->dma));
4356 mcp->mb[3] = LSW(LSD(req->dma));
4357 mcp->mb[6] = MSW(MSD(req->dma));
4358 mcp->mb[7] = LSW(MSD(req->dma));
4359 mcp->mb[5] = req->length;
4361 mcp->mb[10] = req->rsp->id;
4362 mcp->mb[12] = req->qos;
4363 mcp->mb[11] = req->vp_idx;
4364 mcp->mb[13] = req->rid;
4365 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4368 mcp->mb[4] = req->id;
4369 /* que in ptr index */
4371 /* que out ptr index */
4372 mcp->mb[9] = *req->out_ptr = 0;
4373 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4374 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4376 mcp->flags = MBX_DMA_OUT;
4377 mcp->tov = MBX_TOV_SECONDS * 2;
4379 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4381 mcp->in_mb |= MBX_1;
4382 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4383 mcp->out_mb |= MBX_15;
4384 /* debug q create issue in SR-IOV */
4385 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
/* Zero the hardware queue pointers before handing the queue to firmware. */
4388 spin_lock_irqsave(&ha->hardware_lock, flags);
4389 if (!(req->options & BIT_0)) {
4390 WRT_REG_DWORD(req->req_q_in, 0);
4391 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4392 WRT_REG_DWORD(req->req_q_out, 0);
4394 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4396 rval = qla2x00_mailbox_command(vha, mcp);
4397 if (rval != QLA_SUCCESS) {
4398 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4399 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4401 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4402 "Done %s.\n", __func__);
/*
 * qla25xx_init_rsp_que
 * Initialize a multiqueue response queue via MBC_INITIALIZE_MULTIQ.
 * Mirrors qla25xx_init_req_que() for the response side.
 */
4409 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4412 unsigned long flags;
4414 mbx_cmd_t *mcp = &mc;
4415 struct qla_hw_data *ha = vha->hw;
4417 if (!ha->flags.fw_started)
4420 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4421 "Entered %s.\n", __func__);
/* BIT_13 requests shadow registers on capable adapters. */
4423 if (IS_SHADOW_REG_CAPABLE(ha))
4424 rsp->options |= BIT_13;
4426 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4427 mcp->mb[1] = rsp->options;
/* 64-bit queue DMA base split over mb[2,3,6,7]. */
4428 mcp->mb[2] = MSW(LSD(rsp->dma));
4429 mcp->mb[3] = LSW(LSD(rsp->dma));
4430 mcp->mb[6] = MSW(MSD(rsp->dma));
4431 mcp->mb[7] = LSW(MSD(rsp->dma));
4432 mcp->mb[5] = rsp->length;
4433 mcp->mb[14] = rsp->msix->entry;
4434 mcp->mb[13] = rsp->rid;
4435 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4438 mcp->mb[4] = rsp->id;
4439 /* que in ptr index */
4440 mcp->mb[8] = *rsp->in_ptr = 0;
4441 /* que out ptr index */
4443 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4444 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4446 mcp->flags = MBX_DMA_OUT;
4447 mcp->tov = MBX_TOV_SECONDS * 2;
4449 if (IS_QLA81XX(ha)) {
4450 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4451 mcp->in_mb |= MBX_1;
4452 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4453 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4454 mcp->in_mb |= MBX_1;
4455 /* debug q create issue in SR-IOV */
4456 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
/* Zero the hardware queue pointers before handing the queue to firmware. */
4459 spin_lock_irqsave(&ha->hardware_lock, flags);
4460 if (!(rsp->options & BIT_0)) {
4461 WRT_REG_DWORD(rsp->rsp_q_out, 0);
4462 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4463 WRT_REG_DWORD(rsp->rsp_q_in, 0);
4466 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4468 rval = qla2x00_mailbox_command(vha, mcp);
4469 if (rval != QLA_SUCCESS) {
4470 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4471 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4473 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4474 "Done %s.\n", __func__);
/*
 * qla81xx_idc_ack
 * Acknowledge an Inter-Driver Communication (IDC) request by echoing
 * the QLA_IDC_ACK_REGS mailbox values supplied in 'mb' back to firmware.
 */
4481 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4485 mbx_cmd_t *mcp = &mc;
4487 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4488 "Entered %s.\n", __func__);
4490 mcp->mb[0] = MBC_IDC_ACK;
4491 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4492 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4494 mcp->tov = MBX_TOV_SECONDS;
4496 rval = qla2x00_mailbox_command(vha, mcp);
4498 if (rval != QLA_SUCCESS) {
4499 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4500 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4502 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4503 "Done %s.\n", __func__);
/*
 * qla81xx_fac_get_sector_size
 * Query the flash sector size through the Flash Access Control (FAC)
 * mailbox interface; result is returned in *sector_size on success.
 * Only supported on 81xx/83xx/27xx/28xx parts.
 */
4510 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4514 mbx_cmd_t *mcp = &mc;
4516 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4517 "Entered %s.\n", __func__);
4519 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4520 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4521 return QLA_FUNCTION_FAILED;
4523 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4524 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4525 mcp->out_mb = MBX_1|MBX_0;
4526 mcp->in_mb = MBX_1|MBX_0;
4527 mcp->tov = MBX_TOV_SECONDS;
4529 rval = qla2x00_mailbox_command(vha, mcp);
4531 if (rval != QLA_SUCCESS) {
4532 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4533 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4534 rval, mcp->mb[0], mcp->mb[1]);
4536 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4537 "Done %s.\n", __func__);
/* Firmware reports the sector size in mb[1]. */
4538 *sector_size = mcp->mb[1];
/*
 * qla81xx_fac_do_write_enable
 * Enable (enable != 0) or protect flash writes via the FAC interface.
 * Only supported on 81xx/83xx/27xx/28xx parts.
 */
4545 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4549 mbx_cmd_t *mcp = &mc;
4551 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4552 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4553 return QLA_FUNCTION_FAILED;
4555 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4556 "Entered %s.\n", __func__);
4558 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4559 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4560 FAC_OPT_CMD_WRITE_PROTECT;
4561 mcp->out_mb = MBX_1|MBX_0;
4562 mcp->in_mb = MBX_1|MBX_0;
4563 mcp->tov = MBX_TOV_SECONDS;
4565 rval = qla2x00_mailbox_command(vha, mcp);
4567 if (rval != QLA_SUCCESS) {
4568 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4569 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4570 rval, mcp->mb[0], mcp->mb[1]);
4572 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4573 "Done %s.\n", __func__);
/*
 * qla81xx_fac_erase_sector
 * Erase the flash sector range [start, finish] via the FAC interface.
 * Each 32-bit address is split across two 16-bit mailbox registers.
 * Only supported on 81xx/83xx/27xx/28xx parts.
 */
4580 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4584 mbx_cmd_t *mcp = &mc;
4586 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4587 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4588 return QLA_FUNCTION_FAILED;
4590 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4591 "Entered %s.\n", __func__);
4593 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4594 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4595 mcp->mb[2] = LSW(start);
4596 mcp->mb[3] = MSW(start);
4597 mcp->mb[4] = LSW(finish);
4598 mcp->mb[5] = MSW(finish);
4599 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4600 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4601 mcp->tov = MBX_TOV_SECONDS;
4603 rval = qla2x00_mailbox_command(vha, mcp);
4605 if (rval != QLA_SUCCESS) {
4606 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4607 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4608 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4610 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4611 "Done %s.\n", __func__);
/*
 * qla81xx_fac_semaphore_access
 * Lock (lock != 0) or unlock the flash-access semaphore via the FAC
 * interface. Only supported on 81xx/83xx/27xx/28xx parts.
 */
4618 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock)
4620 int rval = QLA_SUCCESS;
4622 mbx_cmd_t *mcp = &mc;
4623 struct qla_hw_data *ha = vha->hw;
4625 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
4626 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4629 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4630 "Entered %s.\n", __func__);
4632 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4633 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE :
4634 FAC_OPT_CMD_UNLOCK_SEMAPHORE);
4635 mcp->out_mb = MBX_1|MBX_0;
4636 mcp->in_mb = MBX_1|MBX_0;
4637 mcp->tov = MBX_TOV_SECONDS;
4639 rval = qla2x00_mailbox_command(vha, mcp);
4641 if (rval != QLA_SUCCESS) {
4642 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4643 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4644 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4646 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4647 "Done %s.\n", __func__);
/*
 * qla81xx_restart_mpi_firmware
 * Ask firmware to restart the Management Processor Interface (MPI) FW.
 */
4654 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4658 mbx_cmd_t *mcp = &mc;
4660 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4661 "Entered %s.\n", __func__);
4663 mcp->mb[0] = MBC_RESTART_MPI_FW;
4664 mcp->out_mb = MBX_0;
4665 mcp->in_mb = MBX_0|MBX_1;
4666 mcp->tov = MBX_TOV_SECONDS;
4668 rval = qla2x00_mailbox_command(vha, mcp);
4670 if (rval != QLA_SUCCESS) {
4671 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4672 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4673 rval, mcp->mb[0], mcp->mb[1]);
4675 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4676 "Done %s.\n", __func__);
/*
 * qla82xx_set_driver_version
 * Report the driver version string to P3P (82xx) firmware via
 * MBC_SET_RNID_PARAMS, packing the string two bytes per mailbox
 * register into mb[4..15].
 */
4683 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4687 mbx_cmd_t *mcp = &mc;
4691 struct qla_hw_data *ha = vha->hw;
4693 if (!IS_P3P_TYPE(ha))
4694 return QLA_FUNCTION_FAILED;
4696 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4697 "Entered %s.\n", __func__);
4699 str = (void *)version;
4700 len = strlen(version);
4702 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4703 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4704 mcp->out_mb = MBX_1|MBX_0;
/* Two version bytes per 16-bit mailbox register. */
4705 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4706 mcp->mb[i] = cpu_to_le16p(str);
4707 mcp->out_mb |= 1<<i;
/* Remaining registers are still marked out so firmware sees them cleared. */
4709 for (; i < 16; i++) {
4711 mcp->out_mb |= 1<<i;
4713 mcp->in_mb = MBX_1|MBX_0;
4714 mcp->tov = MBX_TOV_SECONDS;
4716 rval = qla2x00_mailbox_command(vha, mcp);
4718 if (rval != QLA_SUCCESS) {
4719 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4720 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4722 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4723 "Done %s.\n", __func__);
/*
 * qla25xx_set_driver_version
 * Report the driver version string to firmware via a DMA buffer
 * (MBC_SET_RNID_PARAMS / RNID_TYPE_SET_VERSION) rather than inline
 * mailbox registers; used on newer FWI2 parts.
 */
4730 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4734 mbx_cmd_t *mcp = &mc;
4739 struct qla_hw_data *ha = vha->hw;
4741 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4743 return QLA_FUNCTION_FAILED;
4745 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4746 "Entered %s.\n", __func__);
4748 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4750 ql_log(ql_log_warn, vha, 0x117f,
4751 "Failed to allocate driver version param.\n");
4752 return QLA_MEMORY_ALLOC_FAILED;
/* 4-byte RNID header, then the version string (truncated to fit). */
4755 memcpy(str, "\x7\x3\x11\x0", 4);
4757 len = dwlen * 4 - 4;
4758 memset(str + 4, 0, len);
4759 if (len > strlen(version))
4760 len = strlen(version);
4761 memcpy(str + 4, version, len);
4763 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4764 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4765 mcp->mb[2] = MSW(LSD(str_dma));
4766 mcp->mb[3] = LSW(LSD(str_dma));
4767 mcp->mb[6] = MSW(MSD(str_dma));
4768 mcp->mb[7] = LSW(MSD(str_dma));
4769 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4770 mcp->in_mb = MBX_1|MBX_0;
4771 mcp->tov = MBX_TOV_SECONDS;
4773 rval = qla2x00_mailbox_command(vha, mcp);
4775 if (rval != QLA_SUCCESS) {
4776 ql_dbg(ql_dbg_mbx, vha, 0x1180,
4777 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4779 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4780 "Done %s.\n", __func__);
4783 dma_pool_free(ha->s_dma_pool, str, str_dma);
/*
 * qla24xx_get_port_login_templ
 * Fetch the port-login (PLOGI) payload template from firmware into
 * 'buf' ('bufsiz' bytes, DMA address 'buf_dma'), then byte-swap the
 * returned 32-bit words to CPU order in place.
 */
4789 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4790 void *buf, uint16_t bufsiz)
4794 mbx_cmd_t *mcp = &mc;
4797 if (!IS_FWI2_CAPABLE(vha->hw))
4798 return QLA_FUNCTION_FAILED;
4800 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4801 "Entered %s.\n", __func__);
4803 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4804 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4805 mcp->mb[2] = MSW(buf_dma);
4806 mcp->mb[3] = LSW(buf_dma);
4807 mcp->mb[6] = MSW(MSD(buf_dma));
4808 mcp->mb[7] = LSW(MSD(buf_dma));
/* Size is expressed in 32-bit words. */
4809 mcp->mb[8] = bufsiz/4;
4810 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4811 mcp->in_mb = MBX_1|MBX_0;
4812 mcp->tov = MBX_TOV_SECONDS;
4814 rval = qla2x00_mailbox_command(vha, mcp);
4816 if (rval != QLA_SUCCESS) {
4817 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4818 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4820 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4821 "Done %s.\n", __func__);
/* Convert returned little-endian words to host order in place. */
4822 bp = (uint32_t *) buf;
4823 for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4824 *bp = le32_to_cpu(*bp);
/*
 * qla2x00_read_asic_temperature
 * Read the ASIC temperature through MBC_GET_RNID_PARAMS /
 * RNID_TYPE_ASIC_TEMP. FWI2-capable adapters only.
 */
4831 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
4835 mbx_cmd_t *mcp = &mc;
4837 if (!IS_FWI2_CAPABLE(vha->hw))
4838 return QLA_FUNCTION_FAILED;
4840 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4841 "Entered %s.\n", __func__);
4843 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4844 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
4845 mcp->out_mb = MBX_1|MBX_0;
4846 mcp->in_mb = MBX_1|MBX_0;
4847 mcp->tov = MBX_TOV_SECONDS;
4849 rval = qla2x00_mailbox_command(vha, mcp);
4852 if (rval != QLA_SUCCESS) {
4853 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4854 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4856 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4857 "Done %s.\n", __func__);
/*
 * qla2x00_read_sfp
 * Read 'len' bytes from SFP device 'dev' at offset 'off' into the DMA
 * buffer at 'sfp_dma'. mb[0]==MBS_COMMAND_ERROR with mb[1]==0x22 means
 * no SFP is present and is mapped to QLA_INTERFACE_ERROR.
 */
4864 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4865 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4869 mbx_cmd_t *mcp = &mc;
4870 struct qla_hw_data *ha = vha->hw;
4872 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
4873 "Entered %s.\n", __func__);
4875 if (!IS_FWI2_CAPABLE(ha))
4876 return QLA_FUNCTION_FAILED;
4881 mcp->mb[0] = MBC_READ_SFP;
4883 mcp->mb[2] = MSW(sfp_dma);
4884 mcp->mb[3] = LSW(sfp_dma);
4885 mcp->mb[6] = MSW(MSD(sfp_dma));
4886 mcp->mb[7] = LSW(MSD(sfp_dma));
4890 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4891 mcp->in_mb = MBX_1|MBX_0;
4892 mcp->tov = MBX_TOV_SECONDS;
4894 rval = qla2x00_mailbox_command(vha, mcp);
4899 if (rval != QLA_SUCCESS) {
4900 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
4901 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4902 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
4903 /* sfp is not there */
4904 rval = QLA_INTERFACE_ERROR;
4907 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
4908 "Done %s.\n", __func__);
/*
 * qla2x00_write_sfp
 * Write 'len' bytes from the DMA buffer at 'sfp_dma' to SFP device
 * 'dev' at offset 'off'. Mirrors qla2x00_read_sfp().
 */
4915 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4916 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4920 mbx_cmd_t *mcp = &mc;
4921 struct qla_hw_data *ha = vha->hw;
4923 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
4924 "Entered %s.\n", __func__);
4926 if (!IS_FWI2_CAPABLE(ha))
4927 return QLA_FUNCTION_FAILED;
4935 mcp->mb[0] = MBC_WRITE_SFP;
4937 mcp->mb[2] = MSW(sfp_dma);
4938 mcp->mb[3] = LSW(sfp_dma);
4939 mcp->mb[6] = MSW(MSD(sfp_dma));
4940 mcp->mb[7] = LSW(MSD(sfp_dma));
4944 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4945 mcp->in_mb = MBX_1|MBX_0;
4946 mcp->tov = MBX_TOV_SECONDS;
4948 rval = qla2x00_mailbox_command(vha, mcp);
4950 if (rval != QLA_SUCCESS) {
4951 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
4952 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4954 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
4955 "Done %s.\n", __func__);
/*
 * qla2x00_get_xgmac_stats
 * Retrieve XGMAC statistics into the buffer at 'stats_dma'.
 * Sizes are exchanged in 32-bit dwords (hence the >>2 / <<2);
 * *actual_size returns the byte count firmware actually wrote.
 * CNA-capable adapters only.
 */
4962 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
4963 uint16_t size_in_bytes, uint16_t *actual_size)
4967 mbx_cmd_t *mcp = &mc;
4969 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
4970 "Entered %s.\n", __func__);
4972 if (!IS_CNA_CAPABLE(vha->hw))
4973 return QLA_FUNCTION_FAILED;
4975 mcp->mb[0] = MBC_GET_XGMAC_STATS;
4976 mcp->mb[2] = MSW(stats_dma);
4977 mcp->mb[3] = LSW(stats_dma);
4978 mcp->mb[6] = MSW(MSD(stats_dma));
4979 mcp->mb[7] = LSW(MSD(stats_dma));
4980 mcp->mb[8] = size_in_bytes >> 2;
4981 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
4982 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4983 mcp->tov = MBX_TOV_SECONDS;
4985 rval = qla2x00_mailbox_command(vha, mcp);
4987 if (rval != QLA_SUCCESS) {
4988 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
4989 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4990 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4992 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
4993 "Done %s.\n", __func__);
4996 *actual_size = mcp->mb[2] << 2;
/*
 * qla2x00_get_dcbx_params
 * Fetch the DCBX TLV parameter block into the buffer at 'tlv_dma'.
 * CNA-capable adapters only.
 */
5003 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
5008 mbx_cmd_t *mcp = &mc;
5010 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
5011 "Entered %s.\n", __func__);
5013 if (!IS_CNA_CAPABLE(vha->hw))
5014 return QLA_FUNCTION_FAILED;
5016 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
5018 mcp->mb[2] = MSW(tlv_dma);
5019 mcp->mb[3] = LSW(tlv_dma);
5020 mcp->mb[6] = MSW(MSD(tlv_dma));
5021 mcp->mb[7] = LSW(MSD(tlv_dma));
5023 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5024 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5025 mcp->tov = MBX_TOV_SECONDS;
5027 rval = qla2x00_mailbox_command(vha, mcp);
5029 if (rval != QLA_SUCCESS) {
5030 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
5031 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5032 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5034 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
5035 "Done %s.\n", __func__);
/*
 * qla2x00_read_ram_word
 * Read one 32-bit word of RISC RAM at 'risc_addr' into *data.
 * FWI2-capable adapters only; result is returned in mb[3]:mb[2].
 */
5042 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
5046 mbx_cmd_t *mcp = &mc;
5048 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
5049 "Entered %s.\n", __func__);
5051 if (!IS_FWI2_CAPABLE(vha->hw))
5052 return QLA_FUNCTION_FAILED;
5054 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
5055 mcp->mb[1] = LSW(risc_addr);
5056 mcp->mb[8] = MSW(risc_addr);
5057 mcp->out_mb = MBX_8|MBX_1|MBX_0;
5058 mcp->in_mb = MBX_3|MBX_2|MBX_0;
5061 rval = qla2x00_mailbox_command(vha, mcp);
5062 if (rval != QLA_SUCCESS) {
5063 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
5064 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5066 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
5067 "Done %s.\n", __func__);
5068 *data = mcp->mb[3] << 16 | mcp->mb[2];
/*
 * qla2x00_loopback_test
 * Run the diagnostic loopback mailbox command described by 'mreq'
 * (send/receive DMA buffers, transfer size, iteration count) and copy
 * the raw mailbox results (64 bytes) back to 'mresp'.
 */
5075 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5080 mbx_cmd_t *mcp = &mc;
5082 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
5083 "Entered %s.\n", __func__);
5085 memset(mcp->mb, 0 , sizeof(mcp->mb));
5086 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
5087 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing
5089 /* transfer count */
5090 mcp->mb[10] = LSW(mreq->transfer_size);
5091 mcp->mb[11] = MSW(mreq->transfer_size);
5093 /* send data address */
5094 mcp->mb[14] = LSW(mreq->send_dma);
5095 mcp->mb[15] = MSW(mreq->send_dma);
5096 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5097 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5099 /* receive data address */
5100 mcp->mb[16] = LSW(mreq->rcv_dma);
5101 mcp->mb[17] = MSW(mreq->rcv_dma);
5102 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5103 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5105 /* Iteration count */
5106 mcp->mb[18] = LSW(mreq->iteration_count);
5107 mcp->mb[19] = MSW(mreq->iteration_count);
5109 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
5110 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5111 if (IS_CNA_CAPABLE(vha->hw))
5112 mcp->out_mb |= MBX_2;
5113 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
5115 mcp->buf_size = mreq->transfer_size;
5116 mcp->tov = MBX_TOV_SECONDS;
5117 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5119 rval = qla2x00_mailbox_command(vha, mcp);
5121 if (rval != QLA_SUCCESS) {
5122 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
5123 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
5124 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
5125 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
5127 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
5128 "Done %s.\n", __func__);
5131 /* Copy mailbox information */
5132 memcpy( mresp, mcp->mb, 64);
/*
 * qla2x00_echo_test
 * Run the diagnostic ECHO mailbox command described by 'mreq' and copy
 * the raw mailbox results (64 bytes) back to 'mresp'. On CNA adapters
 * the FCoE FCF index is passed in mb[2].
 */
5137 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5142 mbx_cmd_t *mcp = &mc;
5143 struct qla_hw_data *ha = vha->hw;
5145 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
5146 "Entered %s.\n", __func__);
5148 memset(mcp->mb, 0 , sizeof(mcp->mb));
5149 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
5150 /* BIT_6 specifies 64bit address */
5151 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
5152 if (IS_CNA_CAPABLE(ha)) {
5153 mcp->mb[2] = vha->fcoe_fcf_idx;
5155 mcp->mb[16] = LSW(mreq->rcv_dma);
5156 mcp->mb[17] = MSW(mreq->rcv_dma);
5157 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5158 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5160 mcp->mb[10] = LSW(mreq->transfer_size);
5162 mcp->mb[14] = LSW(mreq->send_dma);
5163 mcp->mb[15] = MSW(mreq->send_dma);
5164 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5165 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5167 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5168 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5169 if (IS_CNA_CAPABLE(ha))
5170 mcp->out_mb |= MBX_2;
5173 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5174 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
5175 mcp->in_mb |= MBX_1;
5176 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
5177 mcp->in_mb |= MBX_3;
5179 mcp->tov = MBX_TOV_SECONDS;
5180 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5181 mcp->buf_size = mreq->transfer_size;
5183 rval = qla2x00_mailbox_command(vha, mcp);
5185 if (rval != QLA_SUCCESS) {
5186 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5187 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5188 rval, mcp->mb[0], mcp->mb[1]);
5190 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5191 "Done %s.\n", __func__);
5194 /* Copy mailbox information */
5195 memcpy(mresp, mcp->mb, 64);
/*
 * qla84xx_reset_chip
 * Reset the ISP84xx chip, optionally leaving it in diagnostic mode
 * (enable_diagnostic != 0).
 */
5200 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5204 mbx_cmd_t *mcp = &mc;
5206 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5207 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5209 mcp->mb[0] = MBC_ISP84XX_RESET;
5210 mcp->mb[1] = enable_diagnostic;
5211 mcp->out_mb = MBX_1|MBX_0;
5212 mcp->in_mb = MBX_1|MBX_0;
5213 mcp->tov = MBX_TOV_SECONDS;
5214 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5215 rval = qla2x00_mailbox_command(vha, mcp);
5217 if (rval != QLA_SUCCESS)
5218 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5220 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5221 "Done %s.\n", __func__);
/*
 * qla2x00_write_ram_word
 * Write one 32-bit word 'data' to RISC RAM at 'risc_addr'.
 * FWI2-capable adapters only; counterpart of qla2x00_read_ram_word().
 */
5227 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5231 mbx_cmd_t *mcp = &mc;
5233 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5234 "Entered %s.\n", __func__);
5236 if (!IS_FWI2_CAPABLE(vha->hw))
5237 return QLA_FUNCTION_FAILED;
5239 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5240 mcp->mb[1] = LSW(risc_addr);
5241 mcp->mb[2] = LSW(data);
5242 mcp->mb[3] = MSW(data);
5243 mcp->mb[8] = MSW(risc_addr);
5244 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5245 mcp->in_mb = MBX_1|MBX_0;
5248 rval = qla2x00_mailbox_command(vha, mcp);
5249 if (rval != QLA_SUCCESS) {
5250 ql_dbg(ql_dbg_mbx, vha, 0x1101,
5251 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5252 rval, mcp->mb[0], mcp->mb[1]);
5254 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5255 "Done %s.\n", __func__);
/*
 * qla81xx_write_mpi_register
 * Issue MBC_WRITE_MPI_REGISTER by banging the mailbox registers
 * directly and polling for the completion interrupt, instead of going
 * through qla2x00_mailbox_command(). Returns the firmware status from
 * mailbox0, or QLA_FUNCTION_FAILED on poll timeout.
 */
5262 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5265 uint32_t stat, timer;
5267 struct qla_hw_data *ha = vha->hw;
5268 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5272 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5273 "Entered %s.\n", __func__);
5275 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5277 /* Write the MBC data to the registers */
5278 WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5279 WRT_REG_WORD(&reg->mailbox1, mb[0]);
5280 WRT_REG_WORD(&reg->mailbox2, mb[1]);
5281 WRT_REG_WORD(&reg->mailbox3, mb[2]);
5282 WRT_REG_WORD(&reg->mailbox4, mb[3]);
5284 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
5286 /* Poll for MBC interrupt */
5287 for (timer = 6000000; timer; timer--) {
5288 /* Check for pending interrupts. */
5289 stat = RD_REG_DWORD(&reg->host_status);
5290 if (stat & HSRX_RISC_INT) {
/* Only mailbox-completion interrupt codes terminate the poll. */
5293 if (stat == 0x1 || stat == 0x2 ||
5294 stat == 0x10 || stat == 0x11) {
5295 set_bit(MBX_INTERRUPT,
5296 &ha->mbx_cmd_flags);
5297 mb0 = RD_REG_WORD(&reg->mailbox0);
5298 WRT_REG_DWORD(&reg->hccr,
5299 HCCRX_CLR_RISC_INT);
5300 RD_REG_DWORD(&reg->hccr);
5307 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5308 rval = mb0 & MBS_MASK;
5310 rval = QLA_FUNCTION_FAILED;
5312 if (rval != QLA_SUCCESS) {
5313 ql_dbg(ql_dbg_mbx, vha, 0x1104,
5314 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5316 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5317 "Done %s.\n", __func__);
5323 /* Set the specified data rate */
/*
 * qla2x00_set_data_rate
 * Program the link data rate from ha->set_data_rate via MBC_DATA_RATE.
 * Unrecognized speed settings fall back to autonegotiation.
 */
5325 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
5329 mbx_cmd_t *mcp = &mc;
5330 struct qla_hw_data *ha = vha->hw;
5333 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5334 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
5337 if (!IS_FWI2_CAPABLE(ha))
5338 return QLA_FUNCTION_FAILED;
5340 memset(mcp, 0, sizeof(*mcp));
5341 switch (ha->set_data_rate) {
5342 case PORT_SPEED_AUTO:
5343 case PORT_SPEED_4GB:
5344 case PORT_SPEED_8GB:
5345 case PORT_SPEED_16GB:
5346 case PORT_SPEED_32GB:
5347 val = ha->set_data_rate;
5350 ql_log(ql_log_warn, vha, 0x1199,
5351 "Unrecognized speed setting:%d. Setting Autoneg\n",
5353 val = ha->set_data_rate = PORT_SPEED_AUTO;
5357 mcp->mb[0] = MBC_DATA_RATE;
5361 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5362 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5363 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5364 mcp->in_mb |= MBX_4|MBX_3;
5365 mcp->tov = MBX_TOV_SECONDS;
5367 rval = qla2x00_mailbox_command(vha, mcp);
5368 if (rval != QLA_SUCCESS) {
5369 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5370 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
/* mb[1]==0x7 is treated as "no speed to report". */
5372 if (mcp->mb[1] != 0x7)
5373 ql_dbg(ql_dbg_mbx, vha, 0x1179,
5374 "Speed set:0x%x\n", mcp->mb[1]);
5376 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5377 "Done %s.\n", __func__);
/*
 * qla2x00_get_data_rate
 * Query the current link data rate (MBC_DATA_RATE / QLA_GET_DATA_RATE)
 * and cache it in ha->link_data_rate when firmware reports a rate.
 */
5384 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5388 mbx_cmd_t *mcp = &mc;
5389 struct qla_hw_data *ha = vha->hw;
5391 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5392 "Entered %s.\n", __func__);
5394 if (!IS_FWI2_CAPABLE(ha))
5395 return QLA_FUNCTION_FAILED;
5397 mcp->mb[0] = MBC_DATA_RATE;
5398 mcp->mb[1] = QLA_GET_DATA_RATE;
5399 mcp->out_mb = MBX_1|MBX_0;
5400 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5401 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5402 mcp->in_mb |= MBX_4|MBX_3;
5403 mcp->tov = MBX_TOV_SECONDS;
5405 rval = qla2x00_mailbox_command(vha, mcp);
5406 if (rval != QLA_SUCCESS) {
5407 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5408 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5410 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5411 "Done %s.\n", __func__);
/* mb[1]==0x7 means no rate reported; keep the cached value. */
5412 if (mcp->mb[1] != 0x7)
5413 ha->link_data_rate = mcp->mb[1];
/*
 * qla81xx_get_port_config
 *	Fetch the four port-configuration words from firmware
 *	(MBC_GET_PORT_CONFIG) into the caller-supplied mb[0..3] array.
 *	Supported on 81xx/83xx/8044/27xx/28xx only.
 */
5420 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5424 mbx_cmd_t *mcp = &mc;
5425 struct qla_hw_data *ha = vha->hw;
5427 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5428 "Entered %s.\n", __func__);
5430 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5431 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5432 return QLA_FUNCTION_FAILED;
5433 mcp->mb[0] = MBC_GET_PORT_CONFIG;
5434 mcp->out_mb = MBX_0;
5435 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5436 mcp->tov = MBX_TOV_SECONDS;
5439 rval = qla2x00_mailbox_command(vha, mcp);
5441 if (rval != QLA_SUCCESS) {
5442 ql_dbg(ql_dbg_mbx, vha, 0x110a,
5443 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
/* Success path (else-branch brace elided): copy mcp->mb[1..4] out. */
5445 /* Copy all bits to preserve original value */
5446 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5448 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5449 "Done %s.\n", __func__);
/*
 * qla81xx_set_port_config
 *	Write the caller's four port-configuration words (mb[0..3]) into
 *	mailbox registers 1..4 and issue MBC_SET_PORT_CONFIG.
 */
5455 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5459 mbx_cmd_t *mcp = &mc;
5461 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5462 "Entered %s.\n", __func__);
5464 mcp->mb[0] = MBC_SET_PORT_CONFIG;
5465 /* Copy all bits to preserve original setting */
5466 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5467 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5469 mcp->tov = MBX_TOV_SECONDS;
5471 rval = qla2x00_mailbox_command(vha, mcp);
5473 if (rval != QLA_SUCCESS) {
5474 ql_dbg(ql_dbg_mbx, vha, 0x110d,
5475 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5477 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5478 "Done %s.\n", __func__);
/*
 * qla24xx_set_fcp_prio
 *	Set the FCP priority for a logged-in port (MBC_PORT_PARAMS).
 *	Only ISP24xx-type and ISP25xx adapters support this.
 *	NOTE(review): the mb[3] setup (dependent on fcp_prio_enabled) is
 *	elided between lines 5501 and 5505 in this listing.
 */
5485 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5490 mbx_cmd_t *mcp = &mc;
5491 struct qla_hw_data *ha = vha->hw;
5493 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5494 "Entered %s.\n", __func__);
5496 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5497 return QLA_FUNCTION_FAILED;
5499 mcp->mb[0] = MBC_PORT_PARAMS;
5500 mcp->mb[1] = loop_id;
5501 if (ha->flags.fcp_prio_enabled)
/* Priority is a 4-bit value; vp_idx selects the virtual port. */
5505 mcp->mb[4] = priority & 0xf;
5506 mcp->mb[9] = vha->vp_idx;
5507 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5508 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5511 rval = qla2x00_mailbox_command(vha, mcp);
5519 if (rval != QLA_SUCCESS) {
5520 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5522 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5523 "Done %s.\n", __func__);
/*
 * qla2x00_get_thermal_temp
 *	Read the adapter temperature.  The access method depends on the
 *	hardware: an SFP/I2C byte read on two specific ISP25xx subsystem
 *	IDs, direct register reads on P3P parts (82xx/8044), and the ASIC
 *	temperature mailbox command otherwise.  rval starts as FAILED so
 *	unsupported cards report failure by default.
 */
5530 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5532 int rval = QLA_FUNCTION_FAILED;
5533 struct qla_hw_data *ha = vha->hw;
/* Pre-FWI2, 24xx-type and 81xx parts have no thermal sensor interface. */
5536 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5537 ql_dbg(ql_dbg_mbx, vha, 0x1150,
5538 "Thermal not supported by this card.\n");
5542 if (IS_QLA25XX(ha)) {
/* QLogic-branded 25xx (subsystem 0x0175): thermal byte at SFP addr 0x98. */
5543 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5544 ha->pdev->subsystem_device == 0x0175) {
5545 rval = qla2x00_read_sfp(vha, 0, &byte,
5546 0x98, 0x1, 1, BIT_13|BIT_0);
/* HP-branded 25xx (subsystem 0x338e): same address, different flags. */
5550 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5551 ha->pdev->subsystem_device == 0x338e) {
5552 rval = qla2x00_read_sfp(vha, 0, &byte,
5553 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5557 ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5558 "Thermal not supported by this card.\n");
5562 if (IS_QLA82XX(ha)) {
5563 *temp = qla82xx_read_temperature(vha);
5566 } else if (IS_QLA8044(ha)) {
5567 *temp = qla8044_read_temperature(vha);
/* All other FWI2 parts: ask firmware for the ASIC temperature. */
5572 rval = qla2x00_read_asic_temperature(vha, temp);
/*
 * qla82xx_mbx_intr_enable
 *	Ask firmware to enable interrupt delivery (MBC_TOGGLE_INTERRUPT).
 *	NOTE(review): the enable value written to mb[1] is on an elided
 *	line; also this gate is IS_FWI2_CAPABLE() while the paired
 *	qla82xx_mbx_intr_disable() checks IS_P3P_TYPE() -- asymmetric,
 *	confirm whether that is intentional.
 */
5577 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5580 struct qla_hw_data *ha = vha->hw;
5582 mbx_cmd_t *mcp = &mc;
5584 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5585 "Entered %s.\n", __func__);
5587 if (!IS_FWI2_CAPABLE(ha))
5588 return QLA_FUNCTION_FAILED;
5590 memset(mcp, 0, sizeof(mbx_cmd_t));
5591 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5594 mcp->out_mb = MBX_1|MBX_0;
5599 rval = qla2x00_mailbox_command(vha, mcp);
5600 if (rval != QLA_SUCCESS) {
5601 ql_dbg(ql_dbg_mbx, vha, 0x1016,
5602 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5604 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5605 "Done %s.\n", __func__);
/*
 * qla82xx_mbx_intr_disable
 *	Ask firmware to disable interrupt delivery (MBC_TOGGLE_INTERRUPT).
 *	Restricted to P3P-type (82xx/8044) adapters.  The disable value
 *	written to mb[1] is on an elided line in this listing.
 */
5612 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5615 struct qla_hw_data *ha = vha->hw;
5617 mbx_cmd_t *mcp = &mc;
5619 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5620 "Entered %s.\n", __func__);
5622 if (!IS_P3P_TYPE(ha))
5623 return QLA_FUNCTION_FAILED;
5625 memset(mcp, 0, sizeof(mbx_cmd_t));
5626 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5629 mcp->out_mb = MBX_1|MBX_0;
5634 rval = qla2x00_mailbox_command(vha, mcp);
5635 if (rval != QLA_SUCCESS) {
5636 ql_dbg(ql_dbg_mbx, vha, 0x100c,
5637 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5639 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5640 "Done %s.\n", __func__);
/*
 * qla82xx_md_get_template_size
 *	Query the ISP82xx minidump-template size (RQST_TMPLT_SIZE subcode
 *	of MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE).  The 32-bit size comes back
 *	split across mb[2] (low) and mb[3] (high) and is cached in
 *	ha->md_template_size; a zero size is treated as failure.
 */
5647 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5649 struct qla_hw_data *ha = vha->hw;
5651 mbx_cmd_t *mcp = &mc;
5652 int rval = QLA_FUNCTION_FAILED;
5654 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5655 "Entered %s.\n", __func__);
5657 memset(mcp->mb, 0 , sizeof(mcp->mb));
/* 32-bit command/subcode values are split into 16-bit register pairs. */
5658 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5659 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5660 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5661 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5663 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5664 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5665 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5667 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5668 mcp->tov = MBX_TOV_SECONDS;
5669 rval = qla2x00_mailbox_command(vha, mcp);
5671 /* Always copy back return mailbox values. */
5672 if (rval != QLA_SUCCESS) {
5673 ql_dbg(ql_dbg_mbx, vha, 0x1120,
5674 "mailbox command FAILED=0x%x, subcode=%x.\n",
5675 (mcp->mb[1] << 16) | mcp->mb[0],
5676 (mcp->mb[3] << 16) | mcp->mb[2]);
5678 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5679 "Done %s.\n", __func__);
/* Reassemble the 32-bit template size from the mb[3]:mb[2] pair. */
5680 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5681 if (!ha->md_template_size) {
5682 ql_dbg(ql_dbg_mbx, vha, 0x1122,
5683 "Null template size obtained.\n");
5684 rval = QLA_FUNCTION_FAILED;
/*
 * qla82xx_md_get_template
 *	Fetch the ISP82xx minidump template (RQST_TMPLT) into a freshly
 *	allocated DMA-coherent buffer of ha->md_template_size bytes
 *	(single transfer; contrast with the chunked 8044 variant below).
 *	The buffer (ha->md_tmplt_hdr) is presumably freed during teardown
 *	elsewhere -- not visible in this excerpt.
 */
5691 qla82xx_md_get_template(scsi_qla_host_t *vha)
5693 struct qla_hw_data *ha = vha->hw;
5695 mbx_cmd_t *mcp = &mc;
5696 int rval = QLA_FUNCTION_FAILED;
5698 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5699 "Entered %s.\n", __func__);
5701 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5702 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5703 if (!ha->md_tmplt_hdr) {
5704 ql_log(ql_log_warn, vha, 0x1124,
5705 "Unable to allocate memory for Minidump template.\n");
5709 memset(mcp->mb, 0 , sizeof(mcp->mb));
5710 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5711 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5712 mcp->mb[2] = LSW(RQST_TMPLT);
5713 mcp->mb[3] = MSW(RQST_TMPLT);
/* 64-bit DMA address split across four 16-bit mailbox registers. */
5714 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5715 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5716 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5717 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5718 mcp->mb[8] = LSW(ha->md_template_size);
5719 mcp->mb[9] = MSW(ha->md_template_size);
5721 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5722 mcp->tov = MBX_TOV_SECONDS;
5723 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5724 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5725 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5726 rval = qla2x00_mailbox_command(vha, mcp);
5728 if (rval != QLA_SUCCESS) {
5729 ql_dbg(ql_dbg_mbx, vha, 0x1125,
5730 "mailbox command FAILED=0x%x, subcode=%x.\n",
5731 ((mcp->mb[1] << 16) | mcp->mb[0]),
5732 ((mcp->mb[3] << 16) | mcp->mb[2]));
5734 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5735 "Done %s.\n", __func__);
5740 qla8044_md_get_template(scsi_qla_host_t *vha)
5742 struct qla_hw_data *ha = vha->hw;
5744 mbx_cmd_t *mcp = &mc;
5745 int rval = QLA_FUNCTION_FAILED;
5746 int offset = 0, size = MINIDUMP_SIZE_36K;
5748 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5749 "Entered %s.\n", __func__);
5751 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5752 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5753 if (!ha->md_tmplt_hdr) {
5754 ql_log(ql_log_warn, vha, 0xb11b,
5755 "Unable to allocate memory for Minidump template.\n");
5759 memset(mcp->mb, 0 , sizeof(mcp->mb));
5760 while (offset < ha->md_template_size) {
5761 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5762 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5763 mcp->mb[2] = LSW(RQST_TMPLT);
5764 mcp->mb[3] = MSW(RQST_TMPLT);
5765 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5766 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
5767 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
5768 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
5769 mcp->mb[8] = LSW(size);
5770 mcp->mb[9] = MSW(size);
5771 mcp->mb[10] = offset & 0x0000FFFF;
5772 mcp->mb[11] = offset & 0xFFFF0000;
5773 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5774 mcp->tov = MBX_TOV_SECONDS;
5775 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5776 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5777 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5778 rval = qla2x00_mailbox_command(vha, mcp);
5780 if (rval != QLA_SUCCESS) {
5781 ql_dbg(ql_dbg_mbx, vha, 0xb11c,
5782 "mailbox command FAILED=0x%x, subcode=%x.\n",
5783 ((mcp->mb[1] << 16) | mcp->mb[0]),
5784 ((mcp->mb[3] << 16) | mcp->mb[2]));
5787 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
5788 "Done %s.\n", __func__);
5789 offset = offset + size;
/*
 * qla81xx_set_led_config
 *	Program the LED configuration (MBC_SET_LED_CONFIG).  ISP81xx takes
 *	two config words; ISP8031 takes six.  Supported only on those two
 *	families.
 */
5795 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5798 struct qla_hw_data *ha = vha->hw;
5800 mbx_cmd_t *mcp = &mc;
5802 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5803 return QLA_FUNCTION_FAILED;
5805 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
5806 "Entered %s.\n", __func__);
5808 memset(mcp, 0, sizeof(mbx_cmd_t));
5809 mcp->mb[0] = MBC_SET_LED_CONFIG;
5810 mcp->mb[1] = led_cfg[0];
5811 mcp->mb[2] = led_cfg[1];
/* 8031 carries four additional config words in mb[3..6]. */
5812 if (IS_QLA8031(ha)) {
5813 mcp->mb[3] = led_cfg[2];
5814 mcp->mb[4] = led_cfg[3];
5815 mcp->mb[5] = led_cfg[4];
5816 mcp->mb[6] = led_cfg[5];
5819 mcp->out_mb = MBX_2|MBX_1|MBX_0;
/* Guard for the extra registers (its if-condition is elided here). */
5821 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5826 rval = qla2x00_mailbox_command(vha, mcp);
5827 if (rval != QLA_SUCCESS) {
5828 ql_dbg(ql_dbg_mbx, vha, 0x1134,
5829 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5831 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
5832 "Done %s.\n", __func__);
/*
 * qla81xx_get_led_config
 *	Read back the LED configuration (MBC_GET_LED_CONFIG) into the
 *	caller's led_cfg[] array -- two words on 81xx, six on 8031.
 *	The led_cfg assignments below appear to be in the success branch
 *	of the status check (else-brace elided in this listing).
 */
5839 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5842 struct qla_hw_data *ha = vha->hw;
5844 mbx_cmd_t *mcp = &mc;
5846 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5847 return QLA_FUNCTION_FAILED;
5849 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
5850 "Entered %s.\n", __func__);
5852 memset(mcp, 0, sizeof(mbx_cmd_t));
5853 mcp->mb[0] = MBC_GET_LED_CONFIG;
5855 mcp->out_mb = MBX_0;
5856 mcp->in_mb = MBX_2|MBX_1|MBX_0;
/* Extra result registers for 8031 (guard condition elided here). */
5858 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5862 rval = qla2x00_mailbox_command(vha, mcp);
5863 if (rval != QLA_SUCCESS) {
5864 ql_dbg(ql_dbg_mbx, vha, 0x1137,
5865 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5867 led_cfg[0] = mcp->mb[1];
5868 led_cfg[1] = mcp->mb[2];
5869 if (IS_QLA8031(ha)) {
5870 led_cfg[2] = mcp->mb[3];
5871 led_cfg[3] = mcp->mb[4];
5872 led_cfg[4] = mcp->mb[5];
5873 led_cfg[5] = mcp->mb[6];
5875 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
5876 "Done %s.\n", __func__);
/*
 * qla82xx_mbx_beacon_ctl
 *	Turn the beacon LED on or off on P3P-type (82xx/8044) adapters via
 *	MBC_SET_LED_CONFIG.  NOTE(review): the mb[7] value derived from
 *	'enable' is set on lines elided from this listing -- confirm the
 *	exact subcode against the full source.
 */
5883 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
5886 struct qla_hw_data *ha = vha->hw;
5888 mbx_cmd_t *mcp = &mc;
5890 if (!IS_P3P_TYPE(ha))
5891 return QLA_FUNCTION_FAILED;
5893 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
5894 "Entered %s.\n", __func__);
5896 memset(mcp, 0, sizeof(mbx_cmd_t));
5897 mcp->mb[0] = MBC_SET_LED_CONFIG;
5903 mcp->out_mb = MBX_7|MBX_0;
5905 mcp->tov = MBX_TOV_SECONDS;
5908 rval = qla2x00_mailbox_command(vha, mcp);
5909 if (rval != QLA_SUCCESS) {
5910 ql_dbg(ql_dbg_mbx, vha, 0x1128,
5911 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5913 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
5914 "Done %s.\n", __func__);
/*
 * qla83xx_wr_reg
 *	Write a 32-bit value to a remote (peer-function) register via
 *	MBC_WRITE_REMOTE_REG.  Address and data are each split into
 *	low/high 16-bit mailbox register pairs.  83xx/27xx/28xx only.
 */
5921 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
5924 struct qla_hw_data *ha = vha->hw;
5926 mbx_cmd_t *mcp = &mc;
5928 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5929 return QLA_FUNCTION_FAILED;
5931 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
5932 "Entered %s.\n", __func__);
5934 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
5935 mcp->mb[1] = LSW(reg);
5936 mcp->mb[2] = MSW(reg);
5937 mcp->mb[3] = LSW(data);
5938 mcp->mb[4] = MSW(data);
5939 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5941 mcp->in_mb = MBX_1|MBX_0;
5942 mcp->tov = MBX_TOV_SECONDS;
5944 rval = qla2x00_mailbox_command(vha, mcp);
5946 if (rval != QLA_SUCCESS) {
5947 ql_dbg(ql_dbg_mbx, vha, 0x1131,
5948 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5950 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
5951 "Done %s.\n", __func__);
/*
 * qla2x00_port_logout
 *	Perform an implicit LOGO of the given fcport (MBC_PORT_LOGOUT with
 *	BIT_15 in mb[10] selecting implicit mode).  Not supported on the
 *	oldest ISP2100/ISP2200 parts.
 */
5958 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
5961 struct qla_hw_data *ha = vha->hw;
5963 mbx_cmd_t *mcp = &mc;
5965 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
5966 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
5967 "Implicit LOGO Unsupported.\n");
5968 return QLA_FUNCTION_FAILED;
5972 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
5973 "Entering %s.\n", __func__);
5975 /* Perform Implicit LOGO. */
5976 mcp->mb[0] = MBC_PORT_LOGOUT;
5977 mcp->mb[1] = fcport->loop_id;
/* BIT_15 requests an implicit (no on-wire LOGO) logout. */
5978 mcp->mb[10] = BIT_15;
5979 mcp->out_mb = MBX_10|MBX_1|MBX_0;
5981 mcp->tov = MBX_TOV_SECONDS;
5983 rval = qla2x00_mailbox_command(vha, mcp);
5984 if (rval != QLA_SUCCESS)
5985 ql_dbg(ql_dbg_mbx, vha, 0x113d,
5986 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5988 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
5989 "Done %s.\n", __func__);
/*
 * qla83xx_rd_reg
 *	Read a 32-bit remote register via MBC_READ_REMOTE_REG.  The value
 *	comes back split across mb[3] (low) and mb[4] (high).  CAMRAM
 *	reads during soft-reset may return the 0xbad0bad0 sentinel, in
 *	which case the read is retried for up to 2 seconds -- the retry
 *	back-edge (label/goto or loop) is on lines elided in this listing.
 */
5995 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
5999 mbx_cmd_t *mcp = &mc;
6000 struct qla_hw_data *ha = vha->hw;
/* Deadline for the CAMRAM-sentinel retry described below. */
6001 unsigned long retry_max_time = jiffies + (2 * HZ);
6003 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6004 return QLA_FUNCTION_FAILED;
6006 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
6009 mcp->mb[0] = MBC_READ_REMOTE_REG;
6010 mcp->mb[1] = LSW(reg);
6011 mcp->mb[2] = MSW(reg);
6012 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6013 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
6014 mcp->tov = MBX_TOV_SECONDS;
6016 rval = qla2x00_mailbox_command(vha, mcp);
6018 if (rval != QLA_SUCCESS) {
6019 ql_dbg(ql_dbg_mbx, vha, 0x114c,
6020 "Failed=%x mb[0]=%x mb[1]=%x.\n",
6021 rval, mcp->mb[0], mcp->mb[1]);
6023 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
6024 if (*data == QLA8XXX_BAD_VALUE) {
6026 * During soft-reset CAMRAM register reads might
6027 * return 0xbad0bad0. So retry for MAX of 2 sec
6028 * while reading camram registers.
6030 if (time_after(jiffies, retry_max_time)) {
6031 ql_dbg(ql_dbg_mbx, vha, 0x1141,
6032 "Failure to read CAMRAM register. "
6033 "data=0x%x.\n", *data);
6034 return QLA_FUNCTION_FAILED;
6039 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
/*
 * qla83xx_restart_nic_firmware
 *	Ask the ISP83xx to restart its NIC-side firmware
 *	(MBC_RESTART_NIC_FIRMWARE).  A failure is considered serious
 *	enough to capture a firmware dump.
 */
6046 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
6050 mbx_cmd_t *mcp = &mc;
6051 struct qla_hw_data *ha = vha->hw;
6053 if (!IS_QLA83XX(ha))
6054 return QLA_FUNCTION_FAILED;
6056 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
6058 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
6059 mcp->out_mb = MBX_0;
6060 mcp->in_mb = MBX_1|MBX_0;
6061 mcp->tov = MBX_TOV_SECONDS;
6063 rval = qla2x00_mailbox_command(vha, mcp);
6065 if (rval != QLA_SUCCESS) {
6066 ql_dbg(ql_dbg_mbx, vha, 0x1144,
6067 "Failed=%x mb[0]=%x mb[1]=%x.\n",
6068 rval, mcp->mb[0], mcp->mb[1]);
/* Capture firmware state for post-mortem analysis. */
6069 ha->isp_ops->fw_dump(vha, 0);
6071 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
/*
 * qla83xx_access_control
 *	Issue MBC_SET_ACCESS_CONTROL on ISP8031.  The low byte of
 *	'options' is a subcode whose bits select the operation: BIT_2
 *	passes a start/end address range, BIT_5 requests the flash sector
 *	size (returned via *sector_size), BIT_3/BIT_4 and BIT_6/BIT_7
 *	relate to flash-lock and driver-lock IDs reported in mb[3]/mb[4].
 */
6078 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
6079 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
6083 mbx_cmd_t *mcp = &mc;
6084 uint8_t subcode = (uint8_t)options;
6085 struct qla_hw_data *ha = vha->hw;
6087 if (!IS_QLA8031(ha))
6088 return QLA_FUNCTION_FAILED;
6090 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
6092 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
6093 mcp->mb[1] = options;
6094 mcp->out_mb = MBX_1|MBX_0;
/* BIT_2: the operation takes an address range in mb[2..5]. */
6095 if (subcode & BIT_2) {
6096 mcp->mb[2] = LSW(start_addr);
6097 mcp->mb[3] = MSW(start_addr);
6098 mcp->mb[4] = LSW(end_addr);
6099 mcp->mb[5] = MSW(end_addr);
6100 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
6102 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6104 mcp->in_mb |= MBX_4|MBX_3;
6105 mcp->tov = MBX_TOV_SECONDS;
6107 rval = qla2x00_mailbox_command(vha, mcp);
6109 if (rval != QLA_SUCCESS) {
6110 ql_dbg(ql_dbg_mbx, vha, 0x1147,
6111 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
6112 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
/* Failure here also warrants a firmware dump. */
6114 ha->isp_ops->fw_dump(vha, 0);
6116 if (subcode & BIT_5)
6117 *sector_size = mcp->mb[1];
6118 else if (subcode & (BIT_6 | BIT_7)) {
6119 ql_dbg(ql_dbg_mbx, vha, 0x1148,
6120 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6121 } else if (subcode & (BIT_3 | BIT_4)) {
6122 ql_dbg(ql_dbg_mbx, vha, 0x1149,
6123 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6125 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
/*
 * qla2x00_dump_mctp_data
 *	Dump MCTP RAM (RAM ID 0x40) into a caller-provided DMA buffer via
 *	MBC_DUMP_RISC_RAM_EXTENDED.  NOTE(review): out_mb is OR-ed into
 *	below but its initialization is not visible in this excerpt --
 *	confirm it is assigned (not left indeterminate) in the full file.
 */
6132 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
6137 mbx_cmd_t *mcp = &mc;
6139 if (!IS_MCTP_CAPABLE(vha->hw))
6140 return QLA_FUNCTION_FAILED;
6142 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
6143 "Entered %s.\n", __func__);
/* addr is split mb[1]/mb[8]; DMA address low dword in mb[2]/mb[3],
 * high dword in mb[6]/mb[7]; transfer size in mb[4]/mb[5]. */
6145 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
6146 mcp->mb[1] = LSW(addr);
6147 mcp->mb[2] = MSW(req_dma);
6148 mcp->mb[3] = LSW(req_dma);
6149 mcp->mb[4] = MSW(size);
6150 mcp->mb[5] = LSW(size);
6151 mcp->mb[6] = MSW(MSD(req_dma));
6152 mcp->mb[7] = LSW(MSD(req_dma));
6153 mcp->mb[8] = MSW(addr);
6154 /* Setting RAM ID to valid */
6155 /* For MCTP RAM ID is 0x40 */
6156 mcp->mb[10] = BIT_7 | 0x40;
6158 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6162 mcp->tov = MBX_TOV_SECONDS;
6164 rval = qla2x00_mailbox_command(vha, mcp);
6166 if (rval != QLA_SUCCESS) {
6167 ql_dbg(ql_dbg_mbx, vha, 0x114e,
6168 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6170 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6171 "Done %s.\n", __func__);
/*
 * qla26xx_dport_diagnostics
 *	Run the D-port diagnostic (MBC_DPORT_DIAGNOSTICS) and have
 *	firmware DMA its results into dd_buf.  The buffer is streaming-
 *	mapped here and always unmapped before return; the command uses a
 *	quadrupled timeout because diagnostics can be slow.
 */
6178 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6179 void *dd_buf, uint size, uint options)
6183 mbx_cmd_t *mcp = &mc;
6186 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
6187 !IS_QLA28XX(vha->hw))
6188 return QLA_FUNCTION_FAILED;
6190 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6191 "Entered %s.\n", __func__);
6193 dd_dma = dma_map_single(&vha->hw->pdev->dev,
6194 dd_buf, size, DMA_FROM_DEVICE);
6195 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6196 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6197 return QLA_MEMORY_ALLOC_FAILED;
6200 memset(dd_buf, 0, size);
6202 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6203 mcp->mb[1] = options;
/* 64-bit DMA address split across mb[2]/mb[3] (low) and mb[6]/mb[7]. */
6204 mcp->mb[2] = MSW(LSD(dd_dma));
6205 mcp->mb[3] = LSW(LSD(dd_dma));
6206 mcp->mb[6] = MSW(MSD(dd_dma));
6207 mcp->mb[7] = LSW(MSD(dd_dma));
6209 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6210 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6211 mcp->buf_size = size;
6212 mcp->flags = MBX_DMA_IN;
6213 mcp->tov = MBX_TOV_SECONDS * 4;
6214 rval = qla2x00_mailbox_command(vha, mcp);
6216 if (rval != QLA_SUCCESS) {
6217 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6219 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6220 "Done %s.\n", __func__);
/* Unmap on every exit path (success and failure). */
6223 dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6224 size, DMA_FROM_DEVICE);
/*
 * qla2x00_async_mb_sp_done
 *	Completion callback for mailbox-over-IOCB SRBs: record the result
 *	code and wake the waiter blocked in qla24xx_send_mb_cmd().
 */
6229 static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
6231 sp->u.iocb_cmd.u.mbx.rc = res;
6233 complete(&sp->u.iocb_cmd.u.mbx.comp);
6234 /* don't free sp here. Let the caller do the free */
/*
 * qla24xx_send_mb_cmd
 *	Send a mailbox command through the IOCB (request queue) path
 *	instead of the mailbox registers, so non-critical commands do not
 *	serialize against chip-setup mailbox traffic.  Blocks until the
 *	SRB completes, then copies the returned registers back into mcp.
 *	NOTE(review): error-exit labels and the final return are on lines
 *	elided from this listing.
 */
6238 * This mailbox uses the iocb interface to send MB command.
6239 * This allows non-critial (non chip setup) command to go
6242 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6244 int rval = QLA_FUNCTION_FAILED;
/* Firmware must be up for the IOCB path to work. */
6248 if (!vha->hw->flags.fw_started)
6251 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6255 sp->type = SRB_MB_IOCB;
6256 sp->name = mb_to_str(mcp->mb[0]);
6258 c = &sp->u.iocb_cmd;
6259 c->timeout = qla2x00_async_iocb_timeout;
6260 init_completion(&c->u.mbx.comp);
6262 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* Stage the outbound mailbox registers in the SRB. */
6264 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6266 sp->done = qla2x00_async_mb_sp_done;
6268 rval = qla2x00_start_sp(sp);
6269 if (rval != QLA_SUCCESS) {
6270 ql_dbg(ql_dbg_mbx, vha, 0x1018,
6271 "%s: %s Failed submission. %x.\n",
6272 __func__, sp->name, rval);
6276 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6277 sp->name, sp->handle);
/* Wait for qla2x00_async_mb_sp_done(), then harvest the results. */
6279 wait_for_completion(&c->u.mbx.comp);
6280 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6284 case QLA_FUNCTION_TIMEOUT:
6285 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6286 __func__, sp->name, rval);
6289 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6290 __func__, sp->name);
6293 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6294 __func__, sp->name, rval);
/*
 * qla24xx_gpdb_wait
 *	Synchronously fetch a port database entry (MBC_GET_PORT_DATABASE)
 *	over the IOCB mailbox path and parse it into the fcport.  Sleeps,
 *	hence must not run from the DPC thread.
 *	NOTE(review): cpu_to_le16() is applied to mc.mb[] entries that the
 *	mailbox layer elsewhere treats as CPU-endian u16 -- on big-endian
 *	this byte-swaps the values; confirm against the file's endianness
 *	annotations.
 */
6306 * NOTE: Do not call this routine from DPC thread
6308 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6310 int rval = QLA_FUNCTION_FAILED;
6312 struct port_database_24xx *pd;
6313 struct qla_hw_data *ha = vha->hw;
6316 if (!vha->hw->flags.fw_started)
6319 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6321 ql_log(ql_log_warn, vha, 0xd047,
6322 "Failed to allocate port database structure.\n");
6326 memset(&mc, 0, sizeof(mc));
6327 mc.mb[0] = MBC_GET_PORT_DATABASE;
6328 mc.mb[1] = cpu_to_le16(fcport->loop_id);
/* DMA address of the port-database buffer, split across registers. */
6329 mc.mb[2] = MSW(pd_dma);
6330 mc.mb[3] = LSW(pd_dma);
6331 mc.mb[6] = MSW(MSD(pd_dma));
6332 mc.mb[7] = LSW(MSD(pd_dma));
6333 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6334 mc.mb[10] = cpu_to_le16((uint16_t)opt);
6336 rval = qla24xx_send_mb_cmd(vha, &mc);
6337 if (rval != QLA_SUCCESS) {
6338 ql_dbg(ql_dbg_mbx, vha, 0x1193,
6339 "%s: %8phC fail\n", __func__, fcport->port_name);
6343 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6345 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6346 __func__, fcport->port_name);
/* Buffer is released on all paths (cleanup label elided above). */
6350 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
/*
 * __qla24xx_parse_gpdb
 *	Interpret a firmware port-database entry: verify the port is in
 *	PRLI-complete state and still the same WWPN, then copy names,
 *	port_id, port type (FCP vs. NVMe roles from PRLI service
 *	parameter word 3) and class-of-service into the fcport.
 *	NOTE(review): the 'zero' comparand used below is declared on a
 *	line elided from this listing.
 */
6355 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6356 struct port_database_24xx *pd)
6358 int rval = QLA_SUCCESS;
6360 u8 current_login_state, last_login_state;
/* NVMe targets report login state in the high nibble, FCP in the low. */
6362 if (NVME_TARGET(vha->hw, fcport)) {
6363 current_login_state = pd->current_login_state >> 4;
6364 last_login_state = pd->last_login_state >> 4;
6366 current_login_state = pd->current_login_state & 0xf;
6367 last_login_state = pd->last_login_state & 0xf;
6370 /* Check for logged in state. */
6371 if (current_login_state != PDS_PRLI_COMPLETE) {
6372 ql_dbg(ql_dbg_mbx, vha, 0x119a,
6373 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6374 current_login_state, last_login_state, fcport->loop_id);
6375 rval = QLA_FUNCTION_FAILED;
6379 if (fcport->loop_id == FC_NO_LOOP_ID ||
6380 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6381 memcmp(fcport->port_name, pd->port_name, 8))) {
6382 /* We lost the device mid way. */
6383 rval = QLA_NOT_LOGGED_IN;
6387 /* Names are little-endian. */
6388 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6389 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6391 /* Get port_id of device. */
6392 fcport->d_id.b.domain = pd->port_id[0];
6393 fcport->d_id.b.area = pd->port_id[1];
6394 fcport->d_id.b.al_pa = pd->port_id[2];
6395 fcport->d_id.b.rsvd_1 = 0;
/* PRLI service-param word 3 bits are active-low role flags. */
6397 if (NVME_TARGET(vha->hw, fcport)) {
6398 fcport->port_type = FCT_NVME;
6399 if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
6400 fcport->port_type |= FCT_NVME_INITIATOR;
6401 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6402 fcport->port_type |= FCT_NVME_TARGET;
6403 if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
6404 fcport->port_type |= FCT_NVME_DISCOVERY;
6406 /* If not target must be initiator or unknown type. */
6407 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6408 fcport->port_type = FCT_INITIATOR;
6410 fcport->port_type = FCT_TARGET;
6412 /* Passback COS information. */
6413 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6414 FC_COS_CLASS2 : FC_COS_CLASS3;
6416 if (pd->prli_svc_param_word_3[0] & BIT_7) {
6417 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6418 fcport->conf_compl_supported = 1;
/*
 * qla24xx_gidlist_wait
 *	Synchronously fetch the firmware ID list (MBC_GET_ID_LIST) over
 *	the IOCB mailbox path into the caller's DMA buffer; the entry
 *	count is returned via *entries.  Sleeps -- not for DPC context.
 *	NOTE(review): cpu_to_le16() on a CPU-endian mb[] field -- same
 *	endianness concern as qla24xx_gpdb_wait().
 */
6426 * qla24xx_gidlist__wait
6427 * NOTE: don't call this routine from DPC thread.
6429 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6430 void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6432 int rval = QLA_FUNCTION_FAILED;
6435 if (!vha->hw->flags.fw_started)
6438 memset(&mc, 0, sizeof(mc));
6439 mc.mb[0] = MBC_GET_ID_LIST;
6440 mc.mb[2] = MSW(id_list_dma);
6441 mc.mb[3] = LSW(id_list_dma);
6442 mc.mb[6] = MSW(MSD(id_list_dma));
6443 mc.mb[7] = LSW(MSD(id_list_dma));
6445 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6447 rval = qla24xx_send_mb_cmd(vha, &mc);
6448 if (rval != QLA_SUCCESS) {
6449 ql_dbg(ql_dbg_mbx, vha, 0x119b,
6450 "%s: fail\n", __func__);
/* On success firmware reports the number of entries in mb[1]. */
6452 *entries = mc.mb[1];
6453 ql_dbg(ql_dbg_mbx, vha, 0x119c,
6454 "%s: done\n", __func__);
/*
 * qla27xx_set_zio_threshold
 *	Program the ZIO (interrupt coalescing) threshold via
 *	MBC_GET_SET_ZIO_THRESHOLD (mb[1] = 1 selects "set").
 *	NOTE(review): cpu_to_le16() on CPU-endian mb[] fields -- same
 *	endianness concern as the other wait helpers above.
 */
6460 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6464 mbx_cmd_t *mcp = &mc;
6466 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6467 "Entered %s\n", __func__);
6469 memset(mcp->mb, 0 , sizeof(mcp->mb));
6470 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6471 mcp->mb[1] = cpu_to_le16(1);
6472 mcp->mb[2] = cpu_to_le16(value);
6473 mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6474 mcp->in_mb = MBX_2 | MBX_0;
6475 mcp->tov = MBX_TOV_SECONDS;
6478 rval = qla2x00_mailbox_command(vha, mcp);
6480 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6481 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
/*
 * qla27xx_get_zio_threshold
 *	Read the current ZIO threshold via MBC_GET_SET_ZIO_THRESHOLD
 *	(mb[1] = 0 selects "get").  On success *value is presumably set
 *	from mb[2] on a line elided from this listing -- confirm.
 */
6486 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6490 mbx_cmd_t *mcp = &mc;
6492 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6493 "Entered %s\n", __func__);
6495 memset(mcp->mb, 0, sizeof(mcp->mb));
6496 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6497 mcp->mb[1] = cpu_to_le16(0);
6498 mcp->out_mb = MBX_1 | MBX_0;
6499 mcp->in_mb = MBX_2 | MBX_0;
6500 mcp->tov = MBX_TOV_SECONDS;
6503 rval = qla2x00_mailbox_command(vha, mcp);
6504 if (rval == QLA_SUCCESS)
6507 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6508 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
/*
 * qla2x00_read_sfp_dev
 *	Read the whole SFP device EEPROM (SFP_DEV_SIZE bytes) in
 *	SFP_BLOCK_SIZE chunks through ha->sfp_data, copying up to 'count'
 *	bytes into 'buf' (if non-NULL).  NOTE(review): initialization of
 *	addr/offset and the copied-byte counter 'c' are on lines elided
 *	from this listing.
 */
6514 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6516 struct qla_hw_data *ha = vha->hw;
6517 uint16_t iter, addr, offset;
6518 dma_addr_t phys_addr;
6522 memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6524 phys_addr = ha->sfp_data_dma;
6525 sfp_data = ha->sfp_data;
6528 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6530 /* Skip to next device address. */
6535 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6536 addr, offset, SFP_BLOCK_SIZE, BIT_1);
6537 if (rval != QLA_SUCCESS) {
6538 ql_log(ql_log_warn, vha, 0x706d,
6539 "Unable to read SFP data (%x/%x/%x).\n", rval,
/* Copy the block (or the remaining tail) to the caller's buffer. */
6545 if (buf && (c < count)) {
6548 if ((count - c) >= SFP_BLOCK_SIZE)
6549 sz = SFP_BLOCK_SIZE;
6553 memcpy(buf, sfp_data, sz);
6554 buf += SFP_BLOCK_SIZE;
6557 phys_addr += SFP_BLOCK_SIZE;
6558 sfp_data += SFP_BLOCK_SIZE;
6559 offset += SFP_BLOCK_SIZE;
/*
 * qla24xx_res_count_wait
 *	Synchronously fetch firmware resource counts
 *	(MBC_GET_RESOURCE_COUNTS) over the IOCB mailbox path, copying at
 *	most SIZEOF_IOCB_MB_REG bytes of the returned registers into
 *	out_mb.  Sleeps -- not for DPC context.
 */
6565 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6566 uint16_t *out_mb, int out_mb_sz)
6568 int rval = QLA_FUNCTION_FAILED;
6571 if (!vha->hw->flags.fw_started)
6574 memset(&mc, 0, sizeof(mc));
6575 mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6577 rval = qla24xx_send_mb_cmd(vha, &mc);
6578 if (rval != QLA_SUCCESS) {
6579 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6580 "%s: fail\n", __func__);
/* Clamp the copy to the register block size to avoid overrun. */
6582 if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6583 memcpy(out_mb, mc.mb, out_mb_sz);
6585 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6587 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6588 "%s: done\n", __func__);
/*
 * qla28xx_secure_flash_update
 *	Issue MBC_SECURE_FLASH_UPDATE for a flash region, passing the
 *	secure-flash-update block's DMA address and lengths.
 *	NOTE(review): the mb[1] = opts assignment and the start of the
 *	out_mb expression are on lines elided from this listing; also
 *	mb[5]/mb[6] take MSW/LSW of the raw dma_addr_t (low dword only)
 *	while mb[7]/mb[8] carry the high dword -- confirm intended.
 */
6594 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
6595 uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
6600 mbx_cmd_t *mcp = &mc;
6602 mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
6604 mcp->mb[2] = region;
6605 mcp->mb[3] = MSW(len);
6606 mcp->mb[4] = LSW(len);
6607 mcp->mb[5] = MSW(sfub_dma_addr);
6608 mcp->mb[6] = LSW(sfub_dma_addr);
6609 mcp->mb[7] = MSW(MSD(sfub_dma_addr));
6610 mcp->mb[8] = LSW(MSD(sfub_dma_addr));
6611 mcp->mb[9] = sfub_len;
6613 MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6614 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6615 mcp->tov = MBX_TOV_SECONDS;
6617 rval = qla2x00_mailbox_command(vha, mcp);
6619 if (rval != QLA_SUCCESS) {
6620 ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
6621 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
/*
 * qla2xxx_write_remote_register
 *	Write a 32-bit value to a remote register (MBC_WRITE_REMOTE_REG);
 *	functionally parallel to qla83xx_wr_reg() but without the
 *	chip-family gate.
 */
6628 int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6633 mbx_cmd_t *mcp = &mc;
6635 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6636 "Entered %s.\n", __func__);
6638 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6639 mcp->mb[1] = LSW(addr);
6640 mcp->mb[2] = MSW(addr);
6641 mcp->mb[3] = LSW(data);
6642 mcp->mb[4] = MSW(data);
6643 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6644 mcp->in_mb = MBX_1|MBX_0;
6645 mcp->tov = MBX_TOV_SECONDS;
6647 rval = qla2x00_mailbox_command(vha, mcp);
6649 if (rval != QLA_SUCCESS) {
6650 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6651 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6653 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6654 "Done %s.\n", __func__);
6660 int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6665 mbx_cmd_t *mcp = &mc;
6667 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6668 "Entered %s.\n", __func__);
6670 mcp->mb[0] = MBC_READ_REMOTE_REG;
6671 mcp->mb[1] = LSW(addr);
6672 mcp->mb[2] = MSW(addr);
6673 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6674 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6675 mcp->tov = MBX_TOV_SECONDS;
6677 rval = qla2x00_mailbox_command(vha, mcp);
6679 *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);
6681 if (rval != QLA_SUCCESS) {
6682 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6683 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6685 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6686 "Done %s.\n", __func__);