2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/gfp.h>
/* Lookup table mapping mailbox command opcodes to short human-readable
 * names used in debug output.
 * NOTE(review): this listing is sampled — the struct's field declarations
 * and the table terminator are not visible in this view. */
13 static struct mb_cmd_name {
17 {MBC_GET_PORT_DATABASE, "GPDB"},
18 {MBC_GET_ID_LIST, "GIDList"},
19 {MBC_GET_LINK_PRIV_STATS, "Stats"},
/* mb_to_str() - translate a mailbox command opcode into its debug name by
 * linear search of the mb_str[] table above.
 * NOTE(review): the loop body and the fallback return for an unknown cmd
 * are elided from this view. */
22 static const char *mb_to_str(uint16_t cmd)
25 struct mb_cmd_name *e;
27 for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
/* Table of mailbox commands that are permitted while the ISP is still in
 * ROM/early-boot state (consulted by is_rom_cmd() below); commands on this
 * list bypass the "ISP abort pending" early-timeout check in
 * qla2x00_mailbox_command(). */
35 static struct rom_cmd {
39 { MBC_EXECUTE_FIRMWARE },
40 { MBC_READ_RAM_WORD },
41 { MBC_MAILBOX_REGISTER_TEST },
42 { MBC_VERIFY_CHECKSUM },
43 { MBC_GET_FIRMWARE_VERSION },
44 { MBC_LOAD_RISC_RAM },
45 { MBC_DUMP_RISC_RAM },
46 { MBC_LOAD_RISC_RAM_EXTENDED },
47 { MBC_DUMP_RISC_RAM_EXTENDED },
48 { MBC_WRITE_RAM_WORD_EXTENDED },
49 { MBC_READ_RAM_EXTENDED },
50 { MBC_GET_RESOURCE_COUNTS },
51 { MBC_SET_FIRMWARE_OPTION },
52 { MBC_MID_INITIALIZE_FIRMWARE },
53 { MBC_GET_FIRMWARE_STATE },
54 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
55 { MBC_GET_RETRY_COUNT },
56 { MBC_TRACE_CONTROL },
57 { MBC_INITIALIZE_MULTIQ },
58 { MBC_IOCB_COMMAND_A64 },
59 { MBC_GET_ADAPTER_LOOP_ID },
/* is_rom_cmd() - return non-zero when @cmd appears in the rom_cmds[] table,
 * i.e. the command may be issued while the adapter is in ROM state.
 * NOTE(review): loop body and return statements are elided from this view. */
63 static int is_rom_cmd(uint16_t cmd)
68 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
78 * qla2x00_mailbox_command
79 * Issue mailbox command and waits for completion.
82 * ha = adapter block pointer.
83 * mcp = driver internal mbx struct pointer.
86 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
89 * 0 : QLA_SUCCESS = cmd performed success
90 * 1 : QLA_FUNCTION_FAILED (error encountered)
91 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
97 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Core mailbox engine: serializes against other mailbox users, loads the
 * outbound mailbox registers described by mcp->out_mb, rings the host
 * interrupt, then waits (interrupt-driven or polled) for firmware
 * completion and copies back the registers selected by mcp->in_mb.
 * NOTE(review): this listing is sampled — several declarations, braces and
 * statements are not visible here; "®" below is a mangled "&reg"
 * (HTML-entity corruption of the register-block pointer). */
100 unsigned long flags = 0;
102 uint8_t abort_active;
104 uint16_t command = 0;
106 uint16_t __iomem *optr;
109 unsigned long wait_time;
110 struct qla_hw_data *ha = vha->hw;
111 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
114 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
/* Early-exit guards: dead PCI channel, failed device, disconnect during
 * unload, permanent EEH failure, hung P3P firmware — each path returns
 * QLA_FUNCTION_TIMEOUT without touching the hardware. */
116 if (ha->pdev->error_state > pci_channel_io_frozen) {
117 ql_log(ql_log_warn, vha, 0x1001,
118 "error_state is greater than pci_channel_io_frozen, "
120 return QLA_FUNCTION_TIMEOUT;
123 if (vha->device_flags & DFLG_DEV_FAILED) {
124 ql_log(ql_log_warn, vha, 0x1002,
125 "Device in failed state, exiting.\n");
126 return QLA_FUNCTION_TIMEOUT;
129 /* if PCI error, then avoid mbx processing.*/
130 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
131 test_bit(UNLOADING, &base_vha->dpc_flags)) {
132 ql_log(ql_log_warn, vha, 0xd04e,
133 "PCI error, exiting.\n");
134 return QLA_FUNCTION_TIMEOUT;
138 io_lock_on = base_vha->flags.init_done;
141 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
144 if (ha->flags.pci_channel_io_perm_failure) {
145 ql_log(ql_log_warn, vha, 0x1003,
146 "Perm failure on EEH timeout MBX, exiting.\n");
147 return QLA_FUNCTION_TIMEOUT;
150 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
151 /* Setting Link-Down error */
152 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
153 ql_log(ql_log_warn, vha, 0x1004,
154 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
155 return QLA_FUNCTION_TIMEOUT;
158 /* check if ISP abort is active and return cmd with timeout */
159 if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
160 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
161 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
162 !is_rom_cmd(mcp->mb[0])) {
163 ql_log(ql_log_info, vha, 0x1005,
164 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
166 return QLA_FUNCTION_TIMEOUT;
170 * Wait for active mailbox commands to finish by waiting at most tov
171 * seconds. This is to serialize actual issuing of mailbox cmds during
172 * non ISP abort time.
174 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
175 /* Timeout occurred. Return error. */
176 ql_log(ql_log_warn, vha, 0xd035,
177 "Cmd access timeout, cmd=0x%x, Exiting.\n",
179 return QLA_FUNCTION_TIMEOUT;
182 ha->flags.mbox_busy = 1;
183 /* Save mailbox command for debug */
186 ql_dbg(ql_dbg_mbx, vha, 0x1006,
187 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
/* Program outbound mailbox registers under the hardware lock; the mailbox
 * base differs per ISP family (82xx / FWI2-capable / legacy). */
189 spin_lock_irqsave(&ha->hardware_lock, flags);
191 /* Load mailbox registers. */
193 optr = (uint16_t __iomem *)®->isp82.mailbox_in[0];
194 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
195 optr = (uint16_t __iomem *)®->isp24.mailbox0;
197 optr = (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 0);
200 command = mcp->mb[0];
201 mboxes = mcp->out_mb;
203 ql_dbg(ql_dbg_mbx, vha, 0x1111,
204 "Mailbox registers (OUT):\n");
/* Only the registers whose bit is set in out_mb are written; QLA2200 has a
 * discontiguous register map, so mbox 8+ is re-addressed at cnt == 8. */
205 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
206 if (IS_QLA2200(ha) && cnt == 8)
208 (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 8);
209 if (mboxes & BIT_0) {
210 ql_dbg(ql_dbg_mbx, vha, 0x1112,
211 "mbox[%d]<-0x%04x\n", cnt, *iptr);
212 WRT_REG_WORD(optr, *iptr);
220 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
221 "I/O Address = %p.\n", optr);
223 /* Issue set host interrupt command to send cmd out. */
224 ha->flags.mbox_int = 0;
225 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
227 /* Unlock mbx registers and wait for interrupt */
228 ql_dbg(ql_dbg_mbx, vha, 0x100f,
229 "Going to unlock irq & waiting for interrupts. "
230 "jiffies=%lx.\n", jiffies);
232 /* Wait for mbx cmd completion until timeout */
/* Two completion strategies: interrupt-driven (normal path, when no ISP
 * abort is active and init is done) or polled (abort path / NOPOLLING
 * hardware). Both first ring the host-interrupt doorbell. */
234 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
235 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
237 if (IS_P3P_TYPE(ha)) {
238 if (RD_REG_DWORD(®->isp82.hint) &
239 HINT_MBX_INT_PENDING) {
240 spin_unlock_irqrestore(&ha->hardware_lock,
242 ha->flags.mbox_busy = 0;
243 ql_dbg(ql_dbg_mbx, vha, 0x1010,
244 "Pending mailbox timeout, exiting.\n");
245 rval = QLA_FUNCTION_TIMEOUT;
248 WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING);
249 } else if (IS_FWI2_CAPABLE(ha))
250 WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT);
252 WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT);
253 spin_unlock_irqrestore(&ha->hardware_lock, flags);
256 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
258 ql_dbg(ql_dbg_mbx, vha, 0x117a,
259 "cmd=%x Timeout.\n", command);
260 spin_lock_irqsave(&ha->hardware_lock, flags);
261 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
262 spin_unlock_irqrestore(&ha->hardware_lock, flags);
264 if (time_after(jiffies, wait_time + 5 * HZ))
265 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
266 command, jiffies_to_msecs(jiffies - wait_time));
/* Polled path: spin on mbox_int with qla2x00_poll() until tov expires. */
268 ql_dbg(ql_dbg_mbx, vha, 0x1011,
269 "Cmd=%x Polling Mode.\n", command);
271 if (IS_P3P_TYPE(ha)) {
272 if (RD_REG_DWORD(®->isp82.hint) &
273 HINT_MBX_INT_PENDING) {
274 spin_unlock_irqrestore(&ha->hardware_lock,
276 ha->flags.mbox_busy = 0;
277 ql_dbg(ql_dbg_mbx, vha, 0x1012,
278 "Pending mailbox timeout, exiting.\n");
279 rval = QLA_FUNCTION_TIMEOUT;
282 WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING);
283 } else if (IS_FWI2_CAPABLE(ha))
284 WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT);
286 WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT);
287 spin_unlock_irqrestore(&ha->hardware_lock, flags);
289 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
290 while (!ha->flags.mbox_int) {
291 if (time_after(jiffies, wait_time))
294 /* Check for pending interrupts. */
295 qla2x00_poll(ha->rsp_q_map[0]);
297 if (!ha->flags.mbox_int &&
299 command == MBC_LOAD_RISC_RAM_EXTENDED))
302 ql_dbg(ql_dbg_mbx, vha, 0x1013,
304 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
307 /* Check whether we timed out */
/* Completion path: mailbox_out[0] != MBS_COMMAND_COMPLETE maps to
 * QLA_FUNCTION_FAILED; registers selected by in_mb are copied back into
 * mcp->mb[]. */
308 if (ha->flags.mbox_int) {
311 ql_dbg(ql_dbg_mbx, vha, 0x1014,
312 "Cmd=%x completed.\n", command);
314 /* Got interrupt. Clear the flag. */
315 ha->flags.mbox_int = 0;
316 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
318 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
319 ha->flags.mbox_busy = 0;
320 /* Setting Link-Down error */
321 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
323 rval = QLA_FUNCTION_FAILED;
324 ql_log(ql_log_warn, vha, 0xd048,
325 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
329 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
330 rval = QLA_FUNCTION_FAILED;
332 /* Load return mailbox registers. */
334 iptr = (uint16_t *)&ha->mailbox_out[0];
337 ql_dbg(ql_dbg_mbx, vha, 0x1113,
338 "Mailbox registers (IN):\n");
339 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
340 if (mboxes & BIT_0) {
342 ql_dbg(ql_dbg_mbx, vha, 0x1114,
343 "mbox[%d]->0x%04x\n", cnt, *iptr2);
/* Timeout path: snapshot mailbox/interrupt-control registers for the log,
 * dump registers, and (unless this was an intentional MBC_GEN_SYSTEM_ERROR)
 * capture a firmware dump while the PCI device is still reachable. */
353 uint32_t ictrl, host_status, hccr;
356 if (IS_FWI2_CAPABLE(ha)) {
357 mb[0] = RD_REG_WORD(®->isp24.mailbox0);
358 mb[1] = RD_REG_WORD(®->isp24.mailbox1);
359 mb[2] = RD_REG_WORD(®->isp24.mailbox2);
360 mb[3] = RD_REG_WORD(®->isp24.mailbox3);
361 mb[7] = RD_REG_WORD(®->isp24.mailbox7);
362 ictrl = RD_REG_DWORD(®->isp24.ictrl);
363 host_status = RD_REG_DWORD(®->isp24.host_status);
364 hccr = RD_REG_DWORD(®->isp24.hccr);
366 ql_log(ql_log_warn, vha, 0xd04c,
367 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
368 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
369 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
370 mb[7], host_status, hccr);
373 mb[0] = RD_MAILBOX_REG(ha, ®->isp, 0);
374 ictrl = RD_REG_WORD(®->isp.ictrl);
375 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
376 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
377 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
379 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
381 /* Capture FW dump only, if PCI device active */
382 if (!pci_channel_offline(vha->hw->pdev)) {
383 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
384 if (w == 0xffff || ictrl == 0xffffffff) {
385 /* This is special case if there is unload
386 * of driver happening and if PCI device go
387 * into bad state due to PCI error condition
388 * then only PCI ERR flag would be set.
389 * we will do premature exit for above case.
391 ha->flags.mbox_busy = 0;
392 rval = QLA_FUNCTION_TIMEOUT;
396 /* Attempt to capture firmware dump for further
397 * analysis of the current firmware state. We do not
398 * need to do this if we are intentionally generating
401 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
402 ha->isp_ops->fw_dump(vha, 0);
403 rval = QLA_FUNCTION_TIMEOUT;
407 ha->flags.mbox_busy = 0;
/* Cleanup: drain any pending response interrupts, then on timeout either
 * schedule an ISP abort via the DPC thread or, when already running in
 * DPC context, invoke abort_isp() directly. */
412 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
413 ql_dbg(ql_dbg_mbx, vha, 0x101a,
414 "Checking for additional resp interrupt.\n");
416 /* polling mode for non isp_abort commands. */
417 qla2x00_poll(ha->rsp_q_map[0]);
420 if (rval == QLA_FUNCTION_TIMEOUT &&
421 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
422 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
423 ha->flags.eeh_busy) {
424 /* not in dpc. schedule it for dpc to take over. */
425 ql_dbg(ql_dbg_mbx, vha, 0x101b,
426 "Timeout, schedule isp_abort_needed.\n");
428 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
429 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
430 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
431 if (IS_QLA82XX(ha)) {
432 ql_dbg(ql_dbg_mbx, vha, 0x112a,
433 "disabling pause transmit on port "
436 QLA82XX_CRB_NIU + 0x98,
437 CRB_NIU_XG_PAUSE_CTL_P0|
438 CRB_NIU_XG_PAUSE_CTL_P1);
440 ql_log(ql_log_info, base_vha, 0x101c,
441 "Mailbox cmd timeout occurred, cmd=0x%x, "
442 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
443 "abort.\n", command, mcp->mb[0],
445 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
446 qla2xxx_wake_dpc(vha);
448 } else if (!abort_active) {
449 /* call abort directly since we are in the DPC thread */
450 ql_dbg(ql_dbg_mbx, vha, 0x101d,
451 "Timeout, calling abort_isp.\n");
453 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
454 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
455 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
456 if (IS_QLA82XX(ha)) {
457 ql_dbg(ql_dbg_mbx, vha, 0x112b,
458 "disabling pause transmit on port "
461 QLA82XX_CRB_NIU + 0x98,
462 CRB_NIU_XG_PAUSE_CTL_P0|
463 CRB_NIU_XG_PAUSE_CTL_P1);
465 ql_log(ql_log_info, base_vha, 0x101e,
466 "Mailbox cmd timeout occurred, cmd=0x%x, "
467 "mb[0]=0x%x. Scheduling ISP abort ",
468 command, mcp->mb[0]);
469 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
470 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
471 /* Allow next mbx cmd to come in. */
472 complete(&ha->mbx_cmd_comp);
473 if (ha->isp_ops->abort_isp(vha)) {
474 /* Failed. retry later. */
475 set_bit(ISP_ABORT_NEEDED,
478 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
479 ql_dbg(ql_dbg_mbx, vha, 0x101f,
480 "Finished abort_isp.\n");
487 /* Allow next mbx cmd to come in. */
488 complete(&ha->mbx_cmd_comp);
/* On failure, dump the outbound mailbox contents and (FWI2) host status
 * registers at warning level for post-mortem analysis. */
492 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
493 pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
494 dev_name(&ha->pdev->dev), 0x1020+0x800,
498 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
499 if (mboxes & BIT_0) {
500 printk(" mb[%u]=%x", i, mcp->mb[i]);
503 pr_warn(" cmd=%x ****\n", command);
505 ql_dbg(ql_dbg_mbx, vha, 0x1198,
506 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
507 RD_REG_DWORD(®->isp24.host_status),
508 RD_REG_DWORD(®->isp24.ictrl),
509 RD_REG_DWORD(®->isp24.istatus));
511 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
/* qla2x00_load_ram() - load a firmware segment from host memory (req_dma)
 * into RISC RAM at risc_addr via MBC_LOAD_RISC_RAM / _EXTENDED.  The
 * extended opcode is used when the address needs >16 bits or the ISP is
 * FWI2-capable; FWI2 parts also take a 32-bit code size in mb[4]/mb[5].
 * NOTE(review): sampled listing — some declarations/braces not visible. */
518 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
519 uint32_t risc_code_size)
522 struct qla_hw_data *ha = vha->hw;
524 mbx_cmd_t *mcp = &mc;
526 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
527 "Entered %s.\n", __func__);
529 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
530 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
531 mcp->mb[8] = MSW(risc_addr);
532 mcp->out_mb = MBX_8|MBX_0;
534 mcp->mb[0] = MBC_LOAD_RISC_RAM;
537 mcp->mb[1] = LSW(risc_addr);
538 mcp->mb[2] = MSW(req_dma);
539 mcp->mb[3] = LSW(req_dma);
540 mcp->mb[6] = MSW(MSD(req_dma));
541 mcp->mb[7] = LSW(MSD(req_dma));
542 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
543 if (IS_FWI2_CAPABLE(ha)) {
544 mcp->mb[4] = MSW(risc_code_size);
545 mcp->mb[5] = LSW(risc_code_size);
546 mcp->out_mb |= MBX_5|MBX_4;
548 mcp->mb[4] = LSW(risc_code_size);
549 mcp->out_mb |= MBX_4;
553 mcp->tov = MBX_TOV_SECONDS;
555 rval = qla2x00_mailbox_command(vha, mcp);
557 if (rval != QLA_SUCCESS) {
558 ql_dbg(ql_dbg_mbx, vha, 0x1023,
559 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
561 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
562 "Done %s.\n", __func__);
568 #define EXTENDED_BB_CREDITS BIT_0
569 #define NVME_ENABLE_FLAG BIT_3
/* qla25xx_set_sfp_lr_dist() - build the mb[4] long-range value from the
 * SFP-detected distance: BIT_0 (long-range enable) plus, on 83xx/27xx, the
 * detected distance shifted into the firmware field at LR_DIST_FW_POS.
 * NOTE(review): the return statement is elided from this view. */
570 static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
572 uint16_t mb4 = BIT_0;
574 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
575 mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
/* qla25xx_set_nvr_lr_dist() - NVRAM-driven variant of the above: derive the
 * mb[4] long-range value from nvram enhanced_features on 83xx/27xx parts.
 * NOTE(review): the return statement is elided from this view. */
580 static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
582 uint16_t mb4 = BIT_0;
584 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
585 struct nvram_81xx *nv = ha->nvram;
587 mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
595 * Start adapter firmware.
598 * ha = adapter block pointer.
599 * TARGET_QUEUE_LOCK must be released.
600 * ADAPTER_STATE_LOCK must be released.
603 * qla2x00 local function return status code.
/* qla2x00_execute_fw() - issue MBC_EXECUTE_FIRMWARE to start the loaded
 * firmware at risc_addr.  On FWI2-capable parts mb[4] carries feature
 * flags (long-range/SFP distance, NVMe enable, extended login, exchange
 * offload) and mb[11] an optional minimum link speed from NVRAM; on
 * success the returned registers populate fw_ability_mask, exchange count
 * and speed capabilities.
 * NOTE(review): sampled listing — some statements/braces not visible. */
609 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
612 struct qla_hw_data *ha = vha->hw;
614 mbx_cmd_t *mcp = &mc;
616 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
617 "Entered %s.\n", __func__);
619 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
622 if (IS_FWI2_CAPABLE(ha)) {
623 mcp->mb[1] = MSW(risc_addr);
624 mcp->mb[2] = LSW(risc_addr);
/* Long-range (LR) setting: prefer the SFP-detected distance when
 * ql2xautodetectsfp is set, otherwise fall back to the NVRAM value. */
628 ha->flags.using_lr_setting = 0;
629 if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
631 if (ql2xautodetectsfp) {
632 if (ha->flags.detected_lr_sfp) {
634 qla25xx_set_sfp_lr_dist(ha);
635 ha->flags.using_lr_setting = 1;
638 struct nvram_81xx *nv = ha->nvram;
639 /* set LR distance if specified in nvram */
640 if (nv->enhanced_features &
641 NEF_LR_DIST_ENABLE) {
643 qla25xx_set_nvr_lr_dist(ha);
644 ha->flags.using_lr_setting = 1;
649 if (ql2xnvmeenable && IS_QLA27XX(ha))
650 mcp->mb[4] |= NVME_ENABLE_FLAG;
652 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
653 struct nvram_81xx *nv = ha->nvram;
654 /* set minimum speed if specified in nvram */
655 if (nv->min_link_speed >= 2 &&
656 nv->min_link_speed <= 5) {
658 mcp->mb[11] = nv->min_link_speed;
659 mcp->out_mb |= MBX_11;
661 vha->min_link_speed_feat = nv->min_link_speed;
665 if (ha->flags.exlogins_enabled)
666 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
668 if (ha->flags.exchoffld_enabled)
669 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
671 mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
672 mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
/* Legacy (non-FWI2) path: 16-bit start address only. */
674 mcp->mb[1] = LSW(risc_addr);
675 mcp->out_mb |= MBX_1;
676 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
678 mcp->out_mb |= MBX_2;
682 mcp->tov = MBX_TOV_SECONDS;
684 rval = qla2x00_mailbox_command(vha, mcp);
686 if (rval != QLA_SUCCESS) {
687 ql_dbg(ql_dbg_mbx, vha, 0x1026,
688 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
/* Success: decode ability mask, exchange count, max/min link speed. */
690 if (IS_FWI2_CAPABLE(ha)) {
691 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
692 ql_dbg(ql_dbg_mbx, vha, 0x119a,
693 "fw_ability_mask=%x.\n", ha->fw_ability_mask);
694 ql_dbg(ql_dbg_mbx, vha, 0x1027,
695 "exchanges=%x.\n", mcp->mb[1]);
696 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
697 ha->max_speed_sup = mcp->mb[2] & BIT_0;
698 ql_dbg(ql_dbg_mbx, vha, 0x119b,
699 "Maximum speed supported=%s.\n",
700 ha->max_speed_sup ? "32Gps" : "16Gps");
701 if (vha->min_link_speed_feat) {
702 ha->min_link_speed = mcp->mb[5];
703 ql_dbg(ql_dbg_mbx, vha, 0x119c,
704 "Minimum speed set=%s.\n",
705 mcp->mb[5] == 5 ? "32Gps" :
706 mcp->mb[5] == 4 ? "16Gps" :
707 mcp->mb[5] == 3 ? "8Gps" :
708 mcp->mb[5] == 2 ? "4Gps" :
713 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
721 * qla_get_exlogin_status
722 * Get extended login status
723 * uses the memory offload control/status Mailbox
726 * ha: adapter state pointer.
727 * fwopt: firmware options
730 * qla2x00 local function status
735 #define FETCH_XLOGINS_STAT 0x8
/* qla_get_exlogin_status() - query extended-login status via the memory
 * offload control/status mailbox (sub-op FETCH_XLOGINS_STAT).  On success
 * returns the per-login buffer size in *buf_sz (mb[4]) and the extended
 * login count in *ex_logins_cnt (mb[10]).
 * NOTE(review): sampled listing — some statements/braces not visible. */
737 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
738 uint16_t *ex_logins_cnt)
742 mbx_cmd_t *mcp = &mc;
744 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
745 "Entered %s\n", __func__);
747 memset(mcp->mb, 0 , sizeof(mcp->mb));
748 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
749 mcp->mb[1] = FETCH_XLOGINS_STAT;
750 mcp->out_mb = MBX_1|MBX_0;
751 mcp->in_mb = MBX_10|MBX_4|MBX_0;
752 mcp->tov = MBX_TOV_SECONDS;
755 rval = qla2x00_mailbox_command(vha, mcp);
756 if (rval != QLA_SUCCESS) {
757 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
759 *buf_sz = mcp->mb[4];
760 *ex_logins_cnt = mcp->mb[10];
762 ql_log(ql_log_info, vha, 0x1190,
763 "buffer size 0x%x, exchange login count=%d\n",
764 mcp->mb[4], mcp->mb[10]);
766 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
767 "Done %s.\n", __func__);
774 * qla_set_exlogin_mem_cfg
775 * set extended login memory configuration
776 * Mbx needs to be issues before init_cb is set
779 * ha: adapter state pointer.
780 * buffer: buffer pointer
781 * phys_addr: physical address of buffer
782 * size: size of buffer
783 * TARGET_QUEUE_LOCK must be released
784 * ADAPTER_STATE_LOCK must be release
787 * qla2x00 local function status code.
792 #define CONFIG_XLOGINS_MEM 0x3
/* qla_set_exlogin_mem_cfg() - hand the pre-allocated extended-login buffer
 * (64-bit DMA address in mb[2,3,6,7], size ha->exlogin_size in mb[8,9]) to
 * firmware via sub-op CONFIG_XLOGINS_MEM.  Must be issued before init_cb.
 * NOTE(review): sampled listing — some statements/braces not visible. */
794 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
798 mbx_cmd_t *mcp = &mc;
799 struct qla_hw_data *ha = vha->hw;
801 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
802 "Entered %s.\n", __func__);
804 memset(mcp->mb, 0 , sizeof(mcp->mb));
805 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
806 mcp->mb[1] = CONFIG_XLOGINS_MEM;
807 mcp->mb[2] = MSW(phys_addr);
808 mcp->mb[3] = LSW(phys_addr);
809 mcp->mb[6] = MSW(MSD(phys_addr));
810 mcp->mb[7] = LSW(MSD(phys_addr));
811 mcp->mb[8] = MSW(ha->exlogin_size);
812 mcp->mb[9] = LSW(ha->exlogin_size);
813 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
814 mcp->in_mb = MBX_11|MBX_0;
815 mcp->tov = MBX_TOV_SECONDS;
817 rval = qla2x00_mailbox_command(vha, mcp);
818 if (rval != QLA_SUCCESS) {
820 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
822 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
823 "Done %s.\n", __func__);
830 * qla_get_exchoffld_status
831 * Get exchange offload status
832 * uses the memory offload control/status Mailbox
835 * ha: adapter state pointer.
836 * fwopt: firmware options
839 * qla2x00 local function status
844 #define FETCH_XCHOFFLD_STAT 0x2
/* qla_get_exchoffld_status() - query exchange-offload status via the memory
 * offload control/status mailbox (sub-op FETCH_XCHOFFLD_STAT).  On success
 * returns the buffer size in *buf_sz (mb[4]) and the offload exchange
 * count in *ex_logins_cnt (mb[10]).
 * NOTE(review): sampled listing — some statements/braces not visible. */
846 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
847 uint16_t *ex_logins_cnt)
851 mbx_cmd_t *mcp = &mc;
853 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
854 "Entered %s\n", __func__);
856 memset(mcp->mb, 0 , sizeof(mcp->mb));
857 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
858 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
859 mcp->out_mb = MBX_1|MBX_0;
860 mcp->in_mb = MBX_10|MBX_4|MBX_0;
861 mcp->tov = MBX_TOV_SECONDS;
864 rval = qla2x00_mailbox_command(vha, mcp);
865 if (rval != QLA_SUCCESS) {
866 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
868 *buf_sz = mcp->mb[4];
869 *ex_logins_cnt = mcp->mb[10];
871 ql_log(ql_log_info, vha, 0x118e,
872 "buffer size 0x%x, exchange offload count=%d\n",
873 mcp->mb[4], mcp->mb[10]);
875 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
876 "Done %s.\n", __func__);
883 * qla_set_exchoffld_mem_cfg
884 * Set exchange offload memory configuration
885 * Mbx needs to be issues before init_cb is set
888 * ha: adapter state pointer.
889 * buffer: buffer pointer
890 * phys_addr: physical address of buffer
891 * size: size of buffer
892 * TARGET_QUEUE_LOCK must be released
893 * ADAPTER_STATE_LOCK must be release
896 * qla2x00 local function status code.
901 #define CONFIG_XCHOFFLD_MEM 0x3
/* qla_set_exchoffld_mem_cfg() - hand the exchange-offload buffer
 * (ha->exchoffld_buf_dma / ha->exchoffld_size) to firmware via sub-op
 * CONFIG_XCHOFFLD_MEM.  Must be issued before init_cb.
 * NOTE(review): sampled listing — some statements/braces not visible. */
903 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
907 mbx_cmd_t *mcp = &mc;
908 struct qla_hw_data *ha = vha->hw;
910 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
911 "Entered %s.\n", __func__);
913 memset(mcp->mb, 0 , sizeof(mcp->mb));
914 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
915 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
916 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
917 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
918 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
919 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
920 mcp->mb[8] = MSW(ha->exchoffld_size);
921 mcp->mb[9] = LSW(ha->exchoffld_size);
922 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
923 mcp->in_mb = MBX_11|MBX_0;
924 mcp->tov = MBX_TOV_SECONDS;
926 rval = qla2x00_mailbox_command(vha, mcp);
927 if (rval != QLA_SUCCESS) {
929 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
931 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
932 "Done %s.\n", __func__);
939 * qla2x00_get_fw_version
940 * Get firmware version.
943 * ha: adapter state pointer.
944 * major: pointer for major number.
945 * minor: pointer for minor number.
946 * subminor: pointer for subminor number.
949 * qla2x00 local function return status code.
/* qla2x00_get_fw_version() - issue MBC_GET_FIRMWARE_VERSION and decode the
 * returned registers into ha->fw_*_version, attributes, memory size, and —
 * on 81xx/8031/8044/27xx — MPI/PHY/PEP versions and shared/DDR RAM ranges.
 * Also latches NVMe support from fw_attributes_h when ql2xnvmeenable is set.
 * NOTE(review): sampled listing — some statements/braces not visible. */
955 qla2x00_get_fw_version(scsi_qla_host_t *vha)
959 mbx_cmd_t *mcp = &mc;
960 struct qla_hw_data *ha = vha->hw;
962 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
963 "Entered %s.\n", __func__);
965 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
/* in_mb grows with hardware generation: base regs, MPI/PHY regs on
 * 81xx-class parts, extended attributes on FWI2, RAM ranges on 27xx. */
967 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
968 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
969 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
970 if (IS_FWI2_CAPABLE(ha))
971 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
974 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
975 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;
978 mcp->tov = MBX_TOV_SECONDS;
979 rval = qla2x00_mailbox_command(vha, mcp);
980 if (rval != QLA_SUCCESS)
983 /* Return mailbox data. */
984 ha->fw_major_version = mcp->mb[1];
985 ha->fw_minor_version = mcp->mb[2];
986 ha->fw_subminor_version = mcp->mb[3];
987 ha->fw_attributes = mcp->mb[6];
988 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
989 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
991 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
993 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
994 ha->mpi_version[0] = mcp->mb[10] & 0xff;
995 ha->mpi_version[1] = mcp->mb[11] >> 8;
996 ha->mpi_version[2] = mcp->mb[11] & 0xff;
997 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
998 ha->phy_version[0] = mcp->mb[8] & 0xff;
999 ha->phy_version[1] = mcp->mb[9] >> 8;
1000 ha->phy_version[2] = mcp->mb[9] & 0xff;
1003 if (IS_FWI2_CAPABLE(ha)) {
1004 ha->fw_attributes_h = mcp->mb[15];
1005 ha->fw_attributes_ext[0] = mcp->mb[16];
1006 ha->fw_attributes_ext[1] = mcp->mb[17];
1007 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1008 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1009 __func__, mcp->mb[15], mcp->mb[6]);
1010 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1011 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1012 __func__, mcp->mb[17], mcp->mb[16]);
1014 if (ha->fw_attributes_h & 0x4)
1015 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1016 "%s: Firmware supports Extended Login 0x%x\n",
1017 __func__, ha->fw_attributes_h);
1019 if (ha->fw_attributes_h & 0x8)
1020 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1021 "%s: Firmware supports Exchange Offload 0x%x\n",
1022 __func__, ha->fw_attributes_h);
1025 * FW supports nvme and driver load parameter requested nvme.
1026 * BIT 26 of fw_attributes indicates NVMe support.
1028 if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable)
1029 vha->flags.nvme_enabled = 1;
1033 if (IS_QLA27XX(ha)) {
1034 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1035 ha->mpi_version[1] = mcp->mb[11] >> 8;
1036 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1037 ha->pep_version[0] = mcp->mb[13] & 0xff;
1038 ha->pep_version[1] = mcp->mb[14] >> 8;
1039 ha->pep_version[2] = mcp->mb[14] & 0xff;
1040 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1041 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1042 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1043 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1047 if (rval != QLA_SUCCESS) {
1049 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1052 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1053 "Done %s.\n", __func__);
1059 * qla2x00_get_fw_options
1060 * Set firmware options.
1063 * ha = adapter block pointer.
1064 * fwopt = pointer for firmware options.
1067 * qla2x00 local function return status code.
/* qla2x00_get_fw_options() - read current firmware options words via
 * MBC_GET_FIRMWARE_OPTION; copies mb[0..3] into fwopts[0..3] on success.
 * NOTE(review): sampled listing — some statements/braces not visible. */
1073 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1077 mbx_cmd_t *mcp = &mc;
1079 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1080 "Entered %s.\n", __func__);
1082 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1083 mcp->out_mb = MBX_0;
1084 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1085 mcp->tov = MBX_TOV_SECONDS;
1087 rval = qla2x00_mailbox_command(vha, mcp);
1089 if (rval != QLA_SUCCESS) {
1091 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1093 fwopts[0] = mcp->mb[0];
1094 fwopts[1] = mcp->mb[1];
1095 fwopts[2] = mcp->mb[2];
1096 fwopts[3] = mcp->mb[3];
1098 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1099 "Done %s.\n", __func__);
1107 * qla2x00_set_fw_options
1108 * Set firmware options.
1111 * ha = adapter block pointer.
1112 * fwopt = pointer for firmware options.
1115 * qla2x00 local function return status code.
/* qla2x00_set_fw_options() - program firmware options from fwopts[1..3]
 * via MBC_SET_FIRMWARE_OPTION; FWI2 parts additionally take fwopts[10],
 * legacy parts fwopts[10..11] plus an undocumented zeroed mb[12].  The
 * resulting status word is written back to fwopts[0].
 * NOTE(review): sampled listing — some statements/braces not visible. */
1121 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1125 mbx_cmd_t *mcp = &mc;
1127 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1128 "Entered %s.\n", __func__);
1130 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1131 mcp->mb[1] = fwopts[1];
1132 mcp->mb[2] = fwopts[2];
1133 mcp->mb[3] = fwopts[3];
1134 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1136 if (IS_FWI2_CAPABLE(vha->hw)) {
1137 mcp->in_mb |= MBX_1;
1138 mcp->mb[10] = fwopts[10];
1139 mcp->out_mb |= MBX_10;
1141 mcp->mb[10] = fwopts[10];
1142 mcp->mb[11] = fwopts[11];
1143 mcp->mb[12] = 0; /* Undocumented, but used */
1144 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1146 mcp->tov = MBX_TOV_SECONDS;
1148 rval = qla2x00_mailbox_command(vha, mcp);
1150 fwopts[0] = mcp->mb[0];
1152 if (rval != QLA_SUCCESS) {
1154 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1155 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1158 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1159 "Done %s.\n", __func__);
1166 * qla2x00_mbx_reg_test
1167 * Mailbox register wrap test.
1170 * ha = adapter block pointer.
1171 * TARGET_QUEUE_LOCK must be released.
1172 * ADAPTER_STATE_LOCK must be released.
1175 * qla2x00 local function return status code.
/* qla2x00_mbx_reg_test() - mailbox register wrap test: write seven known
 * patterns via MBC_MAILBOX_REGISTER_TEST and verify firmware echoes each
 * one back unchanged; any mismatch yields QLA_FUNCTION_FAILED.
 * NOTE(review): sampled listing — some statements/braces not visible. */
1181 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1185 mbx_cmd_t *mcp = &mc;
1187 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1188 "Entered %s.\n", __func__);
1190 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1191 mcp->mb[1] = 0xAAAA;
1192 mcp->mb[2] = 0x5555;
1193 mcp->mb[3] = 0xAA55;
1194 mcp->mb[4] = 0x55AA;
1195 mcp->mb[5] = 0xA5A5;
1196 mcp->mb[6] = 0x5A5A;
1197 mcp->mb[7] = 0x2525;
1198 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1199 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1200 mcp->tov = MBX_TOV_SECONDS;
1202 rval = qla2x00_mailbox_command(vha, mcp);
1204 if (rval == QLA_SUCCESS) {
1205 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1206 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1207 rval = QLA_FUNCTION_FAILED;
1208 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1209 mcp->mb[7] != 0x2525)
1210 rval = QLA_FUNCTION_FAILED;
1213 if (rval != QLA_SUCCESS) {
1215 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1218 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1219 "Done %s.\n", __func__);
1226 * qla2x00_verify_checksum
1227 * Verify firmware checksum.
1230 * ha = adapter block pointer.
1231 * TARGET_QUEUE_LOCK must be released.
1232 * ADAPTER_STATE_LOCK must be released.
1235 * qla2x00 local function return status code.
/* qla2x00_verify_checksum() - ask firmware to checksum itself starting at
 * risc_addr via MBC_VERIFY_CHECKSUM.  FWI2 parts take/return a 32-bit
 * address split across mb[1]/mb[2]; legacy parts use mb[1] only.
 * NOTE(review): sampled listing — some statements/braces not visible. */
1241 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1245 mbx_cmd_t *mcp = &mc;
1247 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1248 "Entered %s.\n", __func__);
1250 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1251 mcp->out_mb = MBX_0;
1253 if (IS_FWI2_CAPABLE(vha->hw)) {
1254 mcp->mb[1] = MSW(risc_addr);
1255 mcp->mb[2] = LSW(risc_addr);
1256 mcp->out_mb |= MBX_2|MBX_1;
1257 mcp->in_mb |= MBX_2|MBX_1;
1259 mcp->mb[1] = LSW(risc_addr);
1260 mcp->out_mb |= MBX_1;
1261 mcp->in_mb |= MBX_1;
1264 mcp->tov = MBX_TOV_SECONDS;
1266 rval = qla2x00_mailbox_command(vha, mcp);
1268 if (rval != QLA_SUCCESS) {
1269 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1270 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1271 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1273 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1274 "Done %s.\n", __func__);
1281 * qla2x00_issue_iocb
1282 * Issue IOCB using mailbox command
1285 * ha = adapter state pointer.
1286 * buffer = buffer pointer.
1287 * phys_addr = physical address of buffer.
1288 * size = size of buffer.
1289 * TARGET_QUEUE_LOCK must be released.
1290 * ADAPTER_STATE_LOCK must be released.
1293 * qla2x00 local function return status code.
/* qla2x00_issue_iocb_timeout() - execute a single IOCB (already built in
 * the DMA buffer at phys_addr) via MBC_IOCB_COMMAND_A64 with caller-chosen
 * timeout; on success masks the reserved entry_status bits in the returned
 * status entry.
 * NOTE(review): sampled listing — some statements/braces not visible. */
1299 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1300 dma_addr_t phys_addr, size_t size, uint32_t tov)
1304 mbx_cmd_t *mcp = &mc;
1306 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1307 "Entered %s.\n", __func__);
1309 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1311 mcp->mb[2] = MSW(phys_addr);
1312 mcp->mb[3] = LSW(phys_addr);
1313 mcp->mb[6] = MSW(MSD(phys_addr));
1314 mcp->mb[7] = LSW(MSD(phys_addr));
1315 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1316 mcp->in_mb = MBX_2|MBX_0;
1319 rval = qla2x00_mailbox_command(vha, mcp);
1321 if (rval != QLA_SUCCESS) {
1323 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1325 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
1327 /* Mask reserved bits. */
1328 sts_entry->entry_status &=
1329 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1330 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1331 "Done %s.\n", __func__);
/* qla2x00_issue_iocb() - convenience wrapper around
 * qla2x00_issue_iocb_timeout() using the default mailbox timeout.
 * NOTE(review): the timeout argument line is elided from this view. */
1338 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1341 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1346 * qla2x00_abort_command
1347 * Abort command aborts a specified IOCB.
1350 * ha = adapter block pointer.
1351 * sp = SB structure pointer.
1354 * qla2x00 local function return status code.
/* qla2x00_abort_command() - abort the IOCB identified by srb @sp: find its
 * handle in the request queue's outstanding_cmds[] under the hardware lock,
 * then issue MBC_ABORT_COMMAND with loop id, handle and LUN.  Returns
 * QLA_FUNCTION_FAILED if the command is no longer outstanding.
 * NOTE(review): sampled listing — some statements/braces not visible. */
1360 qla2x00_abort_command(srb_t *sp)
1362 unsigned long flags = 0;
1364 uint32_t handle = 0;
1366 mbx_cmd_t *mcp = &mc;
1367 fc_port_t *fcport = sp->fcport;
1368 scsi_qla_host_t *vha = fcport->vha;
1369 struct qla_hw_data *ha = vha->hw;
1370 struct req_que *req;
1371 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1373 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1374 "Entered %s.\n", __func__);
1376 if (vha->flags.qpairs_available && sp->qpair)
1377 req = sp->qpair->req;
1381 spin_lock_irqsave(&ha->hardware_lock, flags);
1382 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1383 if (req->outstanding_cmds[handle] == sp)
1386 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1388 if (handle == req->num_outstanding_cmds) {
1389 /* command not found */
1390 return QLA_FUNCTION_FAILED;
/* Extended-ID adapters carry the loop id in the low byte of mb[1];
 * legacy adapters in the high byte. */
1393 mcp->mb[0] = MBC_ABORT_COMMAND;
1394 if (HAS_EXTENDED_IDS(ha))
1395 mcp->mb[1] = fcport->loop_id;
1397 mcp->mb[1] = fcport->loop_id << 8;
1398 mcp->mb[2] = (uint16_t)handle;
1399 mcp->mb[3] = (uint16_t)(handle >> 16);
1400 mcp->mb[6] = (uint16_t)cmd->device->lun;
1401 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1403 mcp->tov = MBX_TOV_SECONDS;
1405 rval = qla2x00_mailbox_command(vha, mcp);
1407 if (rval != QLA_SUCCESS) {
1408 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1410 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1411 "Done %s.\n", __func__);
/*
 * Issue an MBC_ABORT_TARGET mailbox command for fcport, then follow up
 * with a marker IOCB so the firmware resynchronizes the exchange state.
 * (l and tag are accepted for interface parity with the reset variants;
 * they are not referenced in the visible body -- TODO confirm.)
 */
1418 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1422 	mbx_cmd_t *mcp = &mc;
1423 	scsi_qla_host_t *vha;
1424 	struct req_que *req;
1425 	struct rsp_que *rsp;
1430 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1431 	    "Entered %s.\n", __func__);
1433 	req = vha->hw->req_q_map[0];
1435 	mcp->mb[0] = MBC_ABORT_TARGET;
1436 	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1437 	if (HAS_EXTENDED_IDS(vha->hw)) {
1438 		mcp->mb[1] = fcport->loop_id;
1440 		mcp->out_mb |= MBX_10;
1442 		mcp->mb[1] = fcport->loop_id << 8;
1444 	mcp->mb[2] = vha->hw->loop_reset_delay;
1445 	mcp->mb[9] = vha->vp_idx;
1448 	mcp->tov = MBX_TOV_SECONDS;
1450 	rval = qla2x00_mailbox_command(vha, mcp);
1451 	if (rval != QLA_SUCCESS) {
1452 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1453 		    "Failed=%x.\n", rval);
1456 	/* Issue marker IOCB. */
1457 	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
	/* Marker failure is logged but does not override the abort's rval. */
1459 	if (rval2 != QLA_SUCCESS) {
1460 		ql_dbg(ql_dbg_mbx, vha, 0x1040,
1461 		    "Failed to issue marker IOCB (%x).\n", rval2);
1463 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1464 		    "Done %s.\n", __func__);
/*
 * Issue an MBC_LUN_RESET mailbox command for LUN l on fcport, then a
 * marker IOCB for that LUN.  Mirrors qla2x00_abort_target() above but
 * targets a single LUN rather than the whole port.
 */
1471 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1475 	mbx_cmd_t *mcp = &mc;
1476 	scsi_qla_host_t *vha;
1477 	struct req_que *req;
1478 	struct rsp_que *rsp;
1482 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1483 	    "Entered %s.\n", __func__);
1485 	req = vha->hw->req_q_map[0];
1487 	mcp->mb[0] = MBC_LUN_RESET;
1488 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1489 	if (HAS_EXTENDED_IDS(vha->hw))
1490 		mcp->mb[1] = fcport->loop_id;
1492 		mcp->mb[1] = fcport->loop_id << 8;
	/* LUN is truncated to 32 bits for the mailbox register. */
1493 	mcp->mb[2] = (u32)l;
1495 	mcp->mb[9] = vha->vp_idx;
1498 	mcp->tov = MBX_TOV_SECONDS;
1500 	rval = qla2x00_mailbox_command(vha, mcp);
1501 	if (rval != QLA_SUCCESS) {
1502 		ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1505 	/* Issue marker IOCB. */
1506 	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
1508 	if (rval2 != QLA_SUCCESS) {
1509 		ql_dbg(ql_dbg_mbx, vha, 0x1044,
1510 		    "Failed to issue marker IOCB (%x).\n", rval2);
1512 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1513 		    "Done %s.\n", __func__);
1520 * qla2x00_get_adapter_id
1521 * Get adapter ID and topology.
1524 * ha = adapter block pointer.
1525 * id = pointer for loop ID.
1526 * al_pa = pointer for AL_PA.
1527 * area = pointer for area.
1528 * domain = pointer for domain.
1529 * top = pointer for topology.
1530 * TARGET_QUEUE_LOCK must be released.
1531 * ADAPTER_STATE_LOCK must be released.
1534 * qla2x00 local function return status code.
/*
 * Query the adapter's loop ID, AL_PA, area, domain, topology and switch
 * capabilities via MBC_GET_ADAPTER_LOOP_ID.  On CNA parts it also caches
 * FCoE VLAN/FCF/VN-port-MAC data, and on FA-WWN-capable parts it may
 * adopt a fabric-assigned WWPN from mb[16..19].
 */
1540 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1541     uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1545 	mbx_cmd_t *mcp = &mc;
1547 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1548 	    "Entered %s.\n", __func__);
1550 	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1551 	mcp->mb[9] = vha->vp_idx;
1552 	mcp->out_mb = MBX_9|MBX_0;
	/* Widen the expected input registers per hardware capability. */
1553 	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1554 	if (IS_CNA_CAPABLE(vha->hw))
1555 		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1556 	if (IS_FWI2_CAPABLE(vha->hw))
1557 		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1558 	if (IS_QLA27XX(vha->hw))
1559 		mcp->in_mb |= MBX_15;
1560 	mcp->tov = MBX_TOV_SECONDS;
1562 	rval = qla2x00_mailbox_command(vha, mcp);
	/* Map well-known mailbox completion codes onto driver rvals. */
1563 	if (mcp->mb[0] == MBS_COMMAND_ERROR)
1564 		rval = QLA_COMMAND_ERROR;
1565 	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1566 		rval = QLA_INVALID_COMMAND;
	/* Unpack addressing info for the caller. */
1570 	*al_pa = LSB(mcp->mb[2]);
1571 	*area = MSB(mcp->mb[2]);
1572 	*domain = LSB(mcp->mb[3]);
1574 	*sw_cap = mcp->mb[7];
1576 	if (rval != QLA_SUCCESS) {
1578 		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1580 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1581 		    "Done %s.\n", __func__);
		/* FCoE (CNA) extras: VLAN id, FCF index, VN-port MAC
		 * (MAC bytes arrive big-endian-ish across mb[11..13]). */
1583 		if (IS_CNA_CAPABLE(vha->hw)) {
1584 			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1585 			vha->fcoe_fcf_idx = mcp->mb[10];
1586 			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1587 			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1588 			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1589 			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1590 			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1591 			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1593 		/* If FA-WWN supported */
1594 		if (IS_FAWWN_CAPABLE(vha->hw)) {
			/* BIT_14 in mb[7] signals a fabric-assigned WWPN. */
1595 			if (mcp->mb[7] & BIT_14) {
1596 				vha->port_name[0] = MSB(mcp->mb[16]);
1597 				vha->port_name[1] = LSB(mcp->mb[16]);
1598 				vha->port_name[2] = MSB(mcp->mb[17]);
1599 				vha->port_name[3] = LSB(mcp->mb[17]);
1600 				vha->port_name[4] = MSB(mcp->mb[18]);
1601 				vha->port_name[5] = LSB(mcp->mb[18]);
1602 				vha->port_name[6] = MSB(mcp->mb[19]);
1603 				vha->port_name[7] = LSB(mcp->mb[19]);
1604 				fc_host_port_name(vha->host) =
1605 				    wwn_to_u64(vha->port_name);
1606 				ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1607 				    "FA-WWN acquired %016llx\n",
1608 				    wwn_to_u64(vha->port_name));
		/* mb[15] carries BB credit info on 27xx parts. */
1612 		if (IS_QLA27XX(vha->hw))
1613 			vha->bbcr = mcp->mb[15];
1620 * qla2x00_get_retry_cnt
1621 * Get current firmware login retry count and delay.
1624 * ha = adapter block pointer.
1625 * retry_cnt = pointer to login retry count.
1626 * tov = pointer to login timeout value.
1629 * qla2x00 local function return status code.
/*
 * Read the firmware's login retry count and resource-allocation timeout
 * (MBC_GET_RETRY_COUNT).  Updates *retry_cnt/*tov only when the firmware
 * values describe a longer overall retry window than the caller's.
 */
1635 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1641 	mbx_cmd_t *mcp = &mc;
1643 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1644 	    "Entered %s.\n", __func__);
1646 	mcp->mb[0] = MBC_GET_RETRY_COUNT;
1647 	mcp->out_mb = MBX_0;
1648 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1649 	mcp->tov = MBX_TOV_SECONDS;
1651 	rval = qla2x00_mailbox_command(vha, mcp);
1653 	if (rval != QLA_SUCCESS) {
1655 		ql_dbg(ql_dbg_mbx, vha, 0x104a,
1656 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1658 		/* Convert returned data and check our values. */
1659 		*r_a_tov = mcp->mb[3] / 2;
1660 		ratov = (mcp->mb[3]/2) / 10;  /* mb[3] value is in 100ms */
1661 		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1662 			/* Update to the larger values */
1663 			*retry_cnt = (uint8_t)mcp->mb[1];
1667 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1668 		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1675 * qla2x00_init_firmware
1676 * Initialize adapter firmware.
1679 * ha = adapter block pointer.
1680 * dptr = Initialization control block pointer.
1681 * size = size of initialization control block.
1682 * TARGET_QUEUE_LOCK must be released.
1683 * ADAPTER_STATE_LOCK must be released.
1686 * qla2x00 local function return status code.
/*
 * Push the initialization control block (ICB) at ha->init_cb_dma to the
 * firmware -- MBC_MID_INITIALIZE_FIRMWARE when NPIV is supported, plain
 * MBC_INITIALIZE_FIRMWARE otherwise.  An extended ICB, when present, is
 * passed via mb[10..14].
 */
1692 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1696 	mbx_cmd_t *mcp = &mc;
1697 	struct qla_hw_data *ha = vha->hw;
1699 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1700 	    "Entered %s.\n", __func__);
	/* P3P (82xx) parts with doorbell writes enabled need a doorbell
	 * poke before initialization -- TODO confirm exact semantics. */
1702 	if (IS_P3P_TYPE(ha) && ql2xdbwr)
1703 		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1704 		    (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1706 	if (ha->flags.npiv_supported)
1707 		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1709 		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
	/* 64-bit ICB DMA address in mb[2,3,6,7]. */
1712 	mcp->mb[2] = MSW(ha->init_cb_dma);
1713 	mcp->mb[3] = LSW(ha->init_cb_dma);
1714 	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1715 	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1716 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	/* Optional extended ICB: address in mb[10..13], size in mb[14]. */
1717 	if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1719 		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1720 		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1721 		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1722 		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1723 		mcp->mb[14] = sizeof(*ha->ex_init_cb);
1724 		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1726 	/* 1 and 2 should normally be captured. */
1727 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
1728 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
1729 		/* mb3 is additional info about the installed SFP. */
1730 		mcp->in_mb |= MBX_3;
1731 	mcp->buf_size = size;
1732 	mcp->flags = MBX_DMA_OUT;
1733 	mcp->tov = MBX_TOV_SECONDS;
1734 	rval = qla2x00_mailbox_command(vha, mcp);
1736 	if (rval != QLA_SUCCESS) {
1738 		ql_dbg(ql_dbg_mbx, vha, 0x104d,
1739 		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
1740 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
		/* 27xx reports SFP problems through mb[2]/mb[3]. */
1742 		if (IS_QLA27XX(ha)) {
1743 			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1744 				ql_dbg(ql_dbg_mbx, vha, 0x119d,
1745 				    "Invalid SFP/Validation Failed\n");
1747 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1748 		    "Done %s.\n", __func__);
1756 * qla2x00_get_port_database
1757 * Issue normal/enhanced get port database mailbox command
1758 * and copy device name as necessary.
1761 * ha = adapter state pointer.
1762 * dev = structure pointer.
1763 * opt = enhanced cmd option byte.
1766 * qla2x00 local function return status code.
/*
 * Fetch the firmware port database entry for fcport into a DMA buffer
 * and fold it back into the fcport: login state check, WWNN/WWPN, port
 * ID, initiator/target role, and class-of-service.  Two layouts exist:
 * struct port_database_24xx for FWI2-capable (24xx+) parts and the
 * legacy port_database_t otherwise.
 */
1772 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1776 	mbx_cmd_t *mcp = &mc;
1777 	port_database_t *pd;
1778 	struct port_database_24xx *pd24;
1780 	struct qla_hw_data *ha = vha->hw;
1782 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1783 	    "Entered %s.\n", __func__);
1786 	pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1788 		ql_log(ql_log_warn, vha, 0x1050,
1789 		    "Failed to allocate port database structure.\n");
1790 		return QLA_MEMORY_ALLOC_FAILED;
	/* Clear the larger of the two possible layouts. */
1792 	memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
1794 	mcp->mb[0] = MBC_GET_PORT_DATABASE;
	/* Legacy parts use the enhanced opcode when options are given. */
1795 	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1796 		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1797 	mcp->mb[2] = MSW(pd_dma);
1798 	mcp->mb[3] = LSW(pd_dma);
1799 	mcp->mb[6] = MSW(MSD(pd_dma));
1800 	mcp->mb[7] = LSW(MSD(pd_dma));
1801 	mcp->mb[9] = vha->vp_idx;
1802 	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	/* Loop ID / option placement varies by generation. */
1804 	if (IS_FWI2_CAPABLE(ha)) {
1805 		mcp->mb[1] = fcport->loop_id;
1807 		mcp->out_mb |= MBX_10|MBX_1;
1808 		mcp->in_mb |= MBX_1;
1809 	} else if (HAS_EXTENDED_IDS(ha)) {
1810 		mcp->mb[1] = fcport->loop_id;
1812 		mcp->out_mb |= MBX_10|MBX_1;
1814 		mcp->mb[1] = fcport->loop_id << 8 | opt;
1815 		mcp->out_mb |= MBX_1;
1817 	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1818 	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1819 	mcp->flags = MBX_DMA_IN;
	/* Scale the timeout from the configured login timeout (2.5x). */
1820 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1821 	rval = qla2x00_mailbox_command(vha, mcp);
1822 	if (rval != QLA_SUCCESS)
	/* --- FWI2 (24xx+) port database layout --- */
1825 	if (IS_FWI2_CAPABLE(ha)) {
1827 		pd24 = (struct port_database_24xx *) pd;
1829 		/* Check for logged in state. */
1830 		if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
1831 		    pd24->last_login_state != PDS_PRLI_COMPLETE) {
1832 			ql_dbg(ql_dbg_mbx, vha, 0x1051,
1833 			    "Unable to verify login-state (%x/%x) for "
1834 			    "loop_id %x.\n", pd24->current_login_state,
1835 			    pd24->last_login_state, fcport->loop_id);
1836 			rval = QLA_FUNCTION_FAILED;
		/* A known WWPN that no longer matches means the device
		 * went away between discovery and this query. */
1840 		if (fcport->loop_id == FC_NO_LOOP_ID ||
1841 		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1842 		     memcmp(fcport->port_name, pd24->port_name, 8))) {
1843 			/* We lost the device mid way. */
1844 			rval = QLA_NOT_LOGGED_IN;
1848 		/* Names are little-endian. */
1849 		memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1850 		memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1852 		/* Get port_id of device. */
1853 		fcport->d_id.b.domain = pd24->port_id[0];
1854 		fcport->d_id.b.area = pd24->port_id[1];
1855 		fcport->d_id.b.al_pa = pd24->port_id[2];
1856 		fcport->d_id.b.rsvd_1 = 0;
1858 		/* If not target must be initiator or unknown type. */
1859 		if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1860 			fcport->port_type = FCT_INITIATOR;
1862 			fcport->port_type = FCT_TARGET;
1864 		/* Passback COS information. */
1865 		fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1866 		    FC_COS_CLASS2 : FC_COS_CLASS3;
1868 		if (pd24->prli_svc_param_word_3[0] & BIT_7)
1869 			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
	/* --- Legacy (pre-24xx) port database layout --- */
1873 		/* Check for logged in state. */
1874 		if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1875 		    pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1876 			ql_dbg(ql_dbg_mbx, vha, 0x100a,
1877 			    "Unable to verify login-state (%x/%x) - "
1878 			    "portid=%02x%02x%02x.\n", pd->master_state,
1879 			    pd->slave_state, fcport->d_id.b.domain,
1880 			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
1881 			rval = QLA_FUNCTION_FAILED;
1885 		if (fcport->loop_id == FC_NO_LOOP_ID ||
1886 		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1887 		     memcmp(fcport->port_name, pd->port_name, 8))) {
1888 			/* We lost the device mid way. */
1889 			rval = QLA_NOT_LOGGED_IN;
1893 		/* Names are little-endian. */
1894 		memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
1895 		memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
1897 		/* Get port_id of device. */
1898 		fcport->d_id.b.domain = pd->port_id[0];
		/* NOTE: area lives at port_id[3] in the legacy layout
		 * (port_id[1] is reserved there), unlike the 24xx path. */
1899 		fcport->d_id.b.area = pd->port_id[3];
1900 		fcport->d_id.b.al_pa = pd->port_id[2];
1901 		fcport->d_id.b.rsvd_1 = 0;
1903 		/* If not target must be initiator or unknown type. */
1904 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
1905 			fcport->port_type = FCT_INITIATOR;
1907 			fcport->port_type = FCT_TARGET;
1909 		/* Passback COS information. */
1910 		fcport->supported_classes = (pd->options & BIT_4) ?
1911 		    FC_COS_CLASS2: FC_COS_CLASS3;
1915 	dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1917 	if (rval != QLA_SUCCESS) {
1918 		ql_dbg(ql_dbg_mbx, vha, 0x1052,
1919 		    "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1920 		    mcp->mb[0], mcp->mb[1]);
1922 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
1923 		    "Done %s.\n", __func__);
1930 * qla2x00_get_firmware_state
1931 * Get adapter firmware state.
1934 * ha = adapter block pointer.
1935 * dptr = pointer for firmware state.
1936 * TARGET_QUEUE_LOCK must be released.
1937 * ADAPTER_STATE_LOCK must be released.
1940 * qla2x00 local function return status code.
/*
 * Read the firmware state words (MBC_GET_FIRMWARE_STATE) into states[].
 * FWI2-capable parts return six words (mb[1..6], including SFP info in
 * mb[3] and DPORT status in mb[6]); legacy parts return only mb[1].
 */
1946 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1950 	mbx_cmd_t *mcp = &mc;
1951 	struct qla_hw_data *ha = vha->hw;
1953 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
1954 	    "Entered %s.\n", __func__);
1956 	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1957 	mcp->out_mb = MBX_0;
1958 	if (IS_FWI2_CAPABLE(vha->hw))
1959 		mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1961 		mcp->in_mb = MBX_1|MBX_0;
1962 	mcp->tov = MBX_TOV_SECONDS;
1964 	rval = qla2x00_mailbox_command(vha, mcp);
1966 	/* Return firmware states. */
1967 	states[0] = mcp->mb[1];
1968 	if (IS_FWI2_CAPABLE(vha->hw)) {
1969 		states[1] = mcp->mb[2];
1970 		states[2] = mcp->mb[3];  /* SFP info */
1971 		states[3] = mcp->mb[4];
1972 		states[4] = mcp->mb[5];
1973 		states[5] = mcp->mb[6];  /* DPORT status */
1976 	if (rval != QLA_SUCCESS) {
1978 		ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
		/* 27xx reports SFP problems through mb[2]/mb[3]. */
1980 		if (IS_QLA27XX(ha)) {
1981 			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1982 				ql_dbg(ql_dbg_mbx, vha, 0x119e,
1983 				    "Invalid SFP/Validation Failed\n");
1985 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
1986 		    "Done %s.\n", __func__);
1993 * qla2x00_get_port_name
1994 * Issue get port name mailbox command.
1995 * Returned name is in big endian format.
1998 * ha = adapter block pointer.
1999 * loop_id = loop ID of device.
2000 * name = pointer for name.
2001 * TARGET_QUEUE_LOCK must be released.
2002 * ADAPTER_STATE_LOCK must be released.
2005 * qla2x00 local function return status code.
/*
 * Fetch the 8-byte port name of the device at loop_id via
 * MBC_GET_PORT_NAME.  The name is assembled big-endian from
 * mb[2,3,6,7] into name[0..7].
 */
2011 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2016 	mbx_cmd_t *mcp = &mc;
2018 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2019 	    "Entered %s.\n", __func__);
2021 	mcp->mb[0] = MBC_GET_PORT_NAME;
2022 	mcp->mb[9] = vha->vp_idx;
2023 	mcp->out_mb = MBX_9|MBX_1|MBX_0;
	/* Loop ID / option placement depends on extended-ID support. */
2024 	if (HAS_EXTENDED_IDS(vha->hw)) {
2025 		mcp->mb[1] = loop_id;
2027 		mcp->out_mb |= MBX_10;
2029 		mcp->mb[1] = loop_id << 8 | opt;
2032 	mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2033 	mcp->tov = MBX_TOV_SECONDS;
2035 	rval = qla2x00_mailbox_command(vha, mcp);
2037 	if (rval != QLA_SUCCESS) {
2039 		ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2042 		/* This function returns name in big endian. */
2043 		name[0] = MSB(mcp->mb[2]);
2044 		name[1] = LSB(mcp->mb[2]);
2045 		name[2] = MSB(mcp->mb[3]);
2046 		name[3] = LSB(mcp->mb[3]);
2047 		name[4] = MSB(mcp->mb[6]);
2048 		name[5] = LSB(mcp->mb[6]);
2049 		name[6] = MSB(mcp->mb[7]);
2050 		name[7] = LSB(mcp->mb[7]);
2053 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2054 		    "Done %s.\n", __func__);
2061 * qla24xx_link_initialize
2062 * Issue link initialization mailbox command.
2065 * ha = adapter block pointer.
2066 * TARGET_QUEUE_LOCK must be released.
2067 * ADAPTER_STATE_LOCK must be released.
2070 * qla2x00 local function return status code.
/*
 * Issue MBC_LINK_INITIALIZATION on FWI2-capable, non-CNA adapters.
 * mb[1] selects loop (BIT_6) vs point-to-point (BIT_5) behavior based
 * on the current operating mode.
 */
2076 qla24xx_link_initialize(scsi_qla_host_t *vha)
2080 	mbx_cmd_t *mcp = &mc;
2082 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2083 	    "Entered %s.\n", __func__);
	/* Command only applies to FWI2-capable, non-CNA hardware. */
2085 	if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2086 		return QLA_FUNCTION_FAILED;
2088 	mcp->mb[0] = MBC_LINK_INITIALIZATION;
2090 	if (vha->hw->operating_mode == LOOP)
2091 		mcp->mb[1] |= BIT_6;
2093 		mcp->mb[1] |= BIT_5;
2096 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2098 	mcp->tov = MBX_TOV_SECONDS;
2100 	rval = qla2x00_mailbox_command(vha, mcp);
2102 	if (rval != QLA_SUCCESS) {
2103 		ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2105 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2106 		    "Done %s.\n", __func__);
2114 * Issue LIP reset mailbox command.
2117 * ha = adapter block pointer.
2118 * TARGET_QUEUE_LOCK must be released.
2119 * ADAPTER_STATE_LOCK must be released.
2122 * qla2x00 local function return status code.
/*
 * Reset the loop/link.  CNA parts log out across all FCFs with
 * MBC_LIP_FULL_LOGIN; FWI2-capable parts use MBC_LIP_FULL_LOGIN with a
 * reset delay; legacy parts use MBC_LIP_RESET with per-generation
 * loop-ID encoding in mb[1].
 */
2128 qla2x00_lip_reset(scsi_qla_host_t *vha)
2132 	mbx_cmd_t *mcp = &mc;
2134 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
2135 	    "Entered %s.\n", __func__);
2137 	if (IS_CNA_CAPABLE(vha->hw)) {
2138 		/* Logout across all FCFs. */
2139 		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2142 		mcp->out_mb = MBX_2|MBX_1|MBX_0;
2143 	} else if (IS_FWI2_CAPABLE(vha->hw)) {
2144 		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2147 		mcp->mb[3] = vha->hw->loop_reset_delay;
2148 		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
		/* Legacy LIP reset path. */
2150 		mcp->mb[0] = MBC_LIP_RESET;
2151 		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2152 		if (HAS_EXTENDED_IDS(vha->hw)) {
2153 			mcp->mb[1] = 0x00ff;
2155 			mcp->out_mb |= MBX_10;
2157 			mcp->mb[1] = 0xff00;
2159 		mcp->mb[2] = vha->hw->loop_reset_delay;
2163 	mcp->tov = MBX_TOV_SECONDS;
2165 	rval = qla2x00_mailbox_command(vha, mcp);
2167 	if (rval != QLA_SUCCESS) {
2169 		ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2172 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2173 		    "Done %s.\n", __func__);
2184 * ha = adapter block pointer.
2185 * sns = pointer for command.
2186 * cmd_size = command size.
2187 * buf_size = response/command size.
2188 * TARGET_QUEUE_LOCK must be released.
2189 * ADAPTER_STATE_LOCK must be released.
2192 * qla2x00 local function return status code.
/*
 * Send a Simple Name Server (SNS) command from the DMA buffer at
 * sns_phys_address (MBC_SEND_SNS_COMMAND); the response is DMA'd back
 * into the same buffer (MBX_DMA_OUT|MBX_DMA_IN).
 */
2198 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2199     uint16_t cmd_size, size_t buf_size)
2203 	mbx_cmd_t *mcp = &mc;
2205 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2206 	    "Entered %s.\n", __func__);
2208 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2209 	    "Retry cnt=%d ratov=%d total tov=%d.\n",
2210 	    vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2212 	mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2213 	mcp->mb[1] = cmd_size;
2214 	mcp->mb[2] = MSW(sns_phys_address);
2215 	mcp->mb[3] = LSW(sns_phys_address);
2216 	mcp->mb[6] = MSW(MSD(sns_phys_address));
2217 	mcp->mb[7] = LSW(MSD(sns_phys_address));
2218 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2219 	mcp->in_mb = MBX_0|MBX_1;
2220 	mcp->buf_size = buf_size;
2221 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
	/* Scale the timeout from the configured login timeout (2.5x). */
2222 	mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2223 	rval = qla2x00_mailbox_command(vha, mcp);
2225 	if (rval != QLA_SUCCESS) {
2227 		ql_dbg(ql_dbg_mbx, vha, 0x105f,
2228 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
2229 		    rval, mcp->mb[0], mcp->mb[1]);
2232 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2233 		    "Done %s.\n", __func__);
/*
 * Fabric login for FWI2-capable (24xx+) adapters: builds a PLOGI
 * LOGINOUT_PORT_IOCB in a DMA buffer, executes it through
 * qla2x00_issue_iocb_timeout(), and translates the IOCB completion into
 * legacy mailbox-status values in mb[] for callers written against the
 * 2x00 mailbox interface.
 */
2240 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2241     uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2245 	struct logio_entry_24xx *lg;
2248 	struct qla_hw_data *ha = vha->hw;
2249 	struct req_que *req;
2251 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2252 	    "Entered %s.\n", __func__);
	/* VPs with a dedicated qpair use its request queue. */
2254 	if (vha->vp_idx && vha->qpair)
2255 		req = vha->qpair->req;
2257 		req = ha->req_q_map[0];
2259 	lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2261 		ql_log(ql_log_warn, vha, 0x1062,
2262 		    "Failed to allocate login IOCB.\n");
2263 		return QLA_MEMORY_ALLOC_FAILED;
2265 	memset(lg, 0, sizeof(struct logio_entry_24xx));
	/* Build the PLOGI IOCB. */
2267 	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2268 	lg->entry_count = 1;
2269 	lg->handle = MAKE_HANDLE(req->id, lg->handle);
2270 	lg->nport_handle = cpu_to_le16(loop_id);
2271 	lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2273 		lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2275 		lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	/* port_id bytes are stored AL_PA, area, domain. */
2276 	lg->port_id[0] = al_pa;
2277 	lg->port_id[1] = area;
2278 	lg->port_id[2] = domain;
2279 	lg->vp_index = vha->vp_idx;
	/* Timeout derived from R_A_TOV (r_a_tov is in 100ms units here --
	 * TODO confirm against qla_def.h). */
2280 	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2281 	    (ha->r_a_tov / 10 * 2) + 2);
2282 	if (rval != QLA_SUCCESS) {
2283 		ql_dbg(ql_dbg_mbx, vha, 0x1063,
2284 		    "Failed to issue login IOCB (%x).\n", rval);
2285 	} else if (lg->entry_status != 0) {
2286 		ql_dbg(ql_dbg_mbx, vha, 0x1064,
2287 		    "Failed to complete IOCB -- error status (%x).\n",
2289 		rval = QLA_FUNCTION_FAILED;
2290 	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2291 		iop[0] = le32_to_cpu(lg->io_parameter[0]);
2292 		iop[1] = le32_to_cpu(lg->io_parameter[1]);
2294 		ql_dbg(ql_dbg_mbx, vha, 0x1065,
2295 		    "Failed to complete IOCB -- completion status (%x) "
2296 		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
		/* Map IOCB sub-status codes to legacy mailbox statuses. */
2300 		case LSC_SCODE_PORTID_USED:
2301 			mb[0] = MBS_PORT_ID_USED;
2302 			mb[1] = LSW(iop[1]);
2304 		case LSC_SCODE_NPORT_USED:
2305 			mb[0] = MBS_LOOP_ID_USED;
2307 		case LSC_SCODE_NOLINK:
2308 		case LSC_SCODE_NOIOCB:
2309 		case LSC_SCODE_NOXCB:
2310 		case LSC_SCODE_CMD_FAILED:
2311 		case LSC_SCODE_NOFABRIC:
2312 		case LSC_SCODE_FW_NOT_READY:
2313 		case LSC_SCODE_NOT_LOGGED_IN:
2314 		case LSC_SCODE_NOPCB:
2315 		case LSC_SCODE_ELS_REJECT:
2316 		case LSC_SCODE_CMD_PARAM_ERR:
2317 		case LSC_SCODE_NONPORT:
2318 		case LSC_SCODE_LOGGED_IN:
2319 		case LSC_SCODE_NOFLOGI_ACC:
2321 			mb[0] = MBS_COMMAND_ERROR;
2325 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2326 		    "Done %s.\n", __func__);
		/* Success: report completion plus COS/confirmed-completion
		 * capabilities in mb[10]. */
2328 		iop[0] = le32_to_cpu(lg->io_parameter[0]);
2330 		mb[0] = MBS_COMMAND_COMPLETE;
2332 		if (iop[0] & BIT_4) {
2338 		/* Passback COS information. */
2340 		if (lg->io_parameter[7] || lg->io_parameter[8])
2341 			mb[10] |= BIT_0;	/* Class 2. */
2342 		if (lg->io_parameter[9] || lg->io_parameter[10])
2343 			mb[10] |= BIT_1;	/* Class 3. */
2344 		if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2345 			mb[10] |= BIT_7;	/* Confirmed Completion
2350 	dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2356 * qla2x00_login_fabric
2357 * Issue login fabric port mailbox command.
2360 * ha = adapter block pointer.
2361 * loop_id = device loop ID.
2362 * domain = device domain.
2363 * area = device area.
2364 * al_pa = device AL_PA.
2365 * status = pointer for return status.
2366 * opt = command options.
2367 * TARGET_QUEUE_LOCK must be released.
2368 * ADAPTER_STATE_LOCK must be released.
2371 * qla2x00 local function return status code.
/*
 * Fabric login for legacy (pre-24xx) adapters via the
 * MBC_LOGIN_FABRIC_PORT mailbox command.  Login statuses are returned
 * through mb[] for the caller to interpret.
 */
2377 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2378     uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2382 	mbx_cmd_t *mcp = &mc;
2383 	struct qla_hw_data *ha = vha->hw;
2385 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2386 	    "Entered %s.\n", __func__);
2388 	mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2389 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2390 	if (HAS_EXTENDED_IDS(ha)) {
2391 		mcp->mb[1] = loop_id;
2393 		mcp->out_mb |= MBX_10;
2395 		mcp->mb[1] = (loop_id << 8) | opt;
	/* 24-bit port ID packed as domain in mb[2], area|al_pa in mb[3]. */
2397 	mcp->mb[2] = domain;
2398 	mcp->mb[3] = area << 8 | al_pa;
2400 	mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
	/* Scale the timeout from the configured login timeout (2.5x). */
2401 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2403 	rval = qla2x00_mailbox_command(vha, mcp);
2405 	/* Return mailbox statuses. */
2412 	/* COS retrieved from Get-Port-Database mailbox command. */
2416 	if (rval != QLA_SUCCESS) {
2417 		/* RLU tmp code: need to change main mailbox_command function to
2418 		 * return ok even when the mailbox completion value is not
2419 		 * SUCCESS. The caller needs to be responsible to interpret
2420 		 * the return values of this mailbox command if we're not
2421 		 * to change too much of the existing code.
		 */
		/* These mailbox statuses are expected login outcomes, not
		 * transport failures; treat them as success for the caller. */
2423 		if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2424 		    mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2425 		    mcp->mb[0] == 0x4006)
2429 		ql_dbg(ql_dbg_mbx, vha, 0x1068,
2430 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2431 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2434 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2435 		    "Done %s.\n", __func__);
2442 * qla2x00_login_local_device
2443 * Issue login loop port mailbox command.
2446 * ha = adapter block pointer.
2447 * loop_id = device loop ID.
2448 * opt = command options.
2451 * Return status code.
/*
 * Log in to a local-loop device.  FWI2-capable parts delegate to
 * qla24xx_login_fabric(); legacy parts use the MBC_LOGIN_LOOP_PORT
 * mailbox command.  Mailbox statuses are copied into mb_ret when the
 * caller provides it.
 */
2458 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2459     uint16_t *mb_ret, uint8_t opt)
2463 	mbx_cmd_t *mcp = &mc;
2464 	struct qla_hw_data *ha = vha->hw;
2466 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2467 	    "Entered %s.\n", __func__);
	/* 24xx+ parts do local login through the fabric-login IOCB path. */
2469 	if (IS_FWI2_CAPABLE(ha))
2470 		return qla24xx_login_fabric(vha, fcport->loop_id,
2471 		    fcport->d_id.b.domain, fcport->d_id.b.area,
2472 		    fcport->d_id.b.al_pa, mb_ret, opt);
2474 	mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2475 	if (HAS_EXTENDED_IDS(ha))
2476 		mcp->mb[1] = fcport->loop_id;
2478 		mcp->mb[1] = fcport->loop_id << 8;
2480 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
2481 	mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
	/* Scale the timeout from the configured login timeout (2.5x). */
2482 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2484 	rval = qla2x00_mailbox_command(vha, mcp);
2486 	/* Return mailbox statuses. */
2487 	if (mb_ret != NULL) {
2488 		mb_ret[0] = mcp->mb[0];
2489 		mb_ret[1] = mcp->mb[1];
2490 		mb_ret[6] = mcp->mb[6];
2491 		mb_ret[7] = mcp->mb[7];
2494 	if (rval != QLA_SUCCESS) {
2495 		/* AV tmp code: need to change main mailbox_command function to
2496 		 * return ok even when the mailbox completion value is not
2497 		 * SUCCESS. The caller needs to be responsible to interpret
2498 		 * the return values of this mailbox command if we're not
2499 		 * to change too much of the existing code.
		 */
		/* 0x4005/0x4006 are expected login outcomes, not failures. */
2501 		if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2504 		ql_dbg(ql_dbg_mbx, vha, 0x106b,
2505 		    "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2506 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2509 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2510 		    "Done %s.\n", __func__);
/*
 * Fabric logout for FWI2-capable (24xx+) adapters: builds an implicit
 * LOGO LOGINOUT_PORT_IOCB in a DMA buffer and executes it through
 * qla2x00_issue_iocb_timeout().  Completion problems are logged;
 * rval reflects only IOCB issue/entry-status failures.
 */
2517 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2518     uint8_t area, uint8_t al_pa)
2521 	struct logio_entry_24xx *lg;
2523 	struct qla_hw_data *ha = vha->hw;
2524 	struct req_que *req;
2526 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2527 	    "Entered %s.\n", __func__);
2529 	lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2531 		ql_log(ql_log_warn, vha, 0x106e,
2532 		    "Failed to allocate logout IOCB.\n");
2533 		return QLA_MEMORY_ALLOC_FAILED;
2535 	memset(lg, 0, sizeof(struct logio_entry_24xx));
	/* Build the implicit-LOGO IOCB. */
2538 	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2539 	lg->entry_count = 1;
2540 	lg->handle = MAKE_HANDLE(req->id, lg->handle);
2541 	lg->nport_handle = cpu_to_le16(loop_id);
2543 	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2545 	lg->port_id[0] = al_pa;
2546 	lg->port_id[1] = area;
2547 	lg->port_id[2] = domain;
2548 	lg->vp_index = vha->vp_idx;
	/* Timeout derived from R_A_TOV, as in qla24xx_login_fabric(). */
2549 	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2550 	    (ha->r_a_tov / 10 * 2) + 2);
2551 	if (rval != QLA_SUCCESS) {
2552 		ql_dbg(ql_dbg_mbx, vha, 0x106f,
2553 		    "Failed to issue logout IOCB (%x).\n", rval);
2554 	} else if (lg->entry_status != 0) {
2555 		ql_dbg(ql_dbg_mbx, vha, 0x1070,
2556 		    "Failed to complete IOCB -- error status (%x).\n",
2558 		rval = QLA_FUNCTION_FAILED;
2559 	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
		/* Logged only -- a failed logout is not fatal here. */
2560 		ql_dbg(ql_dbg_mbx, vha, 0x1071,
2561 		    "Failed to complete IOCB -- completion status (%x) "
2562 		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2563 		    le32_to_cpu(lg->io_parameter[0]),
2564 		    le32_to_cpu(lg->io_parameter[1]));
2567 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2568 		    "Done %s.\n", __func__);
2571 	dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2577 * qla2x00_fabric_logout
2578 * Issue logout fabric port mailbox command.
2581 * ha = adapter block pointer.
2582 * loop_id = device loop ID.
2583 * TARGET_QUEUE_LOCK must be released.
2584 * ADAPTER_STATE_LOCK must be released.
2587 * qla2x00 local function return status code.
/*
 * Fabric logout for legacy (pre-24xx) adapters via the
 * MBC_LOGOUT_FABRIC_PORT mailbox command.  (domain/area/al_pa are
 * accepted for interface parity with the 24xx variant; they are not
 * referenced in the visible body -- TODO confirm.)
 */
2593 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2594     uint8_t area, uint8_t al_pa)
2598 	mbx_cmd_t *mcp = &mc;
2600 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2601 	    "Entered %s.\n", __func__);
2603 	mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2604 	mcp->out_mb = MBX_1|MBX_0;
2605 	if (HAS_EXTENDED_IDS(vha->hw)) {
2606 		mcp->mb[1] = loop_id;
2608 		mcp->out_mb |= MBX_10;
2610 		mcp->mb[1] = loop_id << 8;
2613 	mcp->in_mb = MBX_1|MBX_0;
2614 	mcp->tov = MBX_TOV_SECONDS;
2616 	rval = qla2x00_mailbox_command(vha, mcp);
2618 	if (rval != QLA_SUCCESS) {
2620 		ql_dbg(ql_dbg_mbx, vha, 0x1074,
2621 		    "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2624 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2625 		    "Done %s.\n", __func__);
2632 * qla2x00_full_login_lip
2633 * Issue full login LIP mailbox command.
2636 * ha = adapter block pointer.
2637 * TARGET_QUEUE_LOCK must be released.
2638 * ADAPTER_STATE_LOCK must be released.
2641 * qla2x00 local function return status code.
/*
 * Issue a full-login LIP (MBC_LIP_FULL_LOGIN).  On FWI2-capable parts
 * mb[1] requests BIT_3 behavior; legacy parts pass 0 -- exact bit
 * semantics per the firmware spec, TODO confirm.
 */
2647 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2651 	mbx_cmd_t *mcp = &mc;
2653 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2654 	    "Entered %s.\n", __func__);
2656 	mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2657 	mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
2660 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2662 	mcp->tov = MBX_TOV_SECONDS;
2664 	rval = qla2x00_mailbox_command(vha, mcp);
2666 	if (rval != QLA_SUCCESS) {
2668 		ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2671 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2672 		    "Done %s.\n", __func__);
2679 * qla2x00_get_id_list
2682 * ha = adapter block pointer.
2685 * qla2x00 local function return status code.
/*
 * Retrieve the firmware's port ID list (MBC_GET_ID_LIST) into the DMA
 * buffer at id_list_dma; *entries receives the returned entry count
 * from mb[1].  The DMA-address register layout differs between FWI2
 * and legacy generations.
 */
2691 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2696 	mbx_cmd_t *mcp = &mc;
2698 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2699 	    "Entered %s.\n", __func__);
2701 	if (id_list == NULL)
2702 		return QLA_FUNCTION_FAILED;
2704 	mcp->mb[0] = MBC_GET_ID_LIST;
2705 	mcp->out_mb = MBX_0;
2706 	if (IS_FWI2_CAPABLE(vha->hw)) {
		/* FWI2: DMA address in mb[2,3,6,7], vp index in mb[9]. */
2707 		mcp->mb[2] = MSW(id_list_dma);
2708 		mcp->mb[3] = LSW(id_list_dma);
2709 		mcp->mb[6] = MSW(MSD(id_list_dma));
2710 		mcp->mb[7] = LSW(MSD(id_list_dma));
2712 		mcp->mb[9] = vha->vp_idx;
2713 		mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
		/* Legacy: DMA address in mb[1,2,3,6]. */
2715 		mcp->mb[1] = MSW(id_list_dma);
2716 		mcp->mb[2] = LSW(id_list_dma);
2717 		mcp->mb[3] = MSW(MSD(id_list_dma));
2718 		mcp->mb[6] = LSW(MSD(id_list_dma));
2719 		mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2721 	mcp->in_mb = MBX_1|MBX_0;
2722 	mcp->tov = MBX_TOV_SECONDS;
2724 	rval = qla2x00_mailbox_command(vha, mcp);
2726 	if (rval != QLA_SUCCESS) {
2728 		ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2730 		*entries = mcp->mb[1];
2731 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2732 		    "Done %s.\n", __func__);
2739 * qla2x00_get_resource_cnts
2740 * Get current firmware resource counts.
2743 * ha = adapter block pointer.
2746 * qla2x00 local function return status code.
2752 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
2754 struct qla_hw_data *ha = vha->hw;
2757 mbx_cmd_t *mcp = &mc;
2759 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2760 "Entered %s.\n", __func__);
2762 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2763 mcp->out_mb = MBX_0;
2764 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2765 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
2766 mcp->in_mb |= MBX_12;
2767 mcp->tov = MBX_TOV_SECONDS;
2769 rval = qla2x00_mailbox_command(vha, mcp);
2771 if (rval != QLA_SUCCESS) {
2773 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2774 "Failed mb[0]=%x.\n", mcp->mb[0]);
2776 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2777 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2778 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2779 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2780 mcp->mb[11], mcp->mb[12]);
2782 ha->orig_fw_tgt_xcb_count = mcp->mb[1];
2783 ha->cur_fw_tgt_xcb_count = mcp->mb[2];
2784 ha->cur_fw_xcb_count = mcp->mb[3];
2785 ha->orig_fw_xcb_count = mcp->mb[6];
2786 ha->cur_fw_iocb_count = mcp->mb[7];
2787 ha->orig_fw_iocb_count = mcp->mb[10];
2788 if (ha->flags.npiv_supported)
2789 ha->max_npiv_vports = mcp->mb[11];
2790 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2791 ha->fw_max_fcf_count = mcp->mb[12];
2798 * qla2x00_get_fcal_position_map
2799 * Get FCAL (LILP) position map using mailbox command
2802 * ha = adapter state pointer.
2803 * pos_map = buffer pointer (can be NULL).
2806 * qla2x00 local function return status code.
2812 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2816 mbx_cmd_t *mcp = &mc;
2818 dma_addr_t pmap_dma;
2819 struct qla_hw_data *ha = vha->hw;
2821 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2822 "Entered %s.\n", __func__);
2824 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2826 ql_log(ql_log_warn, vha, 0x1080,
2827 "Memory alloc failed.\n");
2828 return QLA_MEMORY_ALLOC_FAILED;
2830 memset(pmap, 0, FCAL_MAP_SIZE);
2832 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2833 mcp->mb[2] = MSW(pmap_dma);
2834 mcp->mb[3] = LSW(pmap_dma);
2835 mcp->mb[6] = MSW(MSD(pmap_dma));
2836 mcp->mb[7] = LSW(MSD(pmap_dma));
2837 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2838 mcp->in_mb = MBX_1|MBX_0;
2839 mcp->buf_size = FCAL_MAP_SIZE;
2840 mcp->flags = MBX_DMA_IN;
2841 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2842 rval = qla2x00_mailbox_command(vha, mcp);
2844 if (rval == QLA_SUCCESS) {
2845 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2846 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2847 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2848 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2852 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2854 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2856 if (rval != QLA_SUCCESS) {
2857 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2859 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2860 "Done %s.\n", __func__);
2867 * qla2x00_get_link_status
2870 * ha = adapter block pointer.
2871 * loop_id = device loop ID.
2872 * ret_buf = pointer to link status return buffer.
2876 * BIT_0 = mem alloc error.
2877 * BIT_1 = mailbox error.
2880 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2881 struct link_statistics *stats, dma_addr_t stats_dma)
2885 mbx_cmd_t *mcp = &mc;
2886 uint32_t *iter = (void *)stats;
2887 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
2888 struct qla_hw_data *ha = vha->hw;
2890 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
2891 "Entered %s.\n", __func__);
2893 mcp->mb[0] = MBC_GET_LINK_STATUS;
2894 mcp->mb[2] = MSW(LSD(stats_dma));
2895 mcp->mb[3] = LSW(LSD(stats_dma));
2896 mcp->mb[6] = MSW(MSD(stats_dma));
2897 mcp->mb[7] = LSW(MSD(stats_dma));
2898 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2900 if (IS_FWI2_CAPABLE(ha)) {
2901 mcp->mb[1] = loop_id;
2904 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
2905 mcp->in_mb |= MBX_1;
2906 } else if (HAS_EXTENDED_IDS(ha)) {
2907 mcp->mb[1] = loop_id;
2909 mcp->out_mb |= MBX_10|MBX_1;
2911 mcp->mb[1] = loop_id << 8;
2912 mcp->out_mb |= MBX_1;
2914 mcp->tov = MBX_TOV_SECONDS;
2915 mcp->flags = IOCTL_CMD;
2916 rval = qla2x00_mailbox_command(vha, mcp);
2918 if (rval == QLA_SUCCESS) {
2919 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2920 ql_dbg(ql_dbg_mbx, vha, 0x1085,
2921 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2922 rval = QLA_FUNCTION_FAILED;
2924 /* Re-endianize - firmware data is le32. */
2925 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
2926 "Done %s.\n", __func__);
2927 for ( ; dwords--; iter++)
2932 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
2939 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2940 dma_addr_t stats_dma, uint16_t options)
2944 mbx_cmd_t *mcp = &mc;
2945 uint32_t *iter, dwords;
2947 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
2948 "Entered %s.\n", __func__);
2950 memset(&mc, 0, sizeof(mc));
2951 mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
2952 mc.mb[2] = MSW(stats_dma);
2953 mc.mb[3] = LSW(stats_dma);
2954 mc.mb[6] = MSW(MSD(stats_dma));
2955 mc.mb[7] = LSW(MSD(stats_dma));
2956 mc.mb[8] = sizeof(struct link_statistics) / 4;
2957 mc.mb[9] = cpu_to_le16(vha->vp_idx);
2958 mc.mb[10] = cpu_to_le16(options);
2960 rval = qla24xx_send_mb_cmd(vha, &mc);
2962 if (rval == QLA_SUCCESS) {
2963 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2964 ql_dbg(ql_dbg_mbx, vha, 0x1089,
2965 "Failed mb[0]=%x.\n", mcp->mb[0]);
2966 rval = QLA_FUNCTION_FAILED;
2968 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
2969 "Done %s.\n", __func__);
2970 /* Re-endianize - firmware data is le32. */
2971 dwords = sizeof(struct link_statistics) / 4;
2972 iter = &stats->link_fail_cnt;
2973 for ( ; dwords--; iter++)
2978 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
2985 qla24xx_abort_command(srb_t *sp)
2988 unsigned long flags = 0;
2990 struct abort_entry_24xx *abt;
2993 fc_port_t *fcport = sp->fcport;
2994 struct scsi_qla_host *vha = fcport->vha;
2995 struct qla_hw_data *ha = vha->hw;
2996 struct req_que *req = vha->req;
2998 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
2999 "Entered %s.\n", __func__);
3002 req = sp->qpair->req;
3004 if (ql2xasynctmfenable)
3005 return qla24xx_async_abort_command(sp);
3007 spin_lock_irqsave(&ha->hardware_lock, flags);
3008 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3009 if (req->outstanding_cmds[handle] == sp)
3012 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3013 if (handle == req->num_outstanding_cmds) {
3014 /* Command not found. */
3015 return QLA_FUNCTION_FAILED;
3018 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3020 ql_log(ql_log_warn, vha, 0x108d,
3021 "Failed to allocate abort IOCB.\n");
3022 return QLA_MEMORY_ALLOC_FAILED;
3024 memset(abt, 0, sizeof(struct abort_entry_24xx));
3026 abt->entry_type = ABORT_IOCB_TYPE;
3027 abt->entry_count = 1;
3028 abt->handle = MAKE_HANDLE(req->id, abt->handle);
3029 abt->nport_handle = cpu_to_le16(fcport->loop_id);
3030 abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
3031 abt->port_id[0] = fcport->d_id.b.al_pa;
3032 abt->port_id[1] = fcport->d_id.b.area;
3033 abt->port_id[2] = fcport->d_id.b.domain;
3034 abt->vp_index = fcport->vha->vp_idx;
3036 abt->req_que_no = cpu_to_le16(req->id);
3038 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3039 if (rval != QLA_SUCCESS) {
3040 ql_dbg(ql_dbg_mbx, vha, 0x108e,
3041 "Failed to issue IOCB (%x).\n", rval);
3042 } else if (abt->entry_status != 0) {
3043 ql_dbg(ql_dbg_mbx, vha, 0x108f,
3044 "Failed to complete IOCB -- error status (%x).\n",
3046 rval = QLA_FUNCTION_FAILED;
3047 } else if (abt->nport_handle != cpu_to_le16(0)) {
3048 ql_dbg(ql_dbg_mbx, vha, 0x1090,
3049 "Failed to complete IOCB -- completion status (%x).\n",
3050 le16_to_cpu(abt->nport_handle));
3051 if (abt->nport_handle == CS_IOCB_ERROR)
3052 rval = QLA_FUNCTION_PARAMETER_ERROR;
3054 rval = QLA_FUNCTION_FAILED;
3056 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3057 "Done %s.\n", __func__);
3060 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
3065 struct tsk_mgmt_cmd {
3067 struct tsk_mgmt_entry tsk;
3068 struct sts_entry_24xx sts;
3073 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3074 uint64_t l, int tag)
3077 struct tsk_mgmt_cmd *tsk;
3078 struct sts_entry_24xx *sts;
3080 scsi_qla_host_t *vha;
3081 struct qla_hw_data *ha;
3082 struct req_que *req;
3083 struct rsp_que *rsp;
3084 struct qla_qpair *qpair;
3090 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3091 "Entered %s.\n", __func__);
3093 if (vha->vp_idx && vha->qpair) {
3102 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3104 ql_log(ql_log_warn, vha, 0x1093,
3105 "Failed to allocate task management IOCB.\n");
3106 return QLA_MEMORY_ALLOC_FAILED;
3108 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
3110 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3111 tsk->p.tsk.entry_count = 1;
3112 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
3113 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
3114 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3115 tsk->p.tsk.control_flags = cpu_to_le32(type);
3116 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3117 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3118 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3119 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
3120 if (type == TCF_LUN_RESET) {
3121 int_to_scsilun(l, &tsk->p.tsk.lun);
3122 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3123 sizeof(tsk->p.tsk.lun));
3127 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3128 if (rval != QLA_SUCCESS) {
3129 ql_dbg(ql_dbg_mbx, vha, 0x1094,
3130 "Failed to issue %s reset IOCB (%x).\n", name, rval);
3131 } else if (sts->entry_status != 0) {
3132 ql_dbg(ql_dbg_mbx, vha, 0x1095,
3133 "Failed to complete IOCB -- error status (%x).\n",
3135 rval = QLA_FUNCTION_FAILED;
3136 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3137 ql_dbg(ql_dbg_mbx, vha, 0x1096,
3138 "Failed to complete IOCB -- completion status (%x).\n",
3139 le16_to_cpu(sts->comp_status));
3140 rval = QLA_FUNCTION_FAILED;
3141 } else if (le16_to_cpu(sts->scsi_status) &
3142 SS_RESPONSE_INFO_LEN_VALID) {
3143 if (le32_to_cpu(sts->rsp_data_len) < 4) {
3144 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3145 "Ignoring inconsistent data length -- not enough "
3146 "response info (%d).\n",
3147 le32_to_cpu(sts->rsp_data_len));
3148 } else if (sts->data[3]) {
3149 ql_dbg(ql_dbg_mbx, vha, 0x1098,
3150 "Failed to complete IOCB -- response (%x).\n",
3152 rval = QLA_FUNCTION_FAILED;
3156 /* Issue marker IOCB. */
3157 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
3158 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
3159 if (rval2 != QLA_SUCCESS) {
3160 ql_dbg(ql_dbg_mbx, vha, 0x1099,
3161 "Failed to issue marker IOCB (%x).\n", rval2);
3163 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3164 "Done %s.\n", __func__);
3167 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
3173 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3175 struct qla_hw_data *ha = fcport->vha->hw;
3177 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3178 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3180 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
3184 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3186 struct qla_hw_data *ha = fcport->vha->hw;
3188 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3189 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3191 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
3195 qla2x00_system_error(scsi_qla_host_t *vha)
3199 mbx_cmd_t *mcp = &mc;
3200 struct qla_hw_data *ha = vha->hw;
3202 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3203 return QLA_FUNCTION_FAILED;
3205 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3206 "Entered %s.\n", __func__);
3208 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3209 mcp->out_mb = MBX_0;
3213 rval = qla2x00_mailbox_command(vha, mcp);
3215 if (rval != QLA_SUCCESS) {
3216 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3218 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3219 "Done %s.\n", __func__);
3226 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3230 mbx_cmd_t *mcp = &mc;
3232 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3233 !IS_QLA27XX(vha->hw))
3234 return QLA_FUNCTION_FAILED;
3236 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3237 "Entered %s.\n", __func__);
3239 mcp->mb[0] = MBC_WRITE_SERDES;
3241 if (IS_QLA2031(vha->hw))
3242 mcp->mb[2] = data & 0xff;
3247 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3249 mcp->tov = MBX_TOV_SECONDS;
3251 rval = qla2x00_mailbox_command(vha, mcp);
3253 if (rval != QLA_SUCCESS) {
3254 ql_dbg(ql_dbg_mbx, vha, 0x1183,
3255 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3257 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3258 "Done %s.\n", __func__);
3265 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3269 mbx_cmd_t *mcp = &mc;
3271 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3272 !IS_QLA27XX(vha->hw))
3273 return QLA_FUNCTION_FAILED;
3275 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3276 "Entered %s.\n", __func__);
3278 mcp->mb[0] = MBC_READ_SERDES;
3281 mcp->out_mb = MBX_3|MBX_1|MBX_0;
3282 mcp->in_mb = MBX_1|MBX_0;
3283 mcp->tov = MBX_TOV_SECONDS;
3285 rval = qla2x00_mailbox_command(vha, mcp);
3287 if (IS_QLA2031(vha->hw))
3288 *data = mcp->mb[1] & 0xff;
3292 if (rval != QLA_SUCCESS) {
3293 ql_dbg(ql_dbg_mbx, vha, 0x1186,
3294 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3296 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3297 "Done %s.\n", __func__);
3304 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3308 mbx_cmd_t *mcp = &mc;
3310 if (!IS_QLA8044(vha->hw))
3311 return QLA_FUNCTION_FAILED;
3313 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3314 "Entered %s.\n", __func__);
3316 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3317 mcp->mb[1] = HCS_WRITE_SERDES;
3318 mcp->mb[3] = LSW(addr);
3319 mcp->mb[4] = MSW(addr);
3320 mcp->mb[5] = LSW(data);
3321 mcp->mb[6] = MSW(data);
3322 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3324 mcp->tov = MBX_TOV_SECONDS;
3326 rval = qla2x00_mailbox_command(vha, mcp);
3328 if (rval != QLA_SUCCESS) {
3329 ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3330 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3332 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3333 "Done %s.\n", __func__);
3340 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3344 mbx_cmd_t *mcp = &mc;
3346 if (!IS_QLA8044(vha->hw))
3347 return QLA_FUNCTION_FAILED;
3349 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3350 "Entered %s.\n", __func__);
3352 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3353 mcp->mb[1] = HCS_READ_SERDES;
3354 mcp->mb[3] = LSW(addr);
3355 mcp->mb[4] = MSW(addr);
3356 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3357 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3358 mcp->tov = MBX_TOV_SECONDS;
3360 rval = qla2x00_mailbox_command(vha, mcp);
3362 *data = mcp->mb[2] << 16 | mcp->mb[1];
3364 if (rval != QLA_SUCCESS) {
3365 ql_dbg(ql_dbg_mbx, vha, 0x118a,
3366 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3368 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3369 "Done %s.\n", __func__);
3376 * qla2x00_set_serdes_params() -
3382 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3383 uint16_t sw_em_2g, uint16_t sw_em_4g)
3387 mbx_cmd_t *mcp = &mc;
3389 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3390 "Entered %s.\n", __func__);
3392 mcp->mb[0] = MBC_SERDES_PARAMS;
3394 mcp->mb[2] = sw_em_1g | BIT_15;
3395 mcp->mb[3] = sw_em_2g | BIT_15;
3396 mcp->mb[4] = sw_em_4g | BIT_15;
3397 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3399 mcp->tov = MBX_TOV_SECONDS;
3401 rval = qla2x00_mailbox_command(vha, mcp);
3403 if (rval != QLA_SUCCESS) {
3405 ql_dbg(ql_dbg_mbx, vha, 0x109f,
3406 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3409 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3410 "Done %s.\n", __func__);
3417 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3421 mbx_cmd_t *mcp = &mc;
3423 if (!IS_FWI2_CAPABLE(vha->hw))
3424 return QLA_FUNCTION_FAILED;
3426 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3427 "Entered %s.\n", __func__);
3429 mcp->mb[0] = MBC_STOP_FIRMWARE;
3431 mcp->out_mb = MBX_1|MBX_0;
3435 rval = qla2x00_mailbox_command(vha, mcp);
3437 if (rval != QLA_SUCCESS) {
3438 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3439 if (mcp->mb[0] == MBS_INVALID_COMMAND)
3440 rval = QLA_INVALID_COMMAND;
3442 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3443 "Done %s.\n", __func__);
3450 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3455 mbx_cmd_t *mcp = &mc;
3457 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3458 "Entered %s.\n", __func__);
3460 if (!IS_FWI2_CAPABLE(vha->hw))
3461 return QLA_FUNCTION_FAILED;
3463 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3464 return QLA_FUNCTION_FAILED;
3466 mcp->mb[0] = MBC_TRACE_CONTROL;
3467 mcp->mb[1] = TC_EFT_ENABLE;
3468 mcp->mb[2] = LSW(eft_dma);
3469 mcp->mb[3] = MSW(eft_dma);
3470 mcp->mb[4] = LSW(MSD(eft_dma));
3471 mcp->mb[5] = MSW(MSD(eft_dma));
3472 mcp->mb[6] = buffers;
3473 mcp->mb[7] = TC_AEN_DISABLE;
3474 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3475 mcp->in_mb = MBX_1|MBX_0;
3476 mcp->tov = MBX_TOV_SECONDS;
3478 rval = qla2x00_mailbox_command(vha, mcp);
3479 if (rval != QLA_SUCCESS) {
3480 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3481 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3482 rval, mcp->mb[0], mcp->mb[1]);
3484 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3485 "Done %s.\n", __func__);
3492 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3496 mbx_cmd_t *mcp = &mc;
3498 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3499 "Entered %s.\n", __func__);
3501 if (!IS_FWI2_CAPABLE(vha->hw))
3502 return QLA_FUNCTION_FAILED;
3504 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3505 return QLA_FUNCTION_FAILED;
3507 mcp->mb[0] = MBC_TRACE_CONTROL;
3508 mcp->mb[1] = TC_EFT_DISABLE;
3509 mcp->out_mb = MBX_1|MBX_0;
3510 mcp->in_mb = MBX_1|MBX_0;
3511 mcp->tov = MBX_TOV_SECONDS;
3513 rval = qla2x00_mailbox_command(vha, mcp);
3514 if (rval != QLA_SUCCESS) {
3515 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3516 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3517 rval, mcp->mb[0], mcp->mb[1]);
3519 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3520 "Done %s.\n", __func__);
3527 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3528 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3532 mbx_cmd_t *mcp = &mc;
3534 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3535 "Entered %s.\n", __func__);
3537 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3538 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
3539 return QLA_FUNCTION_FAILED;
3541 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3542 return QLA_FUNCTION_FAILED;
3544 mcp->mb[0] = MBC_TRACE_CONTROL;
3545 mcp->mb[1] = TC_FCE_ENABLE;
3546 mcp->mb[2] = LSW(fce_dma);
3547 mcp->mb[3] = MSW(fce_dma);
3548 mcp->mb[4] = LSW(MSD(fce_dma));
3549 mcp->mb[5] = MSW(MSD(fce_dma));
3550 mcp->mb[6] = buffers;
3551 mcp->mb[7] = TC_AEN_DISABLE;
3553 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3554 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3555 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3557 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3558 mcp->tov = MBX_TOV_SECONDS;
3560 rval = qla2x00_mailbox_command(vha, mcp);
3561 if (rval != QLA_SUCCESS) {
3562 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3563 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3564 rval, mcp->mb[0], mcp->mb[1]);
3566 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3567 "Done %s.\n", __func__);
3570 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
3579 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3583 mbx_cmd_t *mcp = &mc;
3585 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3586 "Entered %s.\n", __func__);
3588 if (!IS_FWI2_CAPABLE(vha->hw))
3589 return QLA_FUNCTION_FAILED;
3591 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3592 return QLA_FUNCTION_FAILED;
3594 mcp->mb[0] = MBC_TRACE_CONTROL;
3595 mcp->mb[1] = TC_FCE_DISABLE;
3596 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3597 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3598 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3600 mcp->tov = MBX_TOV_SECONDS;
3602 rval = qla2x00_mailbox_command(vha, mcp);
3603 if (rval != QLA_SUCCESS) {
3604 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3605 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3606 rval, mcp->mb[0], mcp->mb[1]);
3608 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3609 "Done %s.\n", __func__);
3612 *wr = (uint64_t) mcp->mb[5] << 48 |
3613 (uint64_t) mcp->mb[4] << 32 |
3614 (uint64_t) mcp->mb[3] << 16 |
3615 (uint64_t) mcp->mb[2];
3617 *rd = (uint64_t) mcp->mb[9] << 48 |
3618 (uint64_t) mcp->mb[8] << 32 |
3619 (uint64_t) mcp->mb[7] << 16 |
3620 (uint64_t) mcp->mb[6];
3627 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3628 uint16_t *port_speed, uint16_t *mb)
3632 mbx_cmd_t *mcp = &mc;
3634 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3635 "Entered %s.\n", __func__);
3637 if (!IS_IIDMA_CAPABLE(vha->hw))
3638 return QLA_FUNCTION_FAILED;
3640 mcp->mb[0] = MBC_PORT_PARAMS;
3641 mcp->mb[1] = loop_id;
3642 mcp->mb[2] = mcp->mb[3] = 0;
3643 mcp->mb[9] = vha->vp_idx;
3644 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3645 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3646 mcp->tov = MBX_TOV_SECONDS;
3648 rval = qla2x00_mailbox_command(vha, mcp);
3650 /* Return mailbox statuses. */
3657 if (rval != QLA_SUCCESS) {
3658 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3660 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3661 "Done %s.\n", __func__);
3663 *port_speed = mcp->mb[3];
3670 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3671 uint16_t port_speed, uint16_t *mb)
3675 mbx_cmd_t *mcp = &mc;
3677 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3678 "Entered %s.\n", __func__);
3680 if (!IS_IIDMA_CAPABLE(vha->hw))
3681 return QLA_FUNCTION_FAILED;
3683 mcp->mb[0] = MBC_PORT_PARAMS;
3684 mcp->mb[1] = loop_id;
3686 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
3687 mcp->mb[9] = vha->vp_idx;
3688 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3689 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3690 mcp->tov = MBX_TOV_SECONDS;
3692 rval = qla2x00_mailbox_command(vha, mcp);
3694 /* Return mailbox statuses. */
3701 if (rval != QLA_SUCCESS) {
3702 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3703 "Failed=%x.\n", rval);
3705 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3706 "Done %s.\n", __func__);
3713 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3714 struct vp_rpt_id_entry_24xx *rptid_entry)
3716 struct qla_hw_data *ha = vha->hw;
3717 scsi_qla_host_t *vp = NULL;
3718 unsigned long flags;
3722 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3723 "Entered %s.\n", __func__);
3725 if (rptid_entry->entry_status != 0)
3728 id.b.domain = rptid_entry->port_id[2];
3729 id.b.area = rptid_entry->port_id[1];
3730 id.b.al_pa = rptid_entry->port_id[0];
3733 if (rptid_entry->format == 0) {
3735 ql_dbg(ql_dbg_async, vha, 0x10b7,
3736 "Format 0 : Number of VPs setup %d, number of "
3737 "VPs acquired %d.\n", rptid_entry->vp_setup,
3738 rptid_entry->vp_acquired);
3739 ql_dbg(ql_dbg_async, vha, 0x10b8,
3740 "Primary port id %02x%02x%02x.\n",
3741 rptid_entry->port_id[2], rptid_entry->port_id[1],
3742 rptid_entry->port_id[0]);
3744 qlt_update_host_map(vha, id);
3746 } else if (rptid_entry->format == 1) {
3748 ql_dbg(ql_dbg_async, vha, 0x10b9,
3749 "Format 1: VP[%d] enabled - status %d - with "
3750 "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
3751 rptid_entry->vp_status,
3752 rptid_entry->port_id[2], rptid_entry->port_id[1],
3753 rptid_entry->port_id[0]);
3755 /* buffer to buffer credit flag */
3756 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
3758 if (rptid_entry->vp_idx == 0) {
3759 if (rptid_entry->vp_status == VP_STAT_COMPL) {
3760 /* FA-WWN is only for physical port */
3761 if (qla_ini_mode_enabled(vha) &&
3762 ha->flags.fawwpn_enabled &&
3763 (rptid_entry->u.f1.flags &
3765 memcpy(vha->port_name,
3766 rptid_entry->u.f1.port_name,
3770 qlt_update_host_map(vha, id);
3773 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3774 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3776 if (rptid_entry->vp_status != VP_STAT_COMPL &&
3777 rptid_entry->vp_status != VP_STAT_ID_CHG) {
3778 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
3779 "Could not acquire ID for VP[%d].\n",
3780 rptid_entry->vp_idx);
3785 spin_lock_irqsave(&ha->vport_slock, flags);
3786 list_for_each_entry(vp, &ha->vp_list, list) {
3787 if (rptid_entry->vp_idx == vp->vp_idx) {
3792 spin_unlock_irqrestore(&ha->vport_slock, flags);
3797 qlt_update_host_map(vp, id);
3800 * Cannot configure here as we are still sitting on the
3801 * response queue. Handle it in dpc context.
3803 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
3804 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
3805 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
3807 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
3808 qla2xxx_wake_dpc(vha);
3809 } else if (rptid_entry->format == 2) {
3810 ql_dbg(ql_dbg_async, vha, 0x505f,
3811 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
3812 rptid_entry->port_id[2], rptid_entry->port_id[1],
3813 rptid_entry->port_id[0]);
3815 ql_dbg(ql_dbg_async, vha, 0x5075,
3816 "N2N: Remote WWPN %8phC.\n",
3817 rptid_entry->u.f2.port_name);
3819 /* N2N. direct connect */
3820 vha->d_id.b.domain = rptid_entry->port_id[2];
3821 vha->d_id.b.area = rptid_entry->port_id[1];
3822 vha->d_id.b.al_pa = rptid_entry->port_id[0];
3824 spin_lock_irqsave(&ha->vport_slock, flags);
3825 qlt_update_vp_map(vha, SET_AL_PA);
3826 spin_unlock_irqrestore(&ha->vport_slock, flags);
3831 * qla24xx_modify_vp_config
3832 * Change VP configuration for vha
3835 * vha = adapter block pointer.
3838 * qla2xxx local function return status code.
3844 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
3847 struct vp_config_entry_24xx *vpmod;
3848 dma_addr_t vpmod_dma;
3849 struct qla_hw_data *ha = vha->hw;
3850 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3852 /* This can be called by the parent */
3854 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
3855 "Entered %s.\n", __func__);
3857 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
3859 ql_log(ql_log_warn, vha, 0x10bc,
3860 "Failed to allocate modify VP IOCB.\n");
3861 return QLA_MEMORY_ALLOC_FAILED;
3864 memset(vpmod, 0, sizeof(struct vp_config_entry_24xx));
3865 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
3866 vpmod->entry_count = 1;
3867 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
3868 vpmod->vp_count = 1;
3869 vpmod->vp_index1 = vha->vp_idx;
3870 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
3872 qlt_modify_vp_config(vha, vpmod);
3874 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
3875 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
3876 vpmod->entry_count = 1;
3878 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
3879 if (rval != QLA_SUCCESS) {
3880 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
3881 "Failed to issue VP config IOCB (%x).\n", rval);
3882 } else if (vpmod->comp_status != 0) {
3883 ql_dbg(ql_dbg_mbx, vha, 0x10be,
3884 "Failed to complete IOCB -- error status (%x).\n",
3885 vpmod->comp_status);
3886 rval = QLA_FUNCTION_FAILED;
3887 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
3888 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
3889 "Failed to complete IOCB -- completion status (%x).\n",
3890 le16_to_cpu(vpmod->comp_status));
3891 rval = QLA_FUNCTION_FAILED;
3894 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
3895 "Done %s.\n", __func__);
3896 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
3898 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
3904 * qla24xx_control_vp
3905 * Enable a virtual port for given host
3908 * ha = adapter block pointer.
3909 * vhba = virtual adapter (unused)
3910 * index = index number for enabled VP
3913 * qla2xxx local function return status code.
3919 qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3923 struct vp_ctrl_entry_24xx *vce;
3925 struct qla_hw_data *ha = vha->hw;
3926 int vp_index = vha->vp_idx;
3927 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3929 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
3930 "Entered %s enabling index %d.\n", __func__, vp_index);
3932 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
3933 return QLA_PARAMETER_ERROR;
3935 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
3937 ql_log(ql_log_warn, vha, 0x10c2,
3938 "Failed to allocate VP control IOCB.\n");
3939 return QLA_MEMORY_ALLOC_FAILED;
3941 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
3943 vce->entry_type = VP_CTRL_IOCB_TYPE;
3944 vce->entry_count = 1;
3945 vce->command = cpu_to_le16(cmd);
3946 vce->vp_count = cpu_to_le16(1);
3948 /* index map in firmware starts with 1; decrement index
3949 * this is ok as we never use index 0
3951 map = (vp_index - 1) / 8;
3952 pos = (vp_index - 1) & 7;
3953 mutex_lock(&ha->vport_lock);
3954 vce->vp_idx_map[map] |= 1 << pos;
3955 mutex_unlock(&ha->vport_lock);
3957 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
3958 if (rval != QLA_SUCCESS) {
3959 ql_dbg(ql_dbg_mbx, vha, 0x10c3,
3960 "Failed to issue VP control IOCB (%x).\n", rval);
3961 } else if (vce->entry_status != 0) {
3962 ql_dbg(ql_dbg_mbx, vha, 0x10c4,
3963 "Failed to complete IOCB -- error status (%x).\n",
3965 rval = QLA_FUNCTION_FAILED;
3966 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
3967 ql_dbg(ql_dbg_mbx, vha, 0x10c5,
3968 "Failed to complete IOCB -- completion status (%x).\n",
3969 le16_to_cpu(vce->comp_status));
3970 rval = QLA_FUNCTION_FAILED;
3972 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
3973 "Done %s.\n", __func__);
3976 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
3982 * qla2x00_send_change_request
3983 * Receive or disable RSCN request from fabric controller
3986 * ha = adapter block pointer
3987 * format = registration format:
3989 * 1 - Fabric detected registration
3990 * 2 - N_port detected registration
3991 * 3 - Full registration
3992 * FF - clear registration
3993 * vp_idx = Virtual port index
3996 * qla2x00 local function return status code.
4003 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4008 mbx_cmd_t *mcp = &mc;
4010 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4011 "Entered %s.\n", __func__);
4013 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4014 mcp->mb[1] = format;
4015 mcp->mb[9] = vp_idx;
4016 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4017 mcp->in_mb = MBX_0|MBX_1;
4018 mcp->tov = MBX_TOV_SECONDS;
4020 rval = qla2x00_mailbox_command(vha, mcp);
4022 if (rval == QLA_SUCCESS) {
4023 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
4033 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4038 mbx_cmd_t *mcp = &mc;
4040 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4041 "Entered %s.\n", __func__);
4043 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4044 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4045 mcp->mb[8] = MSW(addr);
4046 mcp->out_mb = MBX_8|MBX_0;
4048 mcp->mb[0] = MBC_DUMP_RISC_RAM;
4049 mcp->out_mb = MBX_0;
4051 mcp->mb[1] = LSW(addr);
4052 mcp->mb[2] = MSW(req_dma);
4053 mcp->mb[3] = LSW(req_dma);
4054 mcp->mb[6] = MSW(MSD(req_dma));
4055 mcp->mb[7] = LSW(MSD(req_dma));
4056 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
4057 if (IS_FWI2_CAPABLE(vha->hw)) {
4058 mcp->mb[4] = MSW(size);
4059 mcp->mb[5] = LSW(size);
4060 mcp->out_mb |= MBX_5|MBX_4;
4062 mcp->mb[4] = LSW(size);
4063 mcp->out_mb |= MBX_4;
4067 mcp->tov = MBX_TOV_SECONDS;
4069 rval = qla2x00_mailbox_command(vha, mcp);
4071 if (rval != QLA_SUCCESS) {
4072 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4073 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4075 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4076 "Done %s.\n", __func__);
4081 /* 84XX Support **************************************************************/
4083 struct cs84xx_mgmt_cmd {
4085 struct verify_chip_entry_84xx req;
4086 struct verify_chip_rsp_84xx rsp;
/*
 * qla84xx_verify_chip() - issue a Verify Chip IOCB to the CS84xx,
 * optionally updating its firmware.  On failure with a firmware update
 * pending, retries with VCO_DONT_UPDATE_FW set.  @status[0] receives the
 * completion status, @status[1] the failure code on chip failure.
 * NOTE(review): loop/retry structure, some braces and the return are
 * elided in this extract - verify against the complete source file.
 */
4091 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4094 	struct cs84xx_mgmt_cmd *mn;
4097 	unsigned long flags;
4098 	struct qla_hw_data *ha = vha->hw;
4100 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4101 	    "Entered %s.\n", __func__);
	/* IOCB must live in DMA-able memory; allocated from the shared pool. */
4103 	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4105 		return QLA_MEMORY_ALLOC_FAILED;
4109 	options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4110 	/* Diagnostic firmware? */
4111 	/* options |= MENLO_DIAG_FW; */
4112 	/* We update the firmware with only one data sequence. */
4113 	options |= VCO_END_OF_DATA;
4117 	memset(mn, 0, sizeof(*mn));
4118 	mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4119 	mn->p.req.entry_count = 1;
4120 	mn->p.req.options = cpu_to_le16(options);
4122 	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4123 	    "Dump of Verify Request.\n");
4124 	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
4125 	    (uint8_t *)mn, sizeof(*mn));
	/* Long (120s) timeout: firmware update can be slow. */
4127 	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4128 	if (rval != QLA_SUCCESS) {
4129 		ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4130 		    "Failed to issue verify IOCB (%x).\n", rval);
4134 	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4135 	    "Dump of Verify Response.\n");
4136 	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4137 	    (uint8_t *)mn, sizeof(*mn));
4139 	status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4140 	status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4141 	    le16_to_cpu(mn->p.rsp.failure_code) : 0;
4142 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4143 	    "cs=%x fc=%x.\n", status[0], status[1]);
4145 	if (status[0] != CS_COMPLETE) {
4146 		rval = QLA_FUNCTION_FAILED;
4147 		if (!(options & VCO_DONT_UPDATE_FW)) {
4148 			ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4149 			    "Firmware update failed. Retrying "
4150 			    "without update firmware.\n");
4151 			options |= VCO_DONT_UPDATE_FW;
4152 			options &= ~VCO_FORCE_UPDATE;
4156 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4157 		    "Firmware updated to %x.\n",
4158 		    le32_to_cpu(mn->p.rsp.fw_ver));
4160 		/* NOTE: we only update OP firmware. */
4161 		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4162 		ha->cs84xx->op_fw_version =
4163 		    le32_to_cpu(mn->p.rsp.fw_ver);
4164 		spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4170 	dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4172 	if (rval != QLA_SUCCESS) {
4173 		ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4174 		    "Failed=%x.\n", rval);
4176 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4177 		    "Done %s.\n", __func__);
/*
 * qla25xx_init_req_que() - initialize a multi-queue request queue via
 * the INITIALIZE_MULTIQ mailbox command: programs the queue DMA base,
 * length, id/rid/vp_idx/qos and resets the in/out pointer registers
 * (unless BIT_0 "queue update" option is set).
 * NOTE(review): several lines (declarations, some mb[] assignments,
 * closing braces, return) are elided in this extract - verify against
 * the complete source file.
 */
4184 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4187 	unsigned long flags;
4189 	mbx_cmd_t *mcp = &mc;
4190 	struct qla_hw_data *ha = vha->hw;
4192 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4193 	    "Entered %s.\n", __func__);
	/* BIT_13 enables shadow registers on capable adapters. */
4195 	if (IS_SHADOW_REG_CAPABLE(ha))
4196 		req->options |= BIT_13;
4198 	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4199 	mcp->mb[1] = req->options;
4200 	mcp->mb[2] = MSW(LSD(req->dma));
4201 	mcp->mb[3] = LSW(LSD(req->dma));
4202 	mcp->mb[6] = MSW(MSD(req->dma));
4203 	mcp->mb[7] = LSW(MSD(req->dma));
4204 	mcp->mb[5] = req->length;
4206 	mcp->mb[10] = req->rsp->id;
4207 	mcp->mb[12] = req->qos;
4208 	mcp->mb[11] = req->vp_idx;
4209 	mcp->mb[13] = req->rid;
4210 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4213 	mcp->mb[4] = req->id;
4214 	/* que in ptr index */
4216 	/* que out ptr index */
4217 	mcp->mb[9] = *req->out_ptr = 0;
4218 	mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4219 			MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4221 	mcp->flags = MBX_DMA_OUT;
4222 	mcp->tov = MBX_TOV_SECONDS * 2;
4224 	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
4225 		mcp->in_mb |= MBX_1;
4226 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4227 		mcp->out_mb |= MBX_15;
4228 		/* debug q create issue in SR-IOV */
4229 		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
	/* Zero the hardware queue pointers before handing the queue over. */
4232 	spin_lock_irqsave(&ha->hardware_lock, flags);
4233 	if (!(req->options & BIT_0)) {
4234 		WRT_REG_DWORD(req->req_q_in, 0);
4235 		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4236 			WRT_REG_DWORD(req->req_q_out, 0);
4238 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4240 	rval = qla2x00_mailbox_command(vha, mcp);
4241 	if (rval != QLA_SUCCESS) {
4242 		ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4243 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4245 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4246 		    "Done %s.\n", __func__);
/*
 * qla25xx_init_rsp_que() - initialize a multi-queue response queue via
 * the INITIALIZE_MULTIQ mailbox command; mirror of qla25xx_init_req_que
 * for the response side (programs DMA base, length, MSI-X entry, rid,
 * and resets the hardware in/out pointers unless BIT_0 is set).
 * NOTE(review): several lines are elided in this extract - verify
 * against the complete source file.
 */
4253 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4256 	unsigned long flags;
4258 	mbx_cmd_t *mcp = &mc;
4259 	struct qla_hw_data *ha = vha->hw;
4261 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4262 	    "Entered %s.\n", __func__);
4264 	if (IS_SHADOW_REG_CAPABLE(ha))
4265 		rsp->options |= BIT_13;
4267 	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4268 	mcp->mb[1] = rsp->options;
4269 	mcp->mb[2] = MSW(LSD(rsp->dma));
4270 	mcp->mb[3] = LSW(LSD(rsp->dma));
4271 	mcp->mb[6] = MSW(MSD(rsp->dma));
4272 	mcp->mb[7] = LSW(MSD(rsp->dma));
4273 	mcp->mb[5] = rsp->length;
4274 	mcp->mb[14] = rsp->msix->entry;
4275 	mcp->mb[13] = rsp->rid;
4276 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4279 	mcp->mb[4] = rsp->id;
4280 	/* que in ptr index */
4281 	mcp->mb[8] = *rsp->in_ptr = 0;
4282 	/* que out ptr index */
4284 	mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4285 			|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4287 	mcp->flags = MBX_DMA_OUT;
4288 	mcp->tov = MBX_TOV_SECONDS * 2;
	/* Chip-family specific extra mailbox registers. */
4290 	if (IS_QLA81XX(ha)) {
4291 		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4292 		mcp->in_mb |= MBX_1;
4293 	} else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4294 		mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4295 		mcp->in_mb |= MBX_1;
4296 		/* debug q create issue in SR-IOV */
4297 		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4300 	spin_lock_irqsave(&ha->hardware_lock, flags);
4301 	if (!(rsp->options & BIT_0)) {
4302 		WRT_REG_DWORD(rsp->rsp_q_out, 0);
4303 		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4304 			WRT_REG_DWORD(rsp->rsp_q_in, 0);
4307 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4309 	rval = qla2x00_mailbox_command(vha, mcp);
4310 	if (rval != QLA_SUCCESS) {
4311 		ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4312 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4314 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4315 		    "Done %s.\n", __func__);
/*
 * qla81xx_idc_ack() - acknowledge an Inter-Driver Communication (IDC)
 * notification by echoing QLA_IDC_ACK_REGS words from @mb back to the
 * firmware in mb[1..7].
 * NOTE(review): declarations/return are elided in this extract.
 */
4322 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4326 	mbx_cmd_t *mcp = &mc;
4328 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4329 	    "Entered %s.\n", __func__);
4331 	mcp->mb[0] = MBC_IDC_ACK;
4332 	memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4333 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4335 	mcp->tov = MBX_TOV_SECONDS;
4337 	rval = qla2x00_mailbox_command(vha, mcp);
4339 	if (rval != QLA_SUCCESS) {
4340 		ql_dbg(ql_dbg_mbx, vha, 0x10da,
4341 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4343 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4344 		    "Done %s.\n", __func__);
/*
 * qla81xx_fac_get_sector_size() - query the flash sector size via the
 * Flash Access Control (FAC) mailbox command.  Only valid on
 * 81xx/83xx/27xx parts.  On success *@sector_size gets mb[1].
 * NOTE(review): declarations/return are elided in this extract.
 */
4351 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4355 	mbx_cmd_t *mcp = &mc;
4357 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4358 	    "Entered %s.\n", __func__);
4360 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4361 	    !IS_QLA27XX(vha->hw))
4362 		return QLA_FUNCTION_FAILED;
4364 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4365 	mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4366 	mcp->out_mb = MBX_1|MBX_0;
4367 	mcp->in_mb = MBX_1|MBX_0;
4368 	mcp->tov = MBX_TOV_SECONDS;
4370 	rval = qla2x00_mailbox_command(vha, mcp);
4372 	if (rval != QLA_SUCCESS) {
4373 		ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4374 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
4375 		    rval, mcp->mb[0], mcp->mb[1]);
4377 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4378 		    "Done %s.\n", __func__);
4379 		*sector_size = mcp->mb[1];
/*
 * qla81xx_fac_do_write_enable() - toggle flash write protection via the
 * FAC mailbox command: @enable non-zero enables writes, zero restores
 * write protection.  Only valid on 81xx/83xx/27xx parts.
 * NOTE(review): declarations/return are elided in this extract.
 */
4386 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4390 	mbx_cmd_t *mcp = &mc;
4392 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4393 	    !IS_QLA27XX(vha->hw))
4394 		return QLA_FUNCTION_FAILED;
4396 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4397 	    "Entered %s.\n", __func__);
4399 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4400 	mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4401 	    FAC_OPT_CMD_WRITE_PROTECT;
4402 	mcp->out_mb = MBX_1|MBX_0;
4403 	mcp->in_mb = MBX_1|MBX_0;
4404 	mcp->tov = MBX_TOV_SECONDS;
4406 	rval = qla2x00_mailbox_command(vha, mcp);
4408 	if (rval != QLA_SUCCESS) {
4409 		ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4410 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
4411 		    rval, mcp->mb[0], mcp->mb[1]);
4413 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4414 		    "Done %s.\n", __func__);
/*
 * qla81xx_fac_erase_sector() - erase the flash sector range
 * [@start, @finish] via the FAC mailbox command; 32-bit addresses are
 * split LSW/MSW across mb[2..5].  Only valid on 81xx/83xx/27xx parts.
 * NOTE(review): declarations/return are elided in this extract.
 */
4421 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4425 	mbx_cmd_t *mcp = &mc;
4427 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4428 	    !IS_QLA27XX(vha->hw))
4429 		return QLA_FUNCTION_FAILED;
4431 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4432 	    "Entered %s.\n", __func__);
4434 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4435 	mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4436 	mcp->mb[2] = LSW(start);
4437 	mcp->mb[3] = MSW(start);
4438 	mcp->mb[4] = LSW(finish);
4439 	mcp->mb[5] = MSW(finish);
4440 	mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4441 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
4442 	mcp->tov = MBX_TOV_SECONDS;
4444 	rval = qla2x00_mailbox_command(vha, mcp);
4446 	if (rval != QLA_SUCCESS) {
4447 		ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4448 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4449 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4451 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4452 		    "Done %s.\n", __func__);
/*
 * qla81xx_restart_mpi_firmware() - request a restart of the MPI
 * (management processor) firmware via a single mailbox command.
 * NOTE(review): declarations/return are elided in this extract.
 */
4459 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4463 	mbx_cmd_t *mcp = &mc;
4465 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4466 	    "Entered %s.\n", __func__);
4468 	mcp->mb[0] = MBC_RESTART_MPI_FW;
4469 	mcp->out_mb = MBX_0;
4470 	mcp->in_mb = MBX_0|MBX_1;
4471 	mcp->tov = MBX_TOV_SECONDS;
4473 	rval = qla2x00_mailbox_command(vha, mcp);
4475 	if (rval != QLA_SUCCESS) {
4476 		ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4477 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
4478 		    rval, mcp->mb[0], mcp->mb[1]);
4480 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4481 		    "Done %s.\n", __func__);
/*
 * qla82xx_set_driver_version() - report the driver version string to a
 * P3P-type (82xx/8044) adapter by packing it two bytes per mailbox
 * register into mb[4..15] (RNID "set version" sub-command).
 * NOTE(review): declarations, the remaining-register padding inside the
 * second loop, and the return are elided in this extract.
 */
4488 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4492 	mbx_cmd_t *mcp = &mc;
4496 	struct qla_hw_data *ha = vha->hw;
4498 	if (!IS_P3P_TYPE(ha))
4499 		return QLA_FUNCTION_FAILED;
4501 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4502 	    "Entered %s.\n", __func__);
4504 	str = (void *)version;
4505 	len = strlen(version);
4507 	mcp->mb[0] = MBC_SET_RNID_PARAMS;
4508 	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4509 	mcp->out_mb = MBX_1|MBX_0;
	/* Pack the version string 2 bytes per 16-bit mailbox register. */
4510 	for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4511 		mcp->mb[i] = cpu_to_le16p(str);
4512 		mcp->out_mb |= 1<<i;
	/* Remaining registers are still flagged as outgoing (padding). */
4514 	for (; i < 16; i++) {
4516 		mcp->out_mb |= 1<<i;
4518 	mcp->in_mb = MBX_1|MBX_0;
4519 	mcp->tov = MBX_TOV_SECONDS;
4521 	rval = qla2x00_mailbox_command(vha, mcp);
4523 	if (rval != QLA_SUCCESS) {
4524 		ql_dbg(ql_dbg_mbx, vha, 0x117c,
4525 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4527 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4528 		    "Done %s.\n", __func__);
/*
 * qla25xx_set_driver_version() - report the driver version string to
 * newer FWI2 adapters via a DMA buffer (RNID "set version" with a
 * 4-byte header "\x7\x3\x11\x0" followed by the truncated string).
 * NOTE(review): declarations (str_dma, dwlen, len), some braces and the
 * return are elided in this extract - verify against the complete file.
 */
4535 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4539 	mbx_cmd_t *mcp = &mc;
4544 	struct qla_hw_data *ha = vha->hw;
	/* Excluded families use the register-packing variant instead. */
4546 	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4548 		return QLA_FUNCTION_FAILED;
4550 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4551 	    "Entered %s.\n", __func__);
4553 	str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4555 		ql_log(ql_log_warn, vha, 0x117f,
4556 		    "Failed to allocate driver version param.\n");
4557 		return QLA_MEMORY_ALLOC_FAILED;
4560 	memcpy(str, "\x7\x3\x11\x0", 4);
4562 	len = dwlen * 4 - 4;
4563 	memset(str + 4, 0, len);
4564 	if (len > strlen(version))
4565 		len = strlen(version);
4566 	memcpy(str + 4, version, len);
4568 	mcp->mb[0] = MBC_SET_RNID_PARAMS;
4569 	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4570 	mcp->mb[2] = MSW(LSD(str_dma));
4571 	mcp->mb[3] = LSW(LSD(str_dma));
4572 	mcp->mb[6] = MSW(MSD(str_dma));
4573 	mcp->mb[7] = LSW(MSD(str_dma));
4574 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4575 	mcp->in_mb = MBX_1|MBX_0;
4576 	mcp->tov = MBX_TOV_SECONDS;
4578 	rval = qla2x00_mailbox_command(vha, mcp);
4580 	if (rval != QLA_SUCCESS) {
4581 		ql_dbg(ql_dbg_mbx, vha, 0x1180,
4582 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4584 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4585 		    "Done %s.\n", __func__);
4588 	dma_pool_free(ha->s_dma_pool, str, str_dma);
/*
 * qla2x00_read_asic_temperature() - read the ASIC temperature via the
 * RNID "ASIC temp" mailbox sub-command.  *@temp presumably receives
 * mb[1] on success (the assignment line is elided in this extract).
 */
4594 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
4598 	mbx_cmd_t *mcp = &mc;
4600 	if (!IS_FWI2_CAPABLE(vha->hw))
4601 		return QLA_FUNCTION_FAILED;
4603 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4604 	    "Entered %s.\n", __func__);
4606 	mcp->mb[0] = MBC_GET_RNID_PARAMS;
4607 	mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
4608 	mcp->out_mb = MBX_1|MBX_0;
4609 	mcp->in_mb = MBX_1|MBX_0;
4610 	mcp->tov = MBX_TOV_SECONDS;
4612 	rval = qla2x00_mailbox_command(vha, mcp);
4615 	if (rval != QLA_SUCCESS) {
4616 		ql_dbg(ql_dbg_mbx, vha, 0x115a,
4617 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4619 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4620 		    "Done %s.\n", __func__);
/*
 * qla2x00_read_sfp() - read @len bytes from SFP device @dev at offset
 * @off into the DMA buffer @sfp_dma (@opt carries option bits in mb[10],
 * presumably - the mb[1]/mb[8..10] assignment lines are elided here).
 * Maps MBS_COMMAND_ERROR with the "no SFP" subcode to
 * QLA_INTERFACE_ERROR so callers can distinguish a missing module.
 */
4627 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4628     uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4632 	mbx_cmd_t *mcp = &mc;
4633 	struct qla_hw_data *ha = vha->hw;
4635 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
4636 	    "Entered %s.\n", __func__);
4638 	if (!IS_FWI2_CAPABLE(ha))
4639 		return QLA_FUNCTION_FAILED;
4644 	mcp->mb[0] = MBC_READ_SFP;
4646 	mcp->mb[2] = MSW(sfp_dma);
4647 	mcp->mb[3] = LSW(sfp_dma);
4648 	mcp->mb[6] = MSW(MSD(sfp_dma));
4649 	mcp->mb[7] = LSW(MSD(sfp_dma));
4653 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4654 	mcp->in_mb = MBX_1|MBX_0;
4655 	mcp->tov = MBX_TOV_SECONDS;
4657 	rval = qla2x00_mailbox_command(vha, mcp);
4662 	if (rval != QLA_SUCCESS) {
4663 		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
4664 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4665 		if (mcp->mb[0] == MBS_COMMAND_ERROR &&
4667 			/* sfp is not there */
4668 			rval = QLA_INTERFACE_ERROR;
4670 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
4671 		    "Done %s.\n", __func__);
/*
 * qla2x00_write_sfp() - write @len bytes from DMA buffer @sfp_dma to SFP
 * device @dev at offset @off; mirror of qla2x00_read_sfp (mb[1]/mb[8..10]
 * assignment lines for dev/off/len/opt are elided in this extract).
 */
4678 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4679     uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4683 	mbx_cmd_t *mcp = &mc;
4684 	struct qla_hw_data *ha = vha->hw;
4686 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
4687 	    "Entered %s.\n", __func__);
4689 	if (!IS_FWI2_CAPABLE(ha))
4690 		return QLA_FUNCTION_FAILED;
4698 	mcp->mb[0] = MBC_WRITE_SFP;
4700 	mcp->mb[2] = MSW(sfp_dma);
4701 	mcp->mb[3] = LSW(sfp_dma);
4702 	mcp->mb[6] = MSW(MSD(sfp_dma));
4703 	mcp->mb[7] = LSW(MSD(sfp_dma));
4707 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4708 	mcp->in_mb = MBX_1|MBX_0;
4709 	mcp->tov = MBX_TOV_SECONDS;
4711 	rval = qla2x00_mailbox_command(vha, mcp);
4713 	if (rval != QLA_SUCCESS) {
4714 		ql_dbg(ql_dbg_mbx, vha, 0x10ec,
4715 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4717 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
4718 		    "Done %s.\n", __func__);
/*
 * qla2x00_get_xgmac_stats() - fetch XGMAC statistics from a CNA-capable
 * adapter into @stats_dma.  Sizes are exchanged in dwords: mb[8] sends
 * size_in_bytes >> 2, *@actual_size returns mb[2] << 2 bytes.
 * NOTE(review): declarations/return are elided in this extract.
 */
4725 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
4726     uint16_t size_in_bytes, uint16_t *actual_size)
4730 	mbx_cmd_t *mcp = &mc;
4732 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
4733 	    "Entered %s.\n", __func__);
4735 	if (!IS_CNA_CAPABLE(vha->hw))
4736 		return QLA_FUNCTION_FAILED;
4738 	mcp->mb[0] = MBC_GET_XGMAC_STATS;
4739 	mcp->mb[2] = MSW(stats_dma);
4740 	mcp->mb[3] = LSW(stats_dma);
4741 	mcp->mb[6] = MSW(MSD(stats_dma));
4742 	mcp->mb[7] = LSW(MSD(stats_dma));
4743 	mcp->mb[8] = size_in_bytes >> 2;
4744 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
4745 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
4746 	mcp->tov = MBX_TOV_SECONDS;
4748 	rval = qla2x00_mailbox_command(vha, mcp);
4750 	if (rval != QLA_SUCCESS) {
4751 		ql_dbg(ql_dbg_mbx, vha, 0x10ef,
4752 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4753 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4755 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
4756 		    "Done %s.\n", __func__);
4759 		*actual_size = mcp->mb[2] << 2;
/*
 * qla2x00_get_dcbx_params() - fetch the DCBX TLV parameter block from a
 * CNA-capable adapter into the DMA buffer @tlv_dma (the mb[1]/mb[8] size
 * assignment lines are elided in this extract).
 */
4766 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
4771 	mbx_cmd_t *mcp = &mc;
4773 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
4774 	    "Entered %s.\n", __func__);
4776 	if (!IS_CNA_CAPABLE(vha->hw))
4777 		return QLA_FUNCTION_FAILED;
4779 	mcp->mb[0] = MBC_GET_DCBX_PARAMS;
4781 	mcp->mb[2] = MSW(tlv_dma);
4782 	mcp->mb[3] = LSW(tlv_dma);
4783 	mcp->mb[6] = MSW(MSD(tlv_dma));
4784 	mcp->mb[7] = LSW(MSD(tlv_dma));
4786 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4787 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
4788 	mcp->tov = MBX_TOV_SECONDS;
4790 	rval = qla2x00_mailbox_command(vha, mcp);
4792 	if (rval != QLA_SUCCESS) {
4793 		ql_dbg(ql_dbg_mbx, vha, 0x10f2,
4794 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4795 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4797 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
4798 		    "Done %s.\n", __func__);
/*
 * qla2x00_read_ram_word() - read one 32-bit word of RISC RAM at
 * @risc_addr.  On success *@data is assembled from mb[3] (high) and
 * mb[2] (low).  FWI2-capable adapters only.
 * NOTE(review): declarations/tov line/return are elided in this extract.
 */
4805 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
4809 	mbx_cmd_t *mcp = &mc;
4811 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
4812 	    "Entered %s.\n", __func__);
4814 	if (!IS_FWI2_CAPABLE(vha->hw))
4815 		return QLA_FUNCTION_FAILED;
4817 	mcp->mb[0] = MBC_READ_RAM_EXTENDED;
4818 	mcp->mb[1] = LSW(risc_addr);
4819 	mcp->mb[8] = MSW(risc_addr);
4820 	mcp->out_mb = MBX_8|MBX_1|MBX_0;
4821 	mcp->in_mb = MBX_3|MBX_2|MBX_0;
4824 	rval = qla2x00_mailbox_command(vha, mcp);
4825 	if (rval != QLA_SUCCESS) {
4826 		ql_dbg(ql_dbg_mbx, vha, 0x10f5,
4827 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4829 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
4830 		    "Done %s.\n", __func__);
4831 		*data = mcp->mb[3] << 16 | mcp->mb[2];
/*
 * qla2x00_loopback_test() - run the diagnostic loopback mailbox command
 * using the send/receive DMA buffers, transfer size and iteration count
 * from @mreq; copies the raw returned mailbox registers into mresp
 * (64 bytes) so the caller can inspect them.
 * NOTE(review): declarations and the return are elided in this extract.
 */
4838 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4843 	mbx_cmd_t *mcp = &mc;
4845 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
4846 	    "Entered %s.\n", __func__);
4848 	memset(mcp->mb, 0 , sizeof(mcp->mb));
4849 	mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
4850 	mcp->mb[1] = mreq->options | BIT_6;	// BIT_6 specifies 64 bit addressing
4852 	/* transfer count */
4853 	mcp->mb[10] = LSW(mreq->transfer_size);
4854 	mcp->mb[11] = MSW(mreq->transfer_size);
4856 	/* send data address */
4857 	mcp->mb[14] = LSW(mreq->send_dma);
4858 	mcp->mb[15] = MSW(mreq->send_dma);
4859 	mcp->mb[20] = LSW(MSD(mreq->send_dma));
4860 	mcp->mb[21] = MSW(MSD(mreq->send_dma));
4862 	/* receive data address */
4863 	mcp->mb[16] = LSW(mreq->rcv_dma);
4864 	mcp->mb[17] = MSW(mreq->rcv_dma);
4865 	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
4866 	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
4868 	/* Iteration count */
4869 	mcp->mb[18] = LSW(mreq->iteration_count);
4870 	mcp->mb[19] = MSW(mreq->iteration_count);
4872 	mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
4873 	    MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
4874 	if (IS_CNA_CAPABLE(vha->hw))
4875 		mcp->out_mb |= MBX_2;
4876 	mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
4878 	mcp->buf_size = mreq->transfer_size;
4879 	mcp->tov = MBX_TOV_SECONDS;
4880 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4882 	rval = qla2x00_mailbox_command(vha, mcp);
4884 	if (rval != QLA_SUCCESS) {
4885 		ql_dbg(ql_dbg_mbx, vha, 0x10f8,
4886 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
4887 		    "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
4888 		    mcp->mb[3], mcp->mb[18], mcp->mb[19]);
4890 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
4891 		    "Done %s.\n", __func__);
4894 	/* Copy mailbox information */
4895 	memcpy( mresp, mcp->mb, 64);
/*
 * qla2x00_echo_test() - run the diagnostic ECHO mailbox command using
 * the send/receive DMA buffers and transfer size from @mreq; copies the
 * raw returned mailbox registers into mresp (64 bytes).
 * NOTE(review): declarations, parts of the 64-bit-address branch and the
 * return are elided in this extract - verify against the complete file.
 */
4900 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4905 	mbx_cmd_t *mcp = &mc;
4906 	struct qla_hw_data *ha = vha->hw;
4908 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
4909 	    "Entered %s.\n", __func__);
4911 	memset(mcp->mb, 0 , sizeof(mcp->mb));
4912 	mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
4913 	/* BIT_6 specifies 64bit address */
4914 	mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
4915 	if (IS_CNA_CAPABLE(ha)) {
4916 		mcp->mb[2] = vha->fcoe_fcf_idx;
4918 	mcp->mb[16] = LSW(mreq->rcv_dma);
4919 	mcp->mb[17] = MSW(mreq->rcv_dma);
4920 	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
4921 	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
4923 	mcp->mb[10] = LSW(mreq->transfer_size);
4925 	mcp->mb[14] = LSW(mreq->send_dma);
4926 	mcp->mb[15] = MSW(mreq->send_dma);
4927 	mcp->mb[20] = LSW(MSD(mreq->send_dma));
4928 	mcp->mb[21] = MSW(MSD(mreq->send_dma));
4930 	mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
4931 	    MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
4932 	if (IS_CNA_CAPABLE(ha))
4933 		mcp->out_mb |= MBX_2;
	/* Chip-family dependent set of returned mailbox registers. */
4936 	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
4937 	    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
4938 		mcp->in_mb |= MBX_1;
4939 	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
4940 		mcp->in_mb |= MBX_3;
4942 	mcp->tov = MBX_TOV_SECONDS;
4943 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4944 	mcp->buf_size = mreq->transfer_size;
4946 	rval = qla2x00_mailbox_command(vha, mcp);
4948 	if (rval != QLA_SUCCESS) {
4949 		ql_dbg(ql_dbg_mbx, vha, 0x10fb,
4950 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
4951 		    rval, mcp->mb[0], mcp->mb[1]);
4953 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
4954 		    "Done %s.\n", __func__);
4957 	/* Copy mailbox information */
4958 	memcpy(mresp, mcp->mb, 64);
/*
 * qla84xx_reset_chip() - reset the CS84xx chip; @enable_diagnostic
 * selects diagnostic mode after reset (passed in mb[1]).
 * NOTE(review): declarations/return are elided in this extract.
 */
4963 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
4967 	mbx_cmd_t *mcp = &mc;
4969 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
4970 	    "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
4972 	mcp->mb[0] = MBC_ISP84XX_RESET;
4973 	mcp->mb[1] = enable_diagnostic;
4974 	mcp->out_mb = MBX_1|MBX_0;
4975 	mcp->in_mb = MBX_1|MBX_0;
4976 	mcp->tov = MBX_TOV_SECONDS;
4977 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4978 	rval = qla2x00_mailbox_command(vha, mcp);
4980 	if (rval != QLA_SUCCESS)
4981 		ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
4983 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
4984 		    "Done %s.\n", __func__);
/*
 * qla2x00_write_ram_word() - write one 32-bit word @data to RISC RAM at
 * @risc_addr (extended command; address split across mb[1]/mb[8], data
 * across mb[2]/mb[3]).  FWI2-capable adapters only.
 * NOTE(review): declarations/in_mb/tov lines and the return are elided
 * in this extract.
 */
4990 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
4994 	mbx_cmd_t *mcp = &mc;
4996 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
4997 	    "Entered %s.\n", __func__);
4999 	if (!IS_FWI2_CAPABLE(vha->hw))
5000 		return QLA_FUNCTION_FAILED;
5002 	mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5003 	mcp->mb[1] = LSW(risc_addr);
5004 	mcp->mb[2] = LSW(data);
5005 	mcp->mb[3] = MSW(data);
5006 	mcp->mb[8] = MSW(risc_addr);
5007 	mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5011 	rval = qla2x00_mailbox_command(vha, mcp);
5012 	if (rval != QLA_SUCCESS) {
5013 		ql_dbg(ql_dbg_mbx, vha, 0x1101,
5014 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5016 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5017 		    "Done %s.\n", __func__);
/*
 * qla81xx_write_mpi_register() - write MPI registers by banging the
 * mailbox registers directly (no mailbox-command framework): loads
 * MBC_WRITE_MPI_REGISTER plus @mb[0..3] into mailbox0..4, sets the host
 * interrupt, then busy-polls host_status for the RISC completion
 * interrupt and derives the result from mailbox0.
 * NOTE(review): the caller presumably holds the hardware lock since the
 * normal mailbox path is bypassed - confirm against the full source;
 * declarations, loop delay and some braces are elided in this extract.
 */
5024 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5027 	uint32_t stat, timer;
5029 	struct qla_hw_data *ha = vha->hw;
5030 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5034 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5035 	    "Entered %s.\n", __func__);
5037 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5039 	/* Write the MBC data to the registers */
5040 	WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5041 	WRT_REG_WORD(&reg->mailbox1, mb[0]);
5042 	WRT_REG_WORD(&reg->mailbox2, mb[1]);
5043 	WRT_REG_WORD(&reg->mailbox3, mb[2]);
5044 	WRT_REG_WORD(&reg->mailbox4, mb[3]);
5046 	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
5048 	/* Poll for MBC interrupt */
5049 	for (timer = 6000000; timer; timer--) {
5050 		/* Check for pending interrupts. */
5051 		stat = RD_REG_DWORD(&reg->host_status);
5052 		if (stat & HSRX_RISC_INT) {
			/* Low status codes indicate a mailbox completion. */
5055 			if (stat == 0x1 || stat == 0x2 ||
5056 			    stat == 0x10 || stat == 0x11) {
5057 				set_bit(MBX_INTERRUPT,
5058 				    &ha->mbx_cmd_flags);
5059 				mb0 = RD_REG_WORD(&reg->mailbox0);
5060 				WRT_REG_DWORD(&reg->hccr,
5061 				    HCCRX_CLR_RISC_INT);
5062 				RD_REG_DWORD(&reg->hccr);
5069 	if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5070 		rval = mb0 & MBS_MASK;
5072 		rval = QLA_FUNCTION_FAILED;
5074 	if (rval != QLA_SUCCESS) {
5075 		ql_dbg(ql_dbg_mbx, vha, 0x1104,
5076 		    "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5078 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5079 		    "Done %s.\n", __func__);
/*
 * qla2x00_get_data_rate() - query the current link data rate; caches
 * mb[1] in ha->link_data_rate unless it is the 0x7 sentinel.
 * FWI2-capable adapters only.
 * NOTE(review): declarations, the mb[1] request-value line and the
 * return are elided in this extract.
 */
5086 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5090 	mbx_cmd_t *mcp = &mc;
5091 	struct qla_hw_data *ha = vha->hw;
5093 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5094 	    "Entered %s.\n", __func__);
5096 	if (!IS_FWI2_CAPABLE(ha))
5097 		return QLA_FUNCTION_FAILED;
5099 	mcp->mb[0] = MBC_DATA_RATE;
5101 	mcp->out_mb = MBX_1|MBX_0;
5102 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5103 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
5104 		mcp->in_mb |= MBX_3;
5105 	mcp->tov = MBX_TOV_SECONDS;
5107 	rval = qla2x00_mailbox_command(vha, mcp);
5108 	if (rval != QLA_SUCCESS) {
5109 		ql_dbg(ql_dbg_mbx, vha, 0x1107,
5110 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5112 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5113 		    "Done %s.\n", __func__);
		/* 0x7 appears to be an "unknown rate" sentinel - not cached. */
5114 		if (mcp->mb[1] != 0x7)
5115 			ha->link_data_rate = mcp->mb[1];
/*
 * qla81xx_get_port_config() - read the four port-configuration words
 * into @mb[0..3] from returned mailbox registers mb[1..4] (all bits are
 * copied to preserve the original value for a later set).
 * NOTE(review): declarations/return are elided in this extract.
 */
5122 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5126 	mbx_cmd_t *mcp = &mc;
5127 	struct qla_hw_data *ha = vha->hw;
5129 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5130 	    "Entered %s.\n", __func__);
5132 	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5134 		return QLA_FUNCTION_FAILED;
5135 	mcp->mb[0] = MBC_GET_PORT_CONFIG;
5136 	mcp->out_mb = MBX_0;
5137 	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5138 	mcp->tov = MBX_TOV_SECONDS;
5141 	rval = qla2x00_mailbox_command(vha, mcp);
5143 	if (rval != QLA_SUCCESS) {
5144 		ql_dbg(ql_dbg_mbx, vha, 0x110a,
5145 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5147 		/* Copy all bits to preserve original value */
5148 		memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5150 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5151 		    "Done %s.\n", __func__);
/*
 * qla81xx_set_port_config() - write the four port-configuration words
 * from @mb[0..3] into outgoing mailbox registers mb[1..4]; counterpart
 * of qla81xx_get_port_config.
 * NOTE(review): declarations/in_mb line/return are elided here.
 */
5157 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5161 	mbx_cmd_t *mcp = &mc;
5163 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5164 	    "Entered %s.\n", __func__);
5166 	mcp->mb[0] = MBC_SET_PORT_CONFIG;
5167 	/* Copy all bits to preserve original setting */
5168 	memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5169 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5171 	mcp->tov = MBX_TOV_SECONDS;
5173 	rval = qla2x00_mailbox_command(vha, mcp);
5175 	if (rval != QLA_SUCCESS) {
5176 		ql_dbg(ql_dbg_mbx, vha, 0x110d,
5177 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5179 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5180 		    "Done %s.\n", __func__);
/*
 * qla24xx_set_fcp_prio() - set the FCP priority (@priority low nibble,
 * mb[4]) for the port at @loop_id via the PORT_PARAMS command.
 * 24xx/25xx only; the enable-dependent mb[2]/mb[3] assignments are
 * elided in this extract.
 */
5187 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5192 	mbx_cmd_t *mcp = &mc;
5193 	struct qla_hw_data *ha = vha->hw;
5195 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5196 	    "Entered %s.\n", __func__);
5198 	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5199 		return QLA_FUNCTION_FAILED;
5201 	mcp->mb[0] = MBC_PORT_PARAMS;
5202 	mcp->mb[1] = loop_id;
5203 	if (ha->flags.fcp_prio_enabled)
5207 	mcp->mb[4] = priority & 0xf;
5208 	mcp->mb[9] = vha->vp_idx;
5209 	mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5210 	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5213 	rval = qla2x00_mailbox_command(vha, mcp);
5221 	if (rval != QLA_SUCCESS) {
5222 		ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5224 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5225 		    "Done %s.\n", __func__);
/*
 * qla2x00_get_thermal_temp() - read board temperature into *@temp,
 * dispatching on adapter family: specific 25xx subsystem IDs read the
 * SFP thermal register, 82xx/8044 use register reads, everything else
 * falls through to the ASIC-temperature mailbox command.
 * NOTE(review): goto/return plumbing and *temp = byte conversions are
 * elided in this extract - verify against the complete source file.
 */
5232 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5234 	int rval = QLA_FUNCTION_FAILED;
5235 	struct qla_hw_data *ha = vha->hw;
5238 	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5239 		ql_dbg(ql_dbg_mbx, vha, 0x1150,
5240 		    "Thermal not supported by this card.\n");
	/* 25xx: thermal lives in the SFP; only known subsystem IDs supported. */
5244 	if (IS_QLA25XX(ha)) {
5245 		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5246 		    ha->pdev->subsystem_device == 0x0175) {
5247 			rval = qla2x00_read_sfp(vha, 0, &byte,
5248 			    0x98, 0x1, 1, BIT_13|BIT_0);
5252 		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5253 		    ha->pdev->subsystem_device == 0x338e) {
5254 			rval = qla2x00_read_sfp(vha, 0, &byte,
5255 			    0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5259 		ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5260 		    "Thermal not supported by this card.\n");
5264 	if (IS_QLA82XX(ha)) {
5265 		*temp = qla82xx_read_temperature(vha);
5268 	} else if (IS_QLA8044(ha)) {
5269 		*temp = qla8044_read_temperature(vha);
5274 	rval = qla2x00_read_asic_temperature(vha, temp);
/*
 * qla82xx_mbx_intr_enable() - enable firmware interrupts via the
 * TOGGLE_INTERRUPT mailbox command (the mb[1] enable-value, in_mb and
 * tov lines are elided in this extract).
 */
5279 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5282 	struct qla_hw_data *ha = vha->hw;
5284 	mbx_cmd_t *mcp = &mc;
5286 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5287 	    "Entered %s.\n", __func__);
5289 	if (!IS_FWI2_CAPABLE(ha))
5290 		return QLA_FUNCTION_FAILED;
5292 	memset(mcp, 0, sizeof(mbx_cmd_t));
5293 	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5296 	mcp->out_mb = MBX_1|MBX_0;
5301 	rval = qla2x00_mailbox_command(vha, mcp);
5302 	if (rval != QLA_SUCCESS) {
5303 		ql_dbg(ql_dbg_mbx, vha, 0x1016,
5304 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5306 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5307 		    "Done %s.\n", __func__);
/*
 * qla82xx_mbx_intr_disable() - disable firmware interrupts on P3P-type
 * adapters via TOGGLE_INTERRUPT; mirror of qla82xx_mbx_intr_enable
 * (the mb[1] disable-value, in_mb and tov lines are elided here).
 */
5314 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5317 	struct qla_hw_data *ha = vha->hw;
5319 	mbx_cmd_t *mcp = &mc;
5321 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5322 	    "Entered %s.\n", __func__);
5324 	if (!IS_P3P_TYPE(ha))
5325 		return QLA_FUNCTION_FAILED;
5327 	memset(mcp, 0, sizeof(mbx_cmd_t));
5328 	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5331 	mcp->out_mb = MBX_1|MBX_0;
5336 	rval = qla2x00_mailbox_command(vha, mcp);
5337 	if (rval != QLA_SUCCESS) {
5338 		ql_dbg(ql_dbg_mbx, vha, 0x100c,
5339 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5341 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5342 		    "Done %s.\n", __func__);
/*
 * qla82xx_md_get_template_size() - query the minidump template size.
 * The 32-bit command and sub-command are split LSW/MSW into mb[0..3];
 * the size comes back assembled from mb[3]:mb[2] and is stored in
 * ha->md_template_size.  A zero size is treated as failure.
 * NOTE(review): closing braces and the return are elided here.
 */
5349 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5351 	struct qla_hw_data *ha = vha->hw;
5353 	mbx_cmd_t *mcp = &mc;
5354 	int rval = QLA_FUNCTION_FAILED;
5356 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5357 	    "Entered %s.\n", __func__);
5359 	memset(mcp->mb, 0 , sizeof(mcp->mb));
5360 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5361 	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5362 	mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5363 	mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5365 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5366 	mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5367 	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5369 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5370 	mcp->tov = MBX_TOV_SECONDS;
5371 	rval = qla2x00_mailbox_command(vha, mcp);
5373 	/* Always copy back return mailbox values. */
5374 	if (rval != QLA_SUCCESS) {
5375 		ql_dbg(ql_dbg_mbx, vha, 0x1120,
5376 		    "mailbox command FAILED=0x%x, subcode=%x.\n",
5377 		    (mcp->mb[1] << 16) | mcp->mb[0],
5378 		    (mcp->mb[3] << 16) | mcp->mb[2]);
5380 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5381 		    "Done %s.\n", __func__);
5382 		ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5383 		if (!ha->md_template_size) {
5384 			ql_dbg(ql_dbg_mbx, vha, 0x1122,
5385 			    "Null template size obtained.\n");
5386 			rval = QLA_FUNCTION_FAILED;
/*
 * qla82xx_md_get_template() - allocate a coherent DMA buffer of
 * ha->md_template_size bytes and fetch the minidump template into it in
 * one mailbox transaction (82xx variant; 8044 uses a chunked fetch).
 * NOTE(review): the allocation-failure return and function return are
 * elided in this extract.
 */
5393 qla82xx_md_get_template(scsi_qla_host_t *vha)
5395 	struct qla_hw_data *ha = vha->hw;
5397 	mbx_cmd_t *mcp = &mc;
5398 	int rval = QLA_FUNCTION_FAILED;
5400 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5401 	    "Entered %s.\n", __func__);
5403 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5404 	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5405 	if (!ha->md_tmplt_hdr) {
5406 		ql_log(ql_log_warn, vha, 0x1124,
5407 		    "Unable to allocate memory for Minidump template.\n");
5411 	memset(mcp->mb, 0 , sizeof(mcp->mb));
5412 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5413 	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5414 	mcp->mb[2] = LSW(RQST_TMPLT);
5415 	mcp->mb[3] = MSW(RQST_TMPLT);
5416 	mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5417 	mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5418 	mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5419 	mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5420 	mcp->mb[8] = LSW(ha->md_template_size);
5421 	mcp->mb[9] = MSW(ha->md_template_size);
5423 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5424 	mcp->tov = MBX_TOV_SECONDS;
5425 	mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5426 	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5427 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5428 	rval = qla2x00_mailbox_command(vha, mcp);
5430 	if (rval != QLA_SUCCESS) {
5431 		ql_dbg(ql_dbg_mbx, vha, 0x1125,
5432 		    "mailbox command FAILED=0x%x, subcode=%x.\n",
5433 		    ((mcp->mb[1] << 16) | mcp->mb[0]),
5434 		    ((mcp->mb[3] << 16) | mcp->mb[2]));
5436 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5437 		    "Done %s.\n", __func__);
/*
 * qla8044_md_get_template() - 8044 variant of the minidump template
 * fetch: allocates a coherent buffer of ha->md_template_size bytes and
 * retrieves the template in MINIDUMP_SIZE_36K chunks, advancing the DMA
 * address and the offset words in mb[10]/mb[11] each iteration.
 * NOTE(review): mb[11] is assigned "offset & 0xFFFF0000" (not shifted
 * right) for a 16-bit mailbox register - looks suspicious, verify
 * against the firmware spec / full source; loop braces and return are
 * elided in this extract.
 */
5442 qla8044_md_get_template(scsi_qla_host_t *vha)
5444 	struct qla_hw_data *ha = vha->hw;
5446 	mbx_cmd_t *mcp = &mc;
5447 	int rval = QLA_FUNCTION_FAILED;
5448 	int offset = 0, size = MINIDUMP_SIZE_36K;
5449 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5450 	    "Entered %s.\n", __func__);
5452 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5453 	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5454 	if (!ha->md_tmplt_hdr) {
5455 		ql_log(ql_log_warn, vha, 0xb11b,
5456 		    "Unable to allocate memory for Minidump template.\n");
5460 	memset(mcp->mb, 0 , sizeof(mcp->mb));
	/* Fetch the template chunk-by-chunk until the full size is read. */
5461 	while (offset < ha->md_template_size) {
5462 		mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5463 		mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5464 		mcp->mb[2] = LSW(RQST_TMPLT);
5465 		mcp->mb[3] = MSW(RQST_TMPLT);
5466 		mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5467 		mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
5468 		mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
5469 		mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
5470 		mcp->mb[8] = LSW(size);
5471 		mcp->mb[9] = MSW(size);
5472 		mcp->mb[10] = offset & 0x0000FFFF;
5473 		mcp->mb[11] = offset & 0xFFFF0000;
5474 		mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5475 		mcp->tov = MBX_TOV_SECONDS;
5476 		mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5477 			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5478 		mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5479 		rval = qla2x00_mailbox_command(vha, mcp);
5481 		if (rval != QLA_SUCCESS) {
5482 			ql_dbg(ql_dbg_mbx, vha, 0xb11c,
5483 			    "mailbox command FAILED=0x%x, subcode=%x.\n",
5484 			    ((mcp->mb[1] << 16) | mcp->mb[0]),
5485 			    ((mcp->mb[3] << 16) | mcp->mb[2]));
5488 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
5489 			    "Done %s.\n", __func__);
5490 			offset = offset + size;
/*
 * qla81xx_set_led_config() - Program adapter LED configuration words
 * via MBC_SET_LED_CONFIG.
 *
 * @vha: host adapter
 * @led_cfg: LED configuration words; two are used on ISP81xx, six on
 *	ISP8031 (loaded into mb[1..6]).
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t *mcp = &mc;

	/* Command only supported on ISP81xx and ISP8031. */
	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
	    "Entered %s.\n", __func__);

	memset(mcp, 0, sizeof(mbx_cmd_t));
	mcp->mb[0] = MBC_SET_LED_CONFIG;
	mcp->mb[1] = led_cfg[0];
	mcp->mb[2] = led_cfg[1];
	/* ISP8031 takes four additional configuration words. */
	if (IS_QLA8031(ha)) {
		mcp->mb[3] = led_cfg[2];
		mcp->mb[4] = led_cfg[3];
		mcp->mb[5] = led_cfg[4];
		mcp->mb[6] = led_cfg[5];

	mcp->out_mb = MBX_2|MBX_1|MBX_0;
		/* NOTE(review): presumably gated on IS_QLA8031 -- guard
		 * line not visible here; confirm against driver history. */
		mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1134,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
	    "Done %s.\n", __func__);
/*
 * qla81xx_get_led_config() - Read back the adapter LED configuration
 * words via MBC_GET_LED_CONFIG.
 *
 * @vha: host adapter
 * @led_cfg: output array; two words filled on ISP81xx (mb[1..2]),
 *	six on ISP8031 (mb[1..6]).
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t *mcp = &mc;

	/* Command only supported on ISP81xx and ISP8031. */
	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
	    "Entered %s.\n", __func__);

	memset(mcp, 0, sizeof(mbx_cmd_t));
	mcp->mb[0] = MBC_GET_LED_CONFIG;

	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
		/* NOTE(review): presumably gated on IS_QLA8031 -- guard
		 * line not visible here; confirm against driver history. */
		mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1137,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	/* Success path: copy returned mailbox registers to caller. */
	led_cfg[0] = mcp->mb[1];
	led_cfg[1] = mcp->mb[2];
	if (IS_QLA8031(ha)) {
		led_cfg[2] = mcp->mb[3];
		led_cfg[3] = mcp->mb[4];
		led_cfg[4] = mcp->mb[5];
		led_cfg[5] = mcp->mb[6];
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
	    "Done %s.\n", __func__);
/*
 * qla82xx_mbx_beacon_ctl() - Turn the beacon LED on or off on P3P-type
 * (ISP82xx-family) adapters using MBC_SET_LED_CONFIG.
 *
 * @vha: host adapter
 * @enable: nonzero to enable beaconing, zero to disable
 *	(the mb[7] assignment that consumes @enable is not visible in
 *	this view -- confirm against full source).
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t *mcp = &mc;

	if (!IS_P3P_TYPE(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
	    "Entered %s.\n", __func__);

	memset(mcp, 0, sizeof(mbx_cmd_t));
	mcp->mb[0] = MBC_SET_LED_CONFIG;
	mcp->out_mb = MBX_7|MBX_0;

	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1128,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
	    "Done %s.\n", __func__);
/*
 * qla83xx_wr_reg() - Write a 32-bit value to a remote (CAMRAM) register
 * on ISP83xx/ISP27xx via MBC_WRITE_REMOTE_REG.
 *
 * @vha: host adapter
 * @reg: 32-bit register address (split into mb[1]/mb[2])
 * @data: 32-bit value to write (split into mb[3]/mb[4])
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
	mcp->mb[1] = LSW(reg);
	mcp->mb[2] = MSW(reg);
	mcp->mb[3] = LSW(data);
	mcp->mb[4] = MSW(data);
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;

	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1131,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
	    "Done %s.\n", __func__);
/*
 * qla2x00_port_logout() - Issue an implicit LOGO for the given remote
 * port via MBC_PORT_LOGOUT.
 *
 * @vha: host adapter
 * @fcport: remote port to log out (identified by loop_id)
 *
 * Not supported on ISP2100/ISP2200. BIT_15 in mb[10] selects the
 * implicit-logout option of the command.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t *mcp = &mc;

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
		    "Implicit LOGO Unsupported.\n");
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
	    "Entering %s.\n", __func__);

	/* Perform Implicit LOGO. */
	mcp->mb[0] = MBC_PORT_LOGOUT;
	mcp->mb[1] = fcport->loop_id;
	mcp->mb[10] = BIT_15;
	mcp->out_mb = MBX_10|MBX_1|MBX_0;

	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_mbx, vha, 0x113d,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
	    "Done %s.\n", __func__);
/*
 * qla83xx_rd_reg() - Read a 32-bit remote (CAMRAM) register on
 * ISP83xx/ISP27xx via MBC_READ_REMOTE_REG.
 *
 * @vha: host adapter
 * @reg: 32-bit register address
 * @data: out parameter; assembled from mb[3] (low) and mb[4] (high)
 *
 * During soft-reset CAMRAM reads can return the sentinel
 * QLA8XXX_BAD_VALUE (0xbad0bad0); the command is retried for up to
 * 2 seconds (the retry label/loop line is not visible in this view).
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;
	unsigned long retry_max_time = jiffies + (2 * HZ);

	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_READ_REMOTE_REG;
	mcp->mb[1] = LSW(reg);
	mcp->mb[2] = MSW(reg);
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x114c,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	/* Reassemble 32-bit value from the two 16-bit mailbox registers. */
	*data = (mcp->mb[3] | (mcp->mb[4] << 16));
	if (*data == QLA8XXX_BAD_VALUE) {
		/*
		 * During soft-reset CAMRAM register reads might
		 * return 0xbad0bad0. So retry for MAX of 2 sec
		 * while reading camram registers.
		 */
		if (time_after(jiffies, retry_max_time)) {
			ql_dbg(ql_dbg_mbx, vha, 0x1141,
			    "Failure to read CAMRAM register. "
			    "data=0x%x.\n", *data);
			return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
/*
 * qla83xx_restart_nic_firmware() - Ask the ISP83xx/ISP27xx firmware to
 * restart the NIC-side firmware via MBC_RESTART_NIC_FIRMWARE.
 *
 * @vha: host adapter
 *
 * On failure, captures a firmware dump for post-mortem analysis.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1144,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
		/* Snapshot firmware state for debugging the failure. */
		ha->isp_ops->fw_dump(vha, 0);
	ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
/*
 * qla83xx_access_control() - Issue MBC_SET_ACCESS_CONTROL on ISP8031.
 *
 * @vha: host adapter
 * @options: command options; low byte is the subcode whose bits select
 *	the sub-operation (BIT_2: address-range op taking start/end;
 *	BIT_5: returns sector size; BIT_6/BIT_7: driver-lock ops;
 *	BIT_3/BIT_4: flash-lock ops)
 * @start_addr: range start (used only when subcode BIT_2 is set)
 * @end_addr: range end (used only when subcode BIT_2 is set)
 * @sector_size: out parameter, filled when subcode BIT_5 is set
 *
 * On failure, captures a firmware dump.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
	uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
	mbx_cmd_t *mcp = &mc;
	uint8_t subcode = (uint8_t)options;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA8031(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
	mcp->mb[1] = options;
	mcp->out_mb = MBX_1|MBX_0;
	/* Address-range sub-operations carry start/end in mb[2..5]. */
	if (subcode & BIT_2) {
		mcp->mb[2] = LSW(start_addr);
		mcp->mb[3] = MSW(start_addr);
		mcp->mb[4] = LSW(end_addr);
		mcp->mb[5] = MSW(end_addr);
		mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;

	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	/* Lock-id sub-operations return extra words in mb[3..4]. */
	if (!(subcode & (BIT_2 | BIT_5)))
		mcp->in_mb |= MBX_4|MBX_3;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1147,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
		/* Snapshot firmware state for debugging the failure. */
		ha->isp_ops->fw_dump(vha, 0);

	if (subcode & BIT_5)
		*sector_size = mcp->mb[1];
	else if (subcode & (BIT_6 | BIT_7)) {
		ql_dbg(ql_dbg_mbx, vha, 0x1148,
		    "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
	} else if (subcode & (BIT_3 | BIT_4)) {
		ql_dbg(ql_dbg_mbx, vha, 0x1149,
		    "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);

	ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
5833 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
5838 mbx_cmd_t *mcp = &mc;
5840 if (!IS_MCTP_CAPABLE(vha->hw))
5841 return QLA_FUNCTION_FAILED;
5843 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
5844 "Entered %s.\n", __func__);
5846 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
5847 mcp->mb[1] = LSW(addr);
5848 mcp->mb[2] = MSW(req_dma);
5849 mcp->mb[3] = LSW(req_dma);
5850 mcp->mb[4] = MSW(size);
5851 mcp->mb[5] = LSW(size);
5852 mcp->mb[6] = MSW(MSD(req_dma));
5853 mcp->mb[7] = LSW(MSD(req_dma));
5854 mcp->mb[8] = MSW(addr);
5855 /* Setting RAM ID to valid */
5856 /* For MCTP RAM ID is 0x40 */
5857 mcp->mb[10] = BIT_7 | 0x40;
5859 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
5863 mcp->tov = MBX_TOV_SECONDS;
5865 rval = qla2x00_mailbox_command(vha, mcp);
5867 if (rval != QLA_SUCCESS) {
5868 ql_dbg(ql_dbg_mbx, vha, 0x114e,
5869 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5871 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
5872 "Done %s.\n", __func__);
/*
 * qla26xx_dport_diagnostics() - Run D-port diagnostics on
 * ISP83xx/ISP27xx and DMA the results into @dd_buf.
 *
 * @vha: host adapter
 * @dd_buf: host buffer receiving diagnostic data (streaming-mapped
 *	here for DMA_FROM_DEVICE)
 * @size: buffer size in bytes
 * @options: diagnostic options passed through in mb[1]
 *
 * Returns QLA_SUCCESS, QLA_MEMORY_ALLOC_FAILED on DMA-mapping failure,
 * or QLA_FUNCTION_FAILED.
 */
qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
	void *dd_buf, uint size, uint options)
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
	    "Entered %s.\n", __func__);

	/* Streaming mapping: must be unmapped before returning. */
	dd_dma = dma_map_single(&vha->hw->pdev->dev,
	    dd_buf, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
		return QLA_MEMORY_ALLOC_FAILED;

	memset(dd_buf, 0, size);

	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
	mcp->mb[1] = options;
	/* 64-bit DMA address packed into mb[2..3]/mb[6..7]. */
	mcp->mb[2] = MSW(LSD(dd_dma));
	mcp->mb[3] = LSW(LSD(dd_dma));
	mcp->mb[6] = MSW(MSD(dd_dma));
	mcp->mb[7] = LSW(MSD(dd_dma));

	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_IN;
	/* Diagnostics take longer than a normal mailbox command. */
	mcp->tov = MBX_TOV_SECONDS * 4;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
	    "Done %s.\n", __func__);

	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
	    size, DMA_FROM_DEVICE);
/*
 * SRB completion callback for async mailbox IOCBs: records the result
 * code and wakes the waiter blocked in qla24xx_send_mb_cmd().
 */
static void qla2x00_async_mb_sp_done(void *s, int res)
	sp->u.iocb_cmd.u.mbx.rc = res;

	complete(&sp->u.iocb_cmd.u.mbx.comp);
	/* don't free sp here. Let the caller do the free */
/*
 * This mailbox uses the iocb interface to send MB commands. This allows
 * non-critical (non chip-setup) commands to go out in parallel with the
 * regular polled mailbox path, without holding the mailbox serialization
 * lock for the duration of the command.
 */
int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
	int rval = QLA_FUNCTION_FAILED;

	/* Firmware must be up; otherwise the IOCB path is unusable. */
	if (!vha->hw->flags.fw_started)

	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);

	sp->type = SRB_MB_IOCB;
	sp->name = mb_to_str(mcp->mb[0]);

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	/* Out-going mailbox registers travel inside the IOCB payload. */
	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);

	c = &sp->u.iocb_cmd;
	c->timeout = qla2x00_async_iocb_timeout;
	init_completion(&c->u.mbx.comp);

	/* qla2x00_async_mb_sp_done() completes c->u.mbx.comp. */
	sp->done = qla2x00_async_mb_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1018,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);

	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
	    sp->name, sp->handle);

	/* Block until the completion callback fires, then copy results. */
	wait_for_completion(&c->u.mbx.comp);
	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);

	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);

		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
		    __func__, sp->name);

		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
/*
 * qla24xx_gpdb_wait() - Synchronously fetch and parse the firmware port
 * database entry for @fcport via the async-mailbox IOCB path.
 *
 * NOTE: Do not call this routine from the DPC thread -- it blocks on a
 * completion that the DPC thread may be needed to deliver.
 */
int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
	int rval = QLA_FUNCTION_FAILED;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->hw->flags.fw_started)

	/* DMA-able scratch buffer for the firmware-written database. */
	pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
		ql_log(ql_log_warn, vha, 0xd047,
		    "Failed to allocate port database structure.\n");
	memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_PORT_DATABASE;
	mc.mb[1] = cpu_to_le16(fcport->loop_id);
	/* 64-bit DMA address packed into mb[2..3]/mb[6..7]. */
	mc.mb[2] = MSW(pd_dma);
	mc.mb[3] = LSW(pd_dma);
	mc.mb[6] = MSW(MSD(pd_dma));
	mc.mb[7] = LSW(MSD(pd_dma));
	mc.mb[9] = cpu_to_le16(vha->vp_idx);
	mc.mb[10] = cpu_to_le16((uint16_t)opt);

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1193,
		    "%s: %8phC fail\n", __func__, fcport->port_name);

	rval = __qla24xx_parse_gpdb(vha, fcport, pd);

	ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
	    __func__, fcport->port_name);

	dma_pool_free(ha->s_dma_pool, pd, pd_dma);
/*
 * __qla24xx_parse_gpdb() - Validate a firmware port database entry and
 * copy its contents into @fcport.
 *
 * @vha: host adapter
 * @fcport: remote port to update
 * @pd: firmware-written port database entry
 *
 * Returns QLA_SUCCESS, QLA_FUNCTION_FAILED if the port is not in
 * PRLI-complete state, or QLA_NOT_LOGGED_IN if the entry no longer
 * matches @fcport (device lost mid-way).
 */
int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
    struct port_database_24xx *pd)
	int rval = QLA_SUCCESS;
	u8 current_login_state, last_login_state;

	/* NVMe login state lives in the high nibble, FCP in the low. */
	if (fcport->fc4f_nvme) {
		current_login_state = pd->current_login_state >> 4;
		last_login_state = pd->last_login_state >> 4;
		current_login_state = pd->current_login_state & 0xf;
		last_login_state = pd->last_login_state & 0xf;

	/* Check for logged in state. */
	if (current_login_state != PDS_PRLI_COMPLETE) {
		ql_dbg(ql_dbg_mbx, vha, 0x119a,
		    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
		    current_login_state, last_login_state, fcport->loop_id);
		rval = QLA_FUNCTION_FAILED;

	/* A known (non-zero) WWPN must still match the database entry. */
	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
	     memcmp(fcport->port_name, pd->port_name, 8))) {
		/* We lost the device mid way. */
		rval = QLA_NOT_LOGGED_IN;

	/* Names are little-endian. */
	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
	memcpy(fcport->port_name, pd->port_name, WWN_SIZE);

	/* Get port_id of device. */
	fcport->d_id.b.domain = pd->port_id[0];
	fcport->d_id.b.area = pd->port_id[1];
	fcport->d_id.b.al_pa = pd->port_id[2];
	fcport->d_id.b.rsvd_1 = 0;

	if (fcport->fc4f_nvme) {
		fcport->nvme_prli_service_param =
		    pd->prli_nvme_svc_param_word_3;
		fcport->port_type = FCT_NVME;
	/* If not target must be initiator or unknown type. */
	if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
		fcport->port_type = FCT_INITIATOR;
		fcport->port_type = FCT_TARGET;

	/* Passback COS information. */
	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
	    FC_COS_CLASS2 : FC_COS_CLASS3;

	/* Confirmed-completion support advertised in PRLI word 3. */
	if (pd->prli_svc_param_word_3[0] & BIT_7) {
		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
		fcport->conf_compl_supported = 1;
/*
 * qla24xx_gidlist_wait() - Synchronously fetch the firmware ID list
 * into the caller-supplied DMA buffer via the async-mailbox IOCB path.
 * NOTE: don't call this routine from the DPC thread.
 */
int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
	void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->hw->flags.fw_started)

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_ID_LIST;
	/* 64-bit DMA address packed into mb[2..3]/mb[6..7]. */
	mc.mb[2] = MSW(id_list_dma);
	mc.mb[3] = LSW(id_list_dma);
	mc.mb[6] = MSW(MSD(id_list_dma));
	mc.mb[7] = LSW(MSD(id_list_dma));

	mc.mb[9] = cpu_to_le16(vha->vp_idx);

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x119b,
		    "%s: fail\n", __func__);
	/* mb[1] returns the number of entries firmware wrote. */
	*entries = mc.mb[1];
	ql_dbg(ql_dbg_mbx, vha, 0x119c,
	    "%s: done\n", __func__);
/*
 * qla27xx_set_zio_threshold() - Program the ZIO (zero-interrupt
 * operation) threshold via MBC_GET_SET_ZIO_THRESHOLD (mb[1]=1 selects
 * the "set" sub-operation, mb[2] carries the new value).
 *
 * Returns QLA_SUCCESS or a mailbox failure code.
 */
int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = cpu_to_le16(1);
	mcp->mb[2] = cpu_to_le16(value);
	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed"  : "Done", rval);
/*
 * qla27xx_get_zio_threshold() - Read the current ZIO threshold via
 * MBC_GET_SET_ZIO_THRESHOLD (mb[1]=0 selects the "get" sub-operation;
 * the value is returned in mb[2] and stored into *value on success --
 * the assignment line is not visible in this view).
 *
 * Returns QLA_SUCCESS or a mailbox failure code.
 */
int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = cpu_to_le16(0);
	mcp->out_mb = MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS)

	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6217 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6219 struct qla_hw_data *ha = vha->hw;
6220 uint16_t iter, addr, offset;
6221 dma_addr_t phys_addr;
6225 memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6227 phys_addr = ha->sfp_data_dma;
6228 sfp_data = ha->sfp_data;
6231 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6233 /* Skip to next device address. */
6238 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6239 addr, offset, SFP_BLOCK_SIZE, BIT_1);
6240 if (rval != QLA_SUCCESS) {
6241 ql_log(ql_log_warn, vha, 0x706d,
6242 "Unable to read SFP data (%x/%x/%x).\n", rval,
6248 if (buf && (c < count)) {
6251 if ((count - c) >= SFP_BLOCK_SIZE)
6252 sz = SFP_BLOCK_SIZE;
6256 memcpy(buf, sfp_data, sz);
6257 buf += SFP_BLOCK_SIZE;
6260 phys_addr += SFP_BLOCK_SIZE;
6261 sfp_data += SFP_BLOCK_SIZE;
6262 offset += SFP_BLOCK_SIZE;