2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/gfp.h>
15 * qla2x00_mailbox_command
16 * Issue mailbox command and waits for completion.
19 * ha = adapter block pointer.
20 * mcp = driver internal mbx struct pointer.
23 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
26 * 0 : QLA_SUCCESS = cmd performed successfully
27 * 1 : QLA_FUNCTION_FAILED (error encountered)
28 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
34 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
37 unsigned long flags = 0;
43 uint16_t __iomem *optr;
46 uint16_t __iomem *mbx_reg;
47 unsigned long wait_time;
48 struct qla_hw_data *ha = vha->hw;
49 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
52 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
54 if (ha->pdev->error_state > pci_channel_io_frozen) {
55 ql_log(ql_log_warn, vha, 0x1001,
56 "error_state is greater than pci_channel_io_frozen, "
58 return QLA_FUNCTION_TIMEOUT;
61 if (vha->device_flags & DFLG_DEV_FAILED) {
62 ql_log(ql_log_warn, vha, 0x1002,
63 "Device in failed state, exiting.\n");
64 return QLA_FUNCTION_TIMEOUT;
68 io_lock_on = base_vha->flags.init_done;
71 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
74 if (ha->flags.pci_channel_io_perm_failure) {
75 ql_log(ql_log_warn, vha, 0x1003,
76 "Perm failure on EEH timeout MBX, exiting.\n");
77 return QLA_FUNCTION_TIMEOUT;
80 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
81 /* Setting Link-Down error */
82 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
83 ql_log(ql_log_warn, vha, 0x1004,
84 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
85 return QLA_FUNCTION_TIMEOUT;
89 * Wait for active mailbox commands to finish by waiting at most tov
90 * seconds. This is to serialize actual issuing of mailbox cmds during
93 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
94 /* Timeout occurred. Return error. */
95 ql_log(ql_log_warn, vha, 0x1005,
96 "Cmd access timeout, cmd=0x%x, Exiting.\n",
98 return QLA_FUNCTION_TIMEOUT;
101 ha->flags.mbox_busy = 1;
102 /* Save mailbox command for debug */
105 ql_dbg(ql_dbg_mbx, vha, 0x1006,
106 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
108 spin_lock_irqsave(&ha->hardware_lock, flags);
110 /* Load mailbox registers. */
112 optr = (uint16_t __iomem *)®->isp82.mailbox_in[0];
113 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
114 optr = (uint16_t __iomem *)®->isp24.mailbox0;
116 optr = (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 0);
119 command = mcp->mb[0];
120 mboxes = mcp->out_mb;
122 ql_dbg(ql_dbg_mbx, vha, 0x1111,
123 "Mailbox registers (OUT):\n");
124 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
125 if (IS_QLA2200(ha) && cnt == 8)
127 (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 8);
128 if (mboxes & BIT_0) {
129 ql_dbg(ql_dbg_mbx, vha, 0x1112,
130 "mbox[%d]<-0x%04x\n", cnt, *iptr);
131 WRT_REG_WORD(optr, *iptr);
139 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
140 "I/O Address = %p.\n", optr);
142 /* Issue set host interrupt command to send cmd out. */
143 ha->flags.mbox_int = 0;
144 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
146 /* Unlock mbx registers and wait for interrupt */
147 ql_dbg(ql_dbg_mbx, vha, 0x100f,
148 "Going to unlock irq & waiting for interrupts. "
149 "jiffies=%lx.\n", jiffies);
151 /* Wait for mbx cmd completion until timeout */
153 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
154 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
156 if (IS_P3P_TYPE(ha)) {
157 if (RD_REG_DWORD(®->isp82.hint) &
158 HINT_MBX_INT_PENDING) {
159 spin_unlock_irqrestore(&ha->hardware_lock,
161 ha->flags.mbox_busy = 0;
162 ql_dbg(ql_dbg_mbx, vha, 0x1010,
163 "Pending mailbox timeout, exiting.\n");
164 rval = QLA_FUNCTION_TIMEOUT;
167 WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING);
168 } else if (IS_FWI2_CAPABLE(ha))
169 WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT);
171 WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT);
172 spin_unlock_irqrestore(&ha->hardware_lock, flags);
174 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
176 ql_dbg(ql_dbg_mbx, vha, 0x117a,
177 "cmd=%x Timeout.\n", command);
178 spin_lock_irqsave(&ha->hardware_lock, flags);
179 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
180 spin_unlock_irqrestore(&ha->hardware_lock, flags);
183 ql_dbg(ql_dbg_mbx, vha, 0x1011,
184 "Cmd=%x Polling Mode.\n", command);
186 if (IS_P3P_TYPE(ha)) {
187 if (RD_REG_DWORD(®->isp82.hint) &
188 HINT_MBX_INT_PENDING) {
189 spin_unlock_irqrestore(&ha->hardware_lock,
191 ha->flags.mbox_busy = 0;
192 ql_dbg(ql_dbg_mbx, vha, 0x1012,
193 "Pending mailbox timeout, exiting.\n");
194 rval = QLA_FUNCTION_TIMEOUT;
197 WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING);
198 } else if (IS_FWI2_CAPABLE(ha))
199 WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT);
201 WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT);
202 spin_unlock_irqrestore(&ha->hardware_lock, flags);
204 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
205 while (!ha->flags.mbox_int) {
206 if (time_after(jiffies, wait_time))
209 /* Check for pending interrupts. */
210 qla2x00_poll(ha->rsp_q_map[0]);
212 if (!ha->flags.mbox_int &&
214 command == MBC_LOAD_RISC_RAM_EXTENDED))
217 ql_dbg(ql_dbg_mbx, vha, 0x1013,
219 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
222 /* Check whether we timed out */
223 if (ha->flags.mbox_int) {
226 ql_dbg(ql_dbg_mbx, vha, 0x1014,
227 "Cmd=%x completed.\n", command);
229 /* Got interrupt. Clear the flag. */
230 ha->flags.mbox_int = 0;
231 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
233 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
234 ha->flags.mbox_busy = 0;
235 /* Setting Link-Down error */
236 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
238 rval = QLA_FUNCTION_FAILED;
239 ql_log(ql_log_warn, vha, 0x1015,
240 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
244 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
245 rval = QLA_FUNCTION_FAILED;
247 /* Load return mailbox registers. */
249 iptr = (uint16_t *)&ha->mailbox_out[0];
252 ql_dbg(ql_dbg_mbx, vha, 0x1113,
253 "Mailbox registers (IN):\n");
254 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
255 if (mboxes & BIT_0) {
257 ql_dbg(ql_dbg_mbx, vha, 0x1114,
258 "mbox[%d]->0x%04x\n", cnt, *iptr2);
270 if (IS_FWI2_CAPABLE(ha)) {
271 mb0 = RD_REG_WORD(®->isp24.mailbox0);
272 ictrl = RD_REG_DWORD(®->isp24.ictrl);
274 mb0 = RD_MAILBOX_REG(ha, ®->isp, 0);
275 ictrl = RD_REG_WORD(®->isp.ictrl);
277 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
278 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
279 "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
280 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
283 * Attempt to capture a firmware dump for further analysis
284 * of the current firmware state. We do not need to do this
285 * if we are intentionally generating a dump.
287 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
288 ha->isp_ops->fw_dump(vha, 0);
290 rval = QLA_FUNCTION_TIMEOUT;
293 ha->flags.mbox_busy = 0;
298 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
299 ql_dbg(ql_dbg_mbx, vha, 0x101a,
300 "Checking for additional resp interrupt.\n");
302 /* polling mode for non isp_abort commands. */
303 qla2x00_poll(ha->rsp_q_map[0]);
306 if (rval == QLA_FUNCTION_TIMEOUT &&
307 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
308 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
309 ha->flags.eeh_busy) {
310 /* not in dpc. schedule it for dpc to take over. */
311 ql_dbg(ql_dbg_mbx, vha, 0x101b,
312 "Timeout, schedule isp_abort_needed.\n");
314 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
315 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
316 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
317 if (IS_QLA82XX(ha)) {
318 ql_dbg(ql_dbg_mbx, vha, 0x112a,
319 "disabling pause transmit on port "
322 QLA82XX_CRB_NIU + 0x98,
323 CRB_NIU_XG_PAUSE_CTL_P0|
324 CRB_NIU_XG_PAUSE_CTL_P1);
326 ql_log(ql_log_info, base_vha, 0x101c,
327 "Mailbox cmd timeout occurred, cmd=0x%x, "
328 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
329 "abort.\n", command, mcp->mb[0],
331 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
332 qla2xxx_wake_dpc(vha);
334 } else if (!abort_active) {
335 /* call abort directly since we are in the DPC thread */
336 ql_dbg(ql_dbg_mbx, vha, 0x101d,
337 "Timeout, calling abort_isp.\n");
339 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
340 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
341 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
342 if (IS_QLA82XX(ha)) {
343 ql_dbg(ql_dbg_mbx, vha, 0x112b,
344 "disabling pause transmit on port "
347 QLA82XX_CRB_NIU + 0x98,
348 CRB_NIU_XG_PAUSE_CTL_P0|
349 CRB_NIU_XG_PAUSE_CTL_P1);
351 ql_log(ql_log_info, base_vha, 0x101e,
352 "Mailbox cmd timeout occurred, cmd=0x%x, "
353 "mb[0]=0x%x. Scheduling ISP abort ",
354 command, mcp->mb[0]);
355 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
356 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
357 /* Allow next mbx cmd to come in. */
358 complete(&ha->mbx_cmd_comp);
359 if (ha->isp_ops->abort_isp(vha)) {
360 /* Failed. retry later. */
361 set_bit(ISP_ABORT_NEEDED,
364 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
365 ql_dbg(ql_dbg_mbx, vha, 0x101f,
366 "Finished abort_isp.\n");
373 /* Allow next mbx cmd to come in. */
374 complete(&ha->mbx_cmd_comp);
378 ql_dbg(ql_dbg_disc, base_vha, 0x1020,
379 "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
380 mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
382 ql_dbg(ql_dbg_disc, vha, 0x1115,
383 "host status: 0x%x, flags:0x%lx, intr ctrl reg:0x%x, intr status:0x%x\n",
384 RD_REG_DWORD(®->isp24.host_status),
385 ha->fw_dump_cap_flags,
386 RD_REG_DWORD(®->isp24.ictrl),
387 RD_REG_DWORD(®->isp24.istatus));
389 mbx_reg = ®->isp24.mailbox0;
390 for (i = 0; i < 6; i++)
391 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x1116,
392 "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
394 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
/*
 * qla2x00_load_ram() - load a staged firmware segment into RISC RAM via
 * MBC_LOAD_RISC_RAM / MBC_LOAD_RISC_RAM_EXTENDED.
 *
 * @req_dma: DMA address of the staged image (64-bit, split across
 *           mb[2]/mb[3]/mb[6]/mb[7]).
 * @risc_addr: destination RISC address; the extended command is used when
 *             the upper word is non-zero or the ISP is FWI2-capable.
 * @risc_code_size: transfer length; 32-bit (mb[4]/mb[5]) on FWI2-capable
 *                  parts, 16-bit (mb[4]) otherwise.
 *
 * NOTE(review): lines are missing from this extract (e.g. the else-arm
 * brace around line 417 and the closing return); verify against upstream.
 */
401 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
402 uint32_t risc_code_size)
405 struct qla_hw_data *ha = vha->hw;
407 mbx_cmd_t *mcp = &mc;
409 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
410 "Entered %s.\n", __func__);
412 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
413 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
414 mcp->mb[8] = MSW(risc_addr);
415 mcp->out_mb = MBX_8|MBX_0;
417 mcp->mb[0] = MBC_LOAD_RISC_RAM;
420 mcp->mb[1] = LSW(risc_addr);
421 mcp->mb[2] = MSW(req_dma);
422 mcp->mb[3] = LSW(req_dma);
423 mcp->mb[6] = MSW(MSD(req_dma));
424 mcp->mb[7] = LSW(MSD(req_dma));
425 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
426 if (IS_FWI2_CAPABLE(ha)) {
427 mcp->mb[4] = MSW(risc_code_size);
428 mcp->mb[5] = LSW(risc_code_size);
429 mcp->out_mb |= MBX_5|MBX_4;
431 mcp->mb[4] = LSW(risc_code_size);
432 mcp->out_mb |= MBX_4;
436 mcp->tov = MBX_TOV_SECONDS;
438 rval = qla2x00_mailbox_command(vha, mcp);
440 if (rval != QLA_SUCCESS) {
441 ql_dbg(ql_dbg_mbx, vha, 0x1023,
442 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
444 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
445 "Done %s.\n", __func__);
451 #define EXTENDED_BB_CREDITS BIT_0
454 * Start adapter firmware.
457 * ha = adapter block pointer.
458 * TARGET_QUEUE_LOCK must be released.
459 * ADAPTER_STATE_LOCK must be released.
462 * qla2x00 local function return status code.
/*
 * qla2x00_execute_fw() - start the previously loaded firmware with
 * MBC_EXECUTE_FIRMWARE.
 *
 * FWI2-capable ISPs take a 32-bit risc_addr in mb[1]/mb[2] and, for the
 * 25xx/81xx/83xx families, forward the EXTENDED_BB_CREDITS bit from NVRAM
 * in mb[4].  Legacy parts pass a 16-bit address in mb[1].  On success for
 * FWI2 parts, mb[1] reports the exchange count (logged below).
 *
 * NOTE(review): extract has dropped lines (e.g. the condition tail at
 * line 485/487); verify against upstream before editing.
 */
468 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
471 struct qla_hw_data *ha = vha->hw;
473 mbx_cmd_t *mcp = &mc;
475 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
476 "Entered %s.\n", __func__);
478 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
481 if (IS_FWI2_CAPABLE(ha)) {
482 mcp->mb[1] = MSW(risc_addr);
483 mcp->mb[2] = LSW(risc_addr);
485 if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
487 struct nvram_81xx *nv = ha->nvram;
488 mcp->mb[4] = (nv->enhanced_features &
489 EXTENDED_BB_CREDITS);
492 mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
495 mcp->mb[1] = LSW(risc_addr);
496 mcp->out_mb |= MBX_1;
497 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
499 mcp->out_mb |= MBX_2;
503 mcp->tov = MBX_TOV_SECONDS;
505 rval = qla2x00_mailbox_command(vha, mcp);
507 if (rval != QLA_SUCCESS) {
508 ql_dbg(ql_dbg_mbx, vha, 0x1026,
509 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
511 if (IS_FWI2_CAPABLE(ha)) {
512 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027,
513 "Done exchanges=%x.\n", mcp->mb[1]);
515 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
516 "Done %s.\n", __func__);
524 * qla2x00_get_fw_version
525 * Get firmware version.
528 * ha: adapter state pointer.
529 * major: pointer for major number.
530 * minor: pointer for minor number.
531 * subminor: pointer for subminor number.
534 * qla2x00 local function return status code.
/*
 * qla2x00_get_fw_version() - issue MBC_GET_FIRMWARE_VERSION and cache the
 * returned mailbox data in ha: major/minor/subminor version, attributes,
 * firmware memory size, and (per chip family) MPI/PHY/PEP versions and
 * shared-RAM bounds.
 *
 * NOTE(review): extract has dropped lines (e.g. the early-exit after the
 * rval check at line 564); verify against upstream.
 */
540 qla2x00_get_fw_version(scsi_qla_host_t *vha)
544 mbx_cmd_t *mcp = &mc;
545 struct qla_hw_data *ha = vha->hw;
547 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
548 "Entered %s.\n", __func__);
550 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
/* Which inbound mailboxes to capture depends on the chip family. */
552 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
553 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
554 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
555 if (IS_FWI2_CAPABLE(ha))
556 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
558 mcp->in_mb |= MBX_23 | MBX_22 | MBX_21 | MBX_20 | MBX_19 |
559 MBX_18 | MBX_14 | MBX_13 | MBX_11 | MBX_10 | MBX_9 | MBX_8;
562 mcp->tov = MBX_TOV_SECONDS;
563 rval = qla2x00_mailbox_command(vha, mcp);
564 if (rval != QLA_SUCCESS)
567 /* Return mailbox data. */
568 ha->fw_major_version = mcp->mb[1];
569 ha->fw_minor_version = mcp->mb[2];
570 ha->fw_subminor_version = mcp->mb[3];
571 ha->fw_attributes = mcp->mb[6];
572 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
573 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
575 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
577 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
578 ha->mpi_version[0] = mcp->mb[10] & 0xff;
579 ha->mpi_version[1] = mcp->mb[11] >> 8;
580 ha->mpi_version[2] = mcp->mb[11] & 0xff;
581 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
582 ha->phy_version[0] = mcp->mb[8] & 0xff;
583 ha->phy_version[1] = mcp->mb[9] >> 8;
584 ha->phy_version[2] = mcp->mb[9] & 0xff;
587 if (IS_FWI2_CAPABLE(ha)) {
588 ha->fw_attributes_h = mcp->mb[15];
589 ha->fw_attributes_ext[0] = mcp->mb[16];
590 ha->fw_attributes_ext[1] = mcp->mb[17];
591 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
592 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
593 __func__, mcp->mb[15], mcp->mb[6]);
594 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
595 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
596 __func__, mcp->mb[17], mcp->mb[16]);
599 if (IS_QLA27XX(ha)) {
600 ha->mpi_version[0] = mcp->mb[10] & 0xff;
601 ha->mpi_version[1] = mcp->mb[11] >> 8;
602 ha->mpi_version[2] = mcp->mb[11] & 0xff;
603 ha->pep_version[0] = mcp->mb[13] & 0xff;
604 ha->pep_version[1] = mcp->mb[14] >> 8;
605 ha->pep_version[2] = mcp->mb[14] & 0xff;
606 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
607 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
611 if (rval != QLA_SUCCESS) {
613 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
616 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
617 "Done %s.\n", __func__);
623 * qla2x00_get_fw_options
624 * Set firmware options.
627 * ha = adapter block pointer.
628 * fwopt = pointer for firmware options.
631 * qla2x00 local function return status code.
/*
 * qla2x00_get_fw_options() - MBC_GET_FIRMWARE_OPTION; on success copies
 * mailbox registers 0-3 into the caller-supplied fwopts[0..3] array.
 * (The "Set firmware options" text in the comment block above appears to
 * be a copy/paste slip -- this function only reads them.)
 */
637 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
641 mbx_cmd_t *mcp = &mc;
643 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
644 "Entered %s.\n", __func__);
646 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
648 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
649 mcp->tov = MBX_TOV_SECONDS;
651 rval = qla2x00_mailbox_command(vha, mcp);
653 if (rval != QLA_SUCCESS) {
655 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
657 fwopts[0] = mcp->mb[0];
658 fwopts[1] = mcp->mb[1];
659 fwopts[2] = mcp->mb[2];
660 fwopts[3] = mcp->mb[3];
662 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
663 "Done %s.\n", __func__);
671 * qla2x00_set_fw_options
672 * Set firmware options.
675 * ha = adapter block pointer.
676 * fwopt = pointer for firmware options.
679 * qla2x00 local function return status code.
<doc_update>
/*
 * qla2x00_set_fw_options() - MBC_SET_FIRMWARE_OPTION; writes the caller's
 * fwopts[1..3] (plus fwopts[10..11] and a zeroed mb[12] on FWI2-capable
 * parts) and reports the firmware status back through fwopts[0].
 */
685 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
689 mbx_cmd_t *mcp = &mc;
691 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
692 "Entered %s.\n", __func__);
694 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
695 mcp->mb[1] = fwopts[1];
696 mcp->mb[2] = fwopts[2];
697 mcp->mb[3] = fwopts[3];
698 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
700 if (IS_FWI2_CAPABLE(vha->hw)) {
703 mcp->mb[10] = fwopts[10];
704 mcp->mb[11] = fwopts[11];
705 mcp->mb[12] = 0; /* Undocumented, but used */
706 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
708 mcp->tov = MBX_TOV_SECONDS;
710 rval = qla2x00_mailbox_command(vha, mcp);
/* Status mailbox is handed back even on failure. */
712 fwopts[0] = mcp->mb[0];
714 if (rval != QLA_SUCCESS) {
716 ql_dbg(ql_dbg_mbx, vha, 0x1030,
717 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
720 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
721 "Done %s.\n", __func__);
728 * qla2x00_mbx_reg_test
729 * Mailbox register wrap test.
732 * ha = adapter block pointer.
733 * TARGET_QUEUE_LOCK must be released.
734 * ADAPTER_STATE_LOCK must be released.
737 * qla2x00 local function return status code.
/*
 * qla2x00_mbx_reg_test() - MBC_MAILBOX_REGISTER_TEST wrap test: write a
 * known pattern through mailboxes 1-7 and fail unless the firmware
 * echoes back exactly 0xAAAA/0x5555/0xAA55/0x55AA/0xA5A5/0x5A5A/0x2525.
 * NOTE(review): the lines writing the outgoing pattern (around original
 * lines 753-759) are missing from this extract.
 */
743 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
747 mbx_cmd_t *mcp = &mc;
749 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
750 "Entered %s.\n", __func__);
752 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
760 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
761 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
762 mcp->tov = MBX_TOV_SECONDS;
764 rval = qla2x00_mailbox_command(vha, mcp);
766 if (rval == QLA_SUCCESS) {
767 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
768 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
769 rval = QLA_FUNCTION_FAILED;
770 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
771 mcp->mb[7] != 0x2525)
772 rval = QLA_FUNCTION_FAILED;
775 if (rval != QLA_SUCCESS) {
777 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
780 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
781 "Done %s.\n", __func__);
788 * qla2x00_verify_checksum
789 * Verify firmware checksum.
792 * ha = adapter block pointer.
793 * TARGET_QUEUE_LOCK must be released.
794 * ADAPTER_STATE_LOCK must be released.
797 * qla2x00 local function return status code.
/*
 * qla2x00_verify_checksum() - MBC_VERIFY_CHECKSUM on the firmware image
 * at @risc_addr.  FWI2-capable parts take a 32-bit address split across
 * mb[1]/mb[2] and return a 32-bit checksum the same way; legacy parts
 * use 16-bit mb[1] for both.
 */
803 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
807 mbx_cmd_t *mcp = &mc;
809 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
810 "Entered %s.\n", __func__);
812 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
815 if (IS_FWI2_CAPABLE(vha->hw)) {
816 mcp->mb[1] = MSW(risc_addr);
817 mcp->mb[2] = LSW(risc_addr);
818 mcp->out_mb |= MBX_2|MBX_1;
819 mcp->in_mb |= MBX_2|MBX_1;
821 mcp->mb[1] = LSW(risc_addr);
822 mcp->out_mb |= MBX_1;
826 mcp->tov = MBX_TOV_SECONDS;
828 rval = qla2x00_mailbox_command(vha, mcp);
830 if (rval != QLA_SUCCESS) {
831 ql_dbg(ql_dbg_mbx, vha, 0x1036,
832 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
833 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
835 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
836 "Done %s.\n", __func__);
844 * Issue IOCB using mailbox command
847 * ha = adapter state pointer.
848 * buffer = buffer pointer.
849 * phys_addr = physical address of buffer.
850 * size = size of buffer.
851 * TARGET_QUEUE_LOCK must be released.
852 * ADAPTER_STATE_LOCK must be released.
855 * qla2x00 local function return status code.
/*
 * qla2x00_issue_iocb_timeout() - submit a single IOCB via the
 * MBC_IOCB_COMMAND_A64 mailbox command with a caller-supplied timeout.
 *
 * @buffer: kernel virtual address of the IOCB; on success the IOCB's
 *          status entry is written back here (reserved bits masked off).
 * @phys_addr: DMA address of the same IOCB, split into mb[2]/[3]/[6]/[7].
 * @size: IOCB length.  @tov: timeout in seconds.
 */
861 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
862 dma_addr_t phys_addr, size_t size, uint32_t tov)
866 mbx_cmd_t *mcp = &mc;
868 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
869 "Entered %s.\n", __func__);
871 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
873 mcp->mb[2] = MSW(phys_addr);
874 mcp->mb[3] = LSW(phys_addr);
875 mcp->mb[6] = MSW(MSD(phys_addr));
876 mcp->mb[7] = LSW(MSD(phys_addr));
877 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
878 mcp->in_mb = MBX_2|MBX_0;
881 rval = qla2x00_mailbox_command(vha, mcp);
883 if (rval != QLA_SUCCESS) {
885 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
887 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
889 /* Mask reserved bits. */
890 sts_entry->entry_status &=
891 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
892 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
893 "Done %s.\n", __func__);
/*
 * qla2x00_issue_iocb() - convenience wrapper around
 * qla2x00_issue_iocb_timeout() using the default timeout.
 * NOTE(review): the tail of the signature/call is truncated in this
 * extract.
 */
900 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
903 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
908 * qla2x00_abort_command
909 * Abort command aborts a specified IOCB.
912 * ha = adapter block pointer.
913 * sp = SB structure pointer.
916 * qla2x00 local function return status code.
/*
 * qla2x00_abort_command() - abort a specific outstanding IOCB via
 * MBC_ABORT_COMMAND.  The sp's firmware handle is located by scanning the
 * request queue's outstanding_cmds[] under hardware_lock; if the command
 * is no longer outstanding, QLA_FUNCTION_FAILED is returned without
 * talking to the firmware.
 */
922 qla2x00_abort_command(srb_t *sp)
924 unsigned long flags = 0;
928 mbx_cmd_t *mcp = &mc;
929 fc_port_t *fcport = sp->fcport;
930 scsi_qla_host_t *vha = fcport->vha;
931 struct qla_hw_data *ha = vha->hw;
932 struct req_que *req = vha->req;
933 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
935 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
936 "Entered %s.\n", __func__);
938 spin_lock_irqsave(&ha->hardware_lock, flags);
939 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
940 if (req->outstanding_cmds[handle] == sp)
943 spin_unlock_irqrestore(&ha->hardware_lock, flags);
945 if (handle == req->num_outstanding_cmds) {
946 /* command not found */
947 return QLA_FUNCTION_FAILED;
950 mcp->mb[0] = MBC_ABORT_COMMAND;
/* Legacy parts pack the loop id into the high byte of mb[1]. */
951 if (HAS_EXTENDED_IDS(ha))
952 mcp->mb[1] = fcport->loop_id;
954 mcp->mb[1] = fcport->loop_id << 8;
955 mcp->mb[2] = (uint16_t)handle;
956 mcp->mb[3] = (uint16_t)(handle >> 16);
957 mcp->mb[6] = (uint16_t)cmd->device->lun;
958 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
960 mcp->tov = MBX_TOV_SECONDS;
962 rval = qla2x00_mailbox_command(vha, mcp);
964 if (rval != QLA_SUCCESS) {
965 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
967 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
968 "Done %s.\n", __func__);
/*
 * qla2x00_abort_target() - MBC_ABORT_TARGET for all commands to @fcport,
 * followed by a marker IOCB so the firmware resumes I/O ordering.
 * NOTE(review): the assignment of the local vha from fcport and several
 * declarations are missing from this extract.
 */
975 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
979 mbx_cmd_t *mcp = &mc;
980 scsi_qla_host_t *vha;
987 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
988 "Entered %s.\n", __func__);
990 req = vha->hw->req_q_map[0];
992 mcp->mb[0] = MBC_ABORT_TARGET;
993 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
994 if (HAS_EXTENDED_IDS(vha->hw)) {
995 mcp->mb[1] = fcport->loop_id;
997 mcp->out_mb |= MBX_10;
999 mcp->mb[1] = fcport->loop_id << 8;
1001 mcp->mb[2] = vha->hw->loop_reset_delay;
1002 mcp->mb[9] = vha->vp_idx;
1005 mcp->tov = MBX_TOV_SECONDS;
1007 rval = qla2x00_mailbox_command(vha, mcp);
1008 if (rval != QLA_SUCCESS) {
1009 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1010 "Failed=%x.\n", rval);
1013 /* Issue marker IOCB. */
1014 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
1016 if (rval2 != QLA_SUCCESS) {
1017 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1018 "Failed to issue marker IOCB (%x).\n", rval2);
1020 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1021 "Done %s.\n", __func__);
/*
 * qla2x00_lun_reset() - MBC_LUN_RESET for LUN @l on @fcport, followed by
 * a LUN-scoped marker IOCB.
 * NOTE(review): mb[2] = (u32)l narrows the 64-bit LUN into a 16-bit
 * mailbox register -- presumably only small LUN numbers are supported on
 * this path; confirm against upstream.  The local vha initialization is
 * also missing from this extract.
 */
1028 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1032 mbx_cmd_t *mcp = &mc;
1033 scsi_qla_host_t *vha;
1034 struct req_que *req;
1035 struct rsp_que *rsp;
1039 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1040 "Entered %s.\n", __func__);
1042 req = vha->hw->req_q_map[0];
1044 mcp->mb[0] = MBC_LUN_RESET;
1045 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1046 if (HAS_EXTENDED_IDS(vha->hw))
1047 mcp->mb[1] = fcport->loop_id;
1049 mcp->mb[1] = fcport->loop_id << 8;
1050 mcp->mb[2] = (u32)l;
1052 mcp->mb[9] = vha->vp_idx;
1055 mcp->tov = MBX_TOV_SECONDS;
1057 rval = qla2x00_mailbox_command(vha, mcp);
1058 if (rval != QLA_SUCCESS) {
1059 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1062 /* Issue marker IOCB. */
1063 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
1065 if (rval2 != QLA_SUCCESS) {
1066 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1067 "Failed to issue marker IOCB (%x).\n", rval2);
1069 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1070 "Done %s.\n", __func__);
1077 * qla2x00_get_adapter_id
1078 * Get adapter ID and topology.
1081 * ha = adapter block pointer.
1082 * id = pointer for loop ID.
1083 * al_pa = pointer for AL_PA.
1084 * area = pointer for area.
1085 * domain = pointer for domain.
1086 * top = pointer for topology.
1087 * TARGET_QUEUE_LOCK must be released.
1088 * ADAPTER_STATE_LOCK must be released.
1091 * qla2x00 local function return status code.
/*
 * qla2x00_get_adapter_id() - MBC_GET_ADAPTER_LOOP_ID; returns the loop
 * id, AL_PA/area/domain address bytes, topology, and switch capabilities
 * through the out-parameters, and additionally caches FCoE VLAN/FCF/MAC
 * data (CNA parts) and a fabric-assigned WWN (FA-WWN capable parts) on
 * the vha.
 */
1097 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1098 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1102 mbx_cmd_t *mcp = &mc;
1104 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1105 "Entered %s.\n", __func__);
1107 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1108 mcp->mb[9] = vha->vp_idx;
1109 mcp->out_mb = MBX_9|MBX_0;
1110 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1111 if (IS_CNA_CAPABLE(vha->hw))
1112 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1113 if (IS_FWI2_CAPABLE(vha->hw))
1114 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1115 mcp->tov = MBX_TOV_SECONDS;
1117 rval = qla2x00_mailbox_command(vha, mcp);
/* Map firmware status codes onto the driver's specific error codes. */
1118 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1119 rval = QLA_COMMAND_ERROR;
1120 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1121 rval = QLA_INVALID_COMMAND;
1125 *al_pa = LSB(mcp->mb[2]);
1126 *area = MSB(mcp->mb[2]);
1127 *domain = LSB(mcp->mb[3]);
1129 *sw_cap = mcp->mb[7];
1131 if (rval != QLA_SUCCESS) {
1133 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1135 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1136 "Done %s.\n", __func__);
/* FCoE: VLAN id, FCF index, and VN-port MAC come back in mb[9..13]. */
1138 if (IS_CNA_CAPABLE(vha->hw)) {
1139 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1140 vha->fcoe_fcf_idx = mcp->mb[10];
1141 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1142 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1143 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1144 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1145 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1146 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1148 /* If FA-WWN supported */
1149 if (IS_FAWWN_CAPABLE(vha->hw)) {
1150 if (mcp->mb[7] & BIT_14) {
1151 vha->port_name[0] = MSB(mcp->mb[16]);
1152 vha->port_name[1] = LSB(mcp->mb[16]);
1153 vha->port_name[2] = MSB(mcp->mb[17]);
1154 vha->port_name[3] = LSB(mcp->mb[17]);
1155 vha->port_name[4] = MSB(mcp->mb[18]);
1156 vha->port_name[5] = LSB(mcp->mb[18]);
1157 vha->port_name[6] = MSB(mcp->mb[19]);
1158 vha->port_name[7] = LSB(mcp->mb[19]);
1159 fc_host_port_name(vha->host) =
1160 wwn_to_u64(vha->port_name);
1161 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1162 "FA-WWN acquired %016llx\n",
1163 wwn_to_u64(vha->port_name));
1172 * qla2x00_get_retry_cnt
1173 * Get current firmware login retry count and delay.
1176 * ha = adapter block pointer.
1177 * retry_cnt = pointer to login retry count.
1178 * tov = pointer to login timeout value.
1181 * qla2x00 local function return status code.
/*
 * qla2x00_get_retry_cnt() - MBC_GET_RETRY_COUNT; returns the firmware's
 * login retry count / timeout and the R_A_TOV.  mb[3] is in 100 ms units
 * (halved to get R_A_TOV, then /10 for a per-retry timeout in seconds);
 * the caller's values are only overwritten when the firmware's product
 * of count*timeout is larger.
 */
1187 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1193 mbx_cmd_t *mcp = &mc;
1195 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1196 "Entered %s.\n", __func__);
1198 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1199 mcp->out_mb = MBX_0;
1200 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1201 mcp->tov = MBX_TOV_SECONDS;
1203 rval = qla2x00_mailbox_command(vha, mcp);
1205 if (rval != QLA_SUCCESS) {
1207 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1208 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1210 /* Convert returned data and check our values. */
1211 *r_a_tov = mcp->mb[3] / 2;
1212 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1213 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1214 /* Update to the larger values */
1215 *retry_cnt = (uint8_t)mcp->mb[1];
1219 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1220 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1227 * qla2x00_init_firmware
1228 * Initialize adapter firmware.
1231 * ha = adapter block pointer.
1232 * dptr = Initialization control block pointer.
1233 * size = size of initialization control block.
1234 * TARGET_QUEUE_LOCK must be released.
1235 * ADAPTER_STATE_LOCK must be released.
1238 * qla2x00 local function return status code.
/*
 * qla2x00_init_firmware() - hand the initialization control block (ICB)
 * at ha->init_cb_dma to the firmware via MBC_(MID_)INITIALIZE_FIRMWARE;
 * the MID variant is used when NPIV is supported.  An extended ICB at
 * ha->ex_init_cb_dma is passed in mb[10..14] when present.
 *
 * @size: ICB length, passed through mcp->buf_size for the DMA-out dump.
 */
1244 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1248 mbx_cmd_t *mcp = &mc;
1249 struct qla_hw_data *ha = vha->hw;
1251 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1252 "Entered %s.\n", __func__);
/* P3P doorbell write when the ql2xdbwr module option is set. */
1254 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1255 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1256 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1258 if (ha->flags.npiv_supported)
1259 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1261 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1264 mcp->mb[2] = MSW(ha->init_cb_dma);
1265 mcp->mb[3] = LSW(ha->init_cb_dma);
1266 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1267 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1268 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1269 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1271 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1272 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1273 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1274 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1275 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1276 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1278 /* 1 and 2 should normally be captured. */
1279 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1280 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
1281 /* mb3 is additional info about the installed SFP. */
1282 mcp->in_mb |= MBX_3;
1283 mcp->buf_size = size;
1284 mcp->flags = MBX_DMA_OUT;
1285 mcp->tov = MBX_TOV_SECONDS;
1286 rval = qla2x00_mailbox_command(vha, mcp);
1288 if (rval != QLA_SUCCESS) {
1290 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1291 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
1292 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1295 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1296 "Done %s.\n", __func__);
1303 * qla2x00_get_node_name_list
1304 * Issue get node name list mailbox command, kmalloc()
1305 * and return the resulting list. Caller must kfree() it!
1308 * ha = adapter state pointer.
1309 * out_data = resulting list
1310 * out_len = length of the resulting list
1313 * qla2x00 local function return status code.
/*
 * qla2x00_get_node_name_list() - MBC_PORT_NODE_NAME_LIST into a DMA
 * buffer, retried with a larger buffer when the firmware reports (via
 * MBS_COMMAND_ERROR / mb[1]==0xA) that more room is needed; the result
 * is kmemdup'd for the caller, who must kfree() *out_data.
 *
 * NOTE(review): mb[8] = dma_size assigns a size_t-like value into a
 * 16-bit mailbox -- presumably the list always fits in 64 KB; confirm
 * against upstream.  The retry-loop head and labels are missing from
 * this extract.
 */
1319 qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
1321 struct qla_hw_data *ha = vha->hw;
1322 struct qla_port_24xx_data *list = NULL;
1325 dma_addr_t pmap_dma;
1331 dma_size = left * sizeof(*list);
1332 pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size,
1333 &pmap_dma, GFP_KERNEL);
1335 ql_log(ql_log_warn, vha, 0x113f,
1336 "%s(%ld): DMA Alloc failed of %ld\n",
1337 __func__, vha->host_no, dma_size);
1338 rval = QLA_MEMORY_ALLOC_FAILED;
1342 mc.mb[0] = MBC_PORT_NODE_NAME_LIST;
1343 mc.mb[1] = BIT_1 | BIT_3;
1344 mc.mb[2] = MSW(pmap_dma);
1345 mc.mb[3] = LSW(pmap_dma);
1346 mc.mb[6] = MSW(MSD(pmap_dma));
1347 mc.mb[7] = LSW(MSD(pmap_dma));
1348 mc.mb[8] = dma_size;
1349 mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8;
1350 mc.in_mb = MBX_0|MBX_1;
1352 mc.flags = MBX_DMA_IN;
1354 rval = qla2x00_mailbox_command(vha, &mc);
1355 if (rval != QLA_SUCCESS) {
/* Firmware says the buffer was too small; mb[2] carries the needed
 * extra byte count -- grow and retry. */
1356 if ((mc.mb[0] == MBS_COMMAND_ERROR) &&
1357 (mc.mb[1] == 0xA)) {
1358 left += le16_to_cpu(mc.mb[2]) /
1359 sizeof(struct qla_port_24xx_data);
1367 list = kmemdup(pmap, dma_size, GFP_KERNEL);
1369 ql_log(ql_log_warn, vha, 0x1140,
1370 "%s(%ld): failed to allocate node names list "
1371 "structure.\n", __func__, vha->host_no);
1372 rval = QLA_MEMORY_ALLOC_FAILED;
1377 dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
1381 *out_len = dma_size;
1387 dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
1392 * qla2x00_get_port_database
1393 * Issue normal/enhanced get port database mailbox command
1394 * and copy device name as necessary.
1397 * ha = adapter state pointer.
1398 * dev = structure pointer.
1399 * opt = enhanced cmd option byte.
1402 * qla2x00 local function return status code.
/*
 * Issue the (enhanced) Get-Port-Database mailbox command for @fcport and
 * copy the returned login state, WWNs, port ID, port type and class-of-
 * service back into the fc_port_t.  Handles both the ISP24xx-style
 * (port_database_24xx) and legacy (port_database_t) layouts.
 */
1408 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1412 mbx_cmd_t *mcp = &mc;
1413 port_database_t *pd;
1414 struct port_database_24xx *pd24;
1416 struct qla_hw_data *ha = vha->hw;
1418 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1419 "Entered %s.\n", __func__);
/* One buffer serves both layouts; zero the larger of the two sizes. */
1422 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1424 ql_log(ql_log_warn, vha, 0x1050,
1425 "Failed to allocate port database structure.\n");
1426 return QLA_MEMORY_ALLOC_FAILED;
1428 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
1430 mcp->mb[0] = MBC_GET_PORT_DATABASE;
/* Non-zero opt on pre-FWI2 parts selects the enhanced variant. */
1431 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1432 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1433 mcp->mb[2] = MSW(pd_dma);
1434 mcp->mb[3] = LSW(pd_dma);
1435 mcp->mb[6] = MSW(MSD(pd_dma));
1436 mcp->mb[7] = LSW(MSD(pd_dma));
1437 mcp->mb[9] = vha->vp_idx;
1438 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
/* Loop-ID placement in mb[1] depends on the ID-width of the HBA. */
1440 if (IS_FWI2_CAPABLE(ha)) {
1441 mcp->mb[1] = fcport->loop_id;
1443 mcp->out_mb |= MBX_10|MBX_1;
1444 mcp->in_mb |= MBX_1;
1445 } else if (HAS_EXTENDED_IDS(ha)) {
1446 mcp->mb[1] = fcport->loop_id;
1448 mcp->out_mb |= MBX_10|MBX_1;
1450 mcp->mb[1] = fcport->loop_id << 8 | opt;
1451 mcp->out_mb |= MBX_1;
1453 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1454 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1455 mcp->flags = MBX_DMA_IN;
1456 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1457 rval = qla2x00_mailbox_command(vha, mcp);
1458 if (rval != QLA_SUCCESS)
/* --- ISP24xx-and-later port database layout. --- */
1461 if (IS_FWI2_CAPABLE(ha)) {
1463 pd24 = (struct port_database_24xx *) pd;
1465 /* Check for logged in state. */
1466 if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
1467 pd24->last_login_state != PDS_PRLI_COMPLETE) {
1468 ql_dbg(ql_dbg_mbx, vha, 0x1051,
1469 "Unable to verify login-state (%x/%x) for "
1470 "loop_id %x.\n", pd24->current_login_state,
1471 pd24->last_login_state, fcport->loop_id);
1472 rval = QLA_FUNCTION_FAILED;
/* Port name mismatch against a previously-known WWPN => device gone. */
1476 if (fcport->loop_id == FC_NO_LOOP_ID ||
1477 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1478 memcmp(fcport->port_name, pd24->port_name, 8))) {
1479 /* We lost the device mid way. */
1480 rval = QLA_NOT_LOGGED_IN;
1484 /* Names are little-endian. */
1485 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1486 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1488 /* Get port_id of device. */
1489 fcport->d_id.b.domain = pd24->port_id[0];
1490 fcport->d_id.b.area = pd24->port_id[1];
1491 fcport->d_id.b.al_pa = pd24->port_id[2];
1492 fcport->d_id.b.rsvd_1 = 0;
1494 /* If not target must be initiator or unknown type. */
1495 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1496 fcport->port_type = FCT_INITIATOR;
1498 fcport->port_type = FCT_TARGET;
1500 /* Passback COS information. */
1501 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1502 FC_COS_CLASS2 : FC_COS_CLASS3;
/* BIT_7 advertises FCP confirmed-completion support. */
1504 if (pd24->prli_svc_param_word_3[0] & BIT_7)
1505 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
/* --- Legacy (pre-FWI2) port database layout. --- */
1509 /* Check for logged in state. */
1510 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1511 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1512 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1513 "Unable to verify login-state (%x/%x) - "
1514 "portid=%02x%02x%02x.\n", pd->master_state,
1515 pd->slave_state, fcport->d_id.b.domain,
1516 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1517 rval = QLA_FUNCTION_FAILED;
1521 if (fcport->loop_id == FC_NO_LOOP_ID ||
1522 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1523 memcmp(fcport->port_name, pd->port_name, 8))) {
1524 /* We lost the device mid way. */
1525 rval = QLA_NOT_LOGGED_IN;
1529 /* Names are little-endian. */
1530 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
1531 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
1533 /* Get port_id of device. */
/*
 * NOTE(review): the legacy database stores 'area' at byte index 3, not
 * 1 — this index asymmetry vs. the 24xx path is intentional per the
 * legacy structure layout; do not "fix" it.
 */
1534 fcport->d_id.b.domain = pd->port_id[0];
1535 fcport->d_id.b.area = pd->port_id[3];
1536 fcport->d_id.b.al_pa = pd->port_id[2];
1537 fcport->d_id.b.rsvd_1 = 0;
1539 /* If not target must be initiator or unknown type. */
1540 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
1541 fcport->port_type = FCT_INITIATOR;
1543 fcport->port_type = FCT_TARGET;
1545 /* Passback COS information. */
1546 fcport->supported_classes = (pd->options & BIT_4) ?
1547 FC_COS_CLASS2: FC_COS_CLASS3;
1551 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1553 if (rval != QLA_SUCCESS) {
1554 ql_dbg(ql_dbg_mbx, vha, 0x1052,
1555 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1556 mcp->mb[0], mcp->mb[1]);
1558 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
1559 "Done %s.\n", __func__);
1566 * qla2x00_get_firmware_state
1567 * Get adapter firmware state.
1570 * ha = adapter block pointer.
1571 * dptr = pointer for firmware state.
1572 * TARGET_QUEUE_LOCK must be released.
1573 * ADAPTER_STATE_LOCK must be released.
1576 * qla2x00 local function return status code.
/*
 * Query the current firmware state via MBC_GET_FIRMWARE_STATE and return
 * it through the caller-supplied @states array (1 entry for legacy parts,
 * 6 for FWI2-capable parts).
 */
1582 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1586 mbx_cmd_t *mcp = &mc;
1588 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
1589 "Entered %s.\n", __func__);
1591 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1592 mcp->out_mb = MBX_0;
/* FWI2 parts report extended state in mb[2..6]; legacy only mb[1]. */
1593 if (IS_FWI2_CAPABLE(vha->hw))
1594 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1596 mcp->in_mb = MBX_1|MBX_0;
1597 mcp->tov = MBX_TOV_SECONDS;
1599 rval = qla2x00_mailbox_command(vha, mcp);
1601 /* Return firmware states. */
1602 states[0] = mcp->mb[1];
1603 if (IS_FWI2_CAPABLE(vha->hw)) {
1604 states[1] = mcp->mb[2];
1605 states[2] = mcp->mb[3];
1606 states[3] = mcp->mb[4];
1607 states[4] = mcp->mb[5];
1608 states[5] = mcp->mb[6]; /* DPORT status */
1611 if (rval != QLA_SUCCESS) {
1613 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
1616 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
1617 "Done %s.\n", __func__);
1624 * qla2x00_get_port_name
1625 * Issue get port name mailbox command.
1626 * Returned name is in big endian format.
1629 * ha = adapter block pointer.
1630 * loop_id = loop ID of device.
1631 * name = pointer for name.
1632 * TARGET_QUEUE_LOCK must be released.
1633 * ADAPTER_STATE_LOCK must be released.
1636 * qla2x00 local function return status code.
/*
 * Retrieve the 8-byte port name of the device at @loop_id via
 * MBC_GET_PORT_NAME.  The name is assembled into @name in big-endian
 * byte order from mailbox registers 2/3/6/7.
 */
1642 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1647 mbx_cmd_t *mcp = &mc;
1649 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
1650 "Entered %s.\n", __func__);
1652 mcp->mb[0] = MBC_GET_PORT_NAME;
1653 mcp->mb[9] = vha->vp_idx;
1654 mcp->out_mb = MBX_9|MBX_1|MBX_0;
/* Extended-ID parts take the loop ID directly; others pack it with opt. */
1655 if (HAS_EXTENDED_IDS(vha->hw)) {
1656 mcp->mb[1] = loop_id;
1658 mcp->out_mb |= MBX_10;
1660 mcp->mb[1] = loop_id << 8 | opt;
1663 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1664 mcp->tov = MBX_TOV_SECONDS;
1666 rval = qla2x00_mailbox_command(vha, mcp);
1668 if (rval != QLA_SUCCESS) {
1670 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
1673 /* This function returns name in big endian. */
1674 name[0] = MSB(mcp->mb[2]);
1675 name[1] = LSB(mcp->mb[2]);
1676 name[2] = MSB(mcp->mb[3]);
1677 name[3] = LSB(mcp->mb[3]);
1678 name[4] = MSB(mcp->mb[6]);
1679 name[5] = LSB(mcp->mb[6]);
1680 name[6] = MSB(mcp->mb[7]);
1681 name[7] = LSB(mcp->mb[7]);
1684 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
1685 "Done %s.\n", __func__);
1692 * qla24xx_link_initialization
1693 * Issue link initialization mailbox command.
1696 * ha = adapter block pointer.
1697 * TARGET_QUEUE_LOCK must be released.
1698 * ADAPTER_STATE_LOCK must be released.
1701 * qla2x00 local function return status code.
/*
 * Issue MBC_LINK_INITIALIZATION on ISP24xx-class adapters.  Rejected
 * outright for non-FWI2 or CNA parts.  mb[1] bit selection depends on
 * the current operating mode (loop vs. point-to-point).
 */
1707 qla24xx_link_initialize(scsi_qla_host_t *vha)
1711 mbx_cmd_t *mcp = &mc;
1713 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
1714 "Entered %s.\n", __func__);
1716 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
1717 return QLA_FUNCTION_FAILED;
1719 mcp->mb[0] = MBC_LINK_INITIALIZATION;
/* BIT_6 = loop topology, BIT_5 = otherwise (point-to-point). */
1721 if (vha->hw->operating_mode == LOOP)
1722 mcp->mb[1] |= BIT_6;
1724 mcp->mb[1] |= BIT_5;
1727 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1729 mcp->tov = MBX_TOV_SECONDS;
1731 rval = qla2x00_mailbox_command(vha, mcp);
1733 if (rval != QLA_SUCCESS) {
1734 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
1736 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
1737 "Done %s.\n", __func__);
1745 * Issue LIP reset mailbox command.
1748 * ha = adapter block pointer.
1749 * TARGET_QUEUE_LOCK must be released.
1750 * ADAPTER_STATE_LOCK must be released.
1753 * qla2x00 local function return status code.
/*
 * Issue a LIP reset.  The mailbox command and register layout differ by
 * hardware generation: CNA parts use full-login (logout across all
 * FCFs), FWI2 parts use full-login with a reset delay, legacy parts use
 * MBC_LIP_RESET with the loop-ID mask in mb[1].
 */
1759 qla2x00_lip_reset(scsi_qla_host_t *vha)
1763 mbx_cmd_t *mcp = &mc;
1765 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
1766 "Entered %s.\n", __func__);
1768 if (IS_CNA_CAPABLE(vha->hw)) {
1769 /* Logout across all FCFs. */
1770 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1773 mcp->out_mb = MBX_2|MBX_1|MBX_0;
1774 } else if (IS_FWI2_CAPABLE(vha->hw)) {
1775 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1778 mcp->mb[3] = vha->hw->loop_reset_delay;
1779 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1781 mcp->mb[0] = MBC_LIP_RESET;
1782 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
/* Legacy: loop-ID mask placement depends on extended-ID support. */
1783 if (HAS_EXTENDED_IDS(vha->hw)) {
1784 mcp->mb[1] = 0x00ff;
1786 mcp->out_mb |= MBX_10;
1788 mcp->mb[1] = 0xff00;
1790 mcp->mb[2] = vha->hw->loop_reset_delay;
1794 mcp->tov = MBX_TOV_SECONDS;
1796 rval = qla2x00_mailbox_command(vha, mcp);
1798 if (rval != QLA_SUCCESS) {
1800 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
1803 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
1804 "Done %s.\n", __func__);
1815 * ha = adapter block pointer.
1816 * sns = pointer for command.
1817 * cmd_size = command size.
1818 * buf_size = response/command size.
1819 * TARGET_QUEUE_LOCK must be released.
1820 * ADAPTER_STATE_LOCK must be released.
1823 * qla2x00 local function return status code.
/*
 * Send a Simple Name Server (SNS) command whose payload already resides
 * at @sns_phys_address.  The command is bidirectional DMA (request out,
 * response in, same buffer of @buf_size bytes).
 */
1829 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1830 uint16_t cmd_size, size_t buf_size)
1834 mbx_cmd_t *mcp = &mc;
1836 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
1837 "Entered %s.\n", __func__);
1839 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
1840 "Retry cnt=%d ratov=%d total tov=%d.\n",
1841 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
1843 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
1844 mcp->mb[1] = cmd_size;
1845 mcp->mb[2] = MSW(sns_phys_address);
1846 mcp->mb[3] = LSW(sns_phys_address);
1847 mcp->mb[6] = MSW(MSD(sns_phys_address));
1848 mcp->mb[7] = LSW(MSD(sns_phys_address));
1849 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1850 mcp->in_mb = MBX_0|MBX_1;
1851 mcp->buf_size = buf_size;
1852 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
/* 2.5x login timeout: allows for fabric retries during the exchange. */
1853 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
1854 rval = qla2x00_mailbox_command(vha, mcp);
1856 if (rval != QLA_SUCCESS) {
1858 ql_dbg(ql_dbg_mbx, vha, 0x105f,
1859 "Failed=%x mb[0]=%x mb[1]=%x.\n",
1860 rval, mcp->mb[0], mcp->mb[1]);
1863 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
1864 "Done %s.\n", __func__);
/*
 * Perform a fabric (PLOGI) login on ISP24xx-class hardware using a
 * LOGIN/LOGOUT port IOCB rather than a mailbox command.  Results are
 * mapped back into mailbox-style status codes in @mb[] so callers can
 * share logic with the legacy qla2x00_login_fabric() path.
 */
1871 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1872 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
1876 struct logio_entry_24xx *lg;
1879 struct qla_hw_data *ha = vha->hw;
1880 struct req_que *req;
/* With CPU affinity, always queue login IOCBs on request queue 0. */
1885 if (ha->flags.cpu_affinity_enabled)
1886 req = ha->req_q_map[0];
1890 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1892 ql_log(ql_log_warn, vha, 0x1062,
1893 "Failed to allocate login IOCB.\n");
1894 return QLA_MEMORY_ALLOC_FAILED;
1896 memset(lg, 0, sizeof(struct logio_entry_24xx));
1898 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1899 lg->entry_count = 1;
1900 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1901 lg->nport_handle = cpu_to_le16(loop_id);
1902 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
/* Option bits (presumably from 'opt') select conditional/skip-PRLI. */
1904 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1906 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1907 lg->port_id[0] = al_pa;
1908 lg->port_id[1] = area;
1909 lg->port_id[2] = domain;
1910 lg->vp_index = vha->vp_idx;
/* Timeout: 2x R_A_TOV (in seconds) plus slack. */
1911 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
1912 (ha->r_a_tov / 10 * 2) + 2);
1913 if (rval != QLA_SUCCESS) {
1914 ql_dbg(ql_dbg_mbx, vha, 0x1063,
1915 "Failed to issue login IOCB (%x).\n", rval);
1916 } else if (lg->entry_status != 0) {
1917 ql_dbg(ql_dbg_mbx, vha, 0x1064,
1918 "Failed to complete IOCB -- error status (%x).\n",
1920 rval = QLA_FUNCTION_FAILED;
1921 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
1922 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1923 iop[1] = le32_to_cpu(lg->io_parameter[1]);
1925 ql_dbg(ql_dbg_mbx, vha, 0x1065,
1926 "Failed to complete IOCB -- completion status (%x) "
1927 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
/* Translate IOCB link-status sub-codes into mailbox status values. */
1931 case LSC_SCODE_PORTID_USED:
1932 mb[0] = MBS_PORT_ID_USED;
1933 mb[1] = LSW(iop[1]);
1935 case LSC_SCODE_NPORT_USED:
1936 mb[0] = MBS_LOOP_ID_USED;
1938 case LSC_SCODE_NOLINK:
1939 case LSC_SCODE_NOIOCB:
1940 case LSC_SCODE_NOXCB:
1941 case LSC_SCODE_CMD_FAILED:
1942 case LSC_SCODE_NOFABRIC:
1943 case LSC_SCODE_FW_NOT_READY:
1944 case LSC_SCODE_NOT_LOGGED_IN:
1945 case LSC_SCODE_NOPCB:
1946 case LSC_SCODE_ELS_REJECT:
1947 case LSC_SCODE_CMD_PARAM_ERR:
1948 case LSC_SCODE_NONPORT:
1949 case LSC_SCODE_LOGGED_IN:
1950 case LSC_SCODE_NOFLOGI_ACC:
/* All remaining sub-codes collapse to a generic command error. */
1952 mb[0] = MBS_COMMAND_ERROR;
1956 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
1957 "Done %s.\n", __func__);
/* Success path: report completion plus COS/confirmation capability. */
1959 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1961 mb[0] = MBS_COMMAND_COMPLETE;
1963 if (iop[0] & BIT_4) {
1969 /* Passback COS information. */
1971 if (lg->io_parameter[7] || lg->io_parameter[8])
1972 mb[10] |= BIT_0; /* Class 2. */
1973 if (lg->io_parameter[9] || lg->io_parameter[10])
1974 mb[10] |= BIT_1; /* Class 3. */
1975 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
1976 mb[10] |= BIT_7; /* Confirmed Completion
1981 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
1987 * qla2x00_login_fabric
1988 * Issue login fabric port mailbox command.
1991 * ha = adapter block pointer.
1992 * loop_id = device loop ID.
1993 * domain = device domain.
1994 * area = device area.
1995 * al_pa = device AL_PA.
1996 * status = pointer for return status.
1997 * opt = command options.
1998 * TARGET_QUEUE_LOCK must be released.
1999 * ADAPTER_STATE_LOCK must be released.
2002 * qla2x00 local function return status code.
/*
 * Legacy (mailbox-based) fabric port login.  The 24-bit port ID is
 * packed into mb[2] (domain) and mb[3] (area/al_pa); mailbox completion
 * values are passed back to the caller for interpretation.
 */
2008 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2009 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2013 mbx_cmd_t *mcp = &mc;
2014 struct qla_hw_data *ha = vha->hw;
2016 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2017 "Entered %s.\n", __func__);
2019 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2020 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2021 if (HAS_EXTENDED_IDS(ha)) {
2022 mcp->mb[1] = loop_id;
2024 mcp->out_mb |= MBX_10;
2026 mcp->mb[1] = (loop_id << 8) | opt;
2028 mcp->mb[2] = domain;
2029 mcp->mb[3] = area << 8 | al_pa;
2031 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2032 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2034 rval = qla2x00_mailbox_command(vha, mcp);
2036 /* Return mailbox statuses. */
2043 /* COS retrieved from Get-Port-Database mailbox command. */
2047 if (rval != QLA_SUCCESS) {
2048 /* RLU tmp code: need to change main mailbox_command function to
2049 * return ok even when the mailbox completion value is not
2050 * SUCCESS. The caller needs to be responsible to interpret
2051 * the return values of this mailbox command if we're not
2052 * to change too much of the existing code.
2054 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2055 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2056 mcp->mb[0] == 0x4006)
2060 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2061 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2062 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2065 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2066 "Done %s.\n", __func__);
2073 * qla2x00_login_local_device
2074 * Issue login loop port mailbox command.
2077 * ha = adapter block pointer.
2078 * loop_id = device loop ID.
2079 * opt = command options.
2082 * Return status code.
/*
 * Log in to a local-loop device.  On FWI2-capable hardware this defers
 * entirely to the IOCB-based qla24xx_login_fabric(); otherwise the
 * legacy MBC_LOGIN_LOOP_PORT mailbox command is used.  Mailbox results
 * are optionally returned via @mb_ret.
 */
2089 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2090 uint16_t *mb_ret, uint8_t opt)
2094 mbx_cmd_t *mcp = &mc;
2095 struct qla_hw_data *ha = vha->hw;
2097 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2098 "Entered %s.\n", __func__);
2100 if (IS_FWI2_CAPABLE(ha))
2101 return qla24xx_login_fabric(vha, fcport->loop_id,
2102 fcport->d_id.b.domain, fcport->d_id.b.area,
2103 fcport->d_id.b.al_pa, mb_ret, opt);
2105 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2106 if (HAS_EXTENDED_IDS(ha))
2107 mcp->mb[1] = fcport->loop_id;
2109 mcp->mb[1] = fcport->loop_id << 8;
2111 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2112 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2113 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2115 rval = qla2x00_mailbox_command(vha, mcp);
2117 /* Return mailbox statuses. */
2118 if (mb_ret != NULL) {
2119 mb_ret[0] = mcp->mb[0];
2120 mb_ret[1] = mcp->mb[1];
2121 mb_ret[6] = mcp->mb[6];
2122 mb_ret[7] = mcp->mb[7];
2125 if (rval != QLA_SUCCESS) {
2126 /* AV tmp code: need to change main mailbox_command function to
2127 * return ok even when the mailbox completion value is not
2128 * SUCCESS. The caller needs to be responsible to interpret
2129 * the return values of this mailbox command if we're not
2130 * to change too much of the existing code.
2132 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2135 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2136 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2137 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2140 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2141 "Done %s.\n", __func__);
/*
 * Perform a fabric logout (LOGO) on ISP24xx-class hardware via a
 * LOGIN/LOGOUT port IOCB.  Mirrors qla24xx_login_fabric() but with
 * implicit/explicit logout control flags.
 */
2148 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2149 uint8_t area, uint8_t al_pa)
2152 struct logio_entry_24xx *lg;
2154 struct qla_hw_data *ha = vha->hw;
2155 struct req_que *req;
2157 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2158 "Entered %s.\n", __func__);
2160 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2162 ql_log(ql_log_warn, vha, 0x106e,
2163 "Failed to allocate logout IOCB.\n");
2164 return QLA_MEMORY_ALLOC_FAILED;
2166 memset(lg, 0, sizeof(struct logio_entry_24xx));
/* Multi-queue configurations route the IOCB through queue 0. */
2168 if (ql2xmaxqueues > 1)
2169 req = ha->req_q_map[0];
2172 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2173 lg->entry_count = 1;
2174 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2175 lg->nport_handle = cpu_to_le16(loop_id);
2177 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2179 lg->port_id[0] = al_pa;
2180 lg->port_id[1] = area;
2181 lg->port_id[2] = domain;
2182 lg->vp_index = vha->vp_idx;
2183 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2184 (ha->r_a_tov / 10 * 2) + 2);
2185 if (rval != QLA_SUCCESS) {
2186 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2187 "Failed to issue logout IOCB (%x).\n", rval);
2188 } else if (lg->entry_status != 0) {
2189 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2190 "Failed to complete IOCB -- error status (%x).\n",
2192 rval = QLA_FUNCTION_FAILED;
2193 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2194 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2195 "Failed to complete IOCB -- completion status (%x) "
2196 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2197 le32_to_cpu(lg->io_parameter[0]),
2198 le32_to_cpu(lg->io_parameter[1]));
2201 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2202 "Done %s.\n", __func__);
2205 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2211 * qla2x00_fabric_logout
2212 * Issue logout fabric port mailbox command.
2215 * ha = adapter block pointer.
2216 * loop_id = device loop ID.
2217 * TARGET_QUEUE_LOCK must be released.
2218 * ADAPTER_STATE_LOCK must be released.
2221 * qla2x00 local function return status code.
/*
 * Legacy (mailbox-based) fabric port logout via MBC_LOGOUT_FABRIC_PORT.
 * NOTE(review): domain/area/al_pa parameters are accepted but not used
 * in any visible line here — presumably consumed by elided code or kept
 * for signature parity with the 24xx variant; confirm against full file.
 */
2227 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2228 uint8_t area, uint8_t al_pa)
2232 mbx_cmd_t *mcp = &mc;
2234 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2235 "Entered %s.\n", __func__);
2237 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2238 mcp->out_mb = MBX_1|MBX_0;
2239 if (HAS_EXTENDED_IDS(vha->hw)) {
2240 mcp->mb[1] = loop_id;
2242 mcp->out_mb |= MBX_10;
2244 mcp->mb[1] = loop_id << 8;
2247 mcp->in_mb = MBX_1|MBX_0;
2248 mcp->tov = MBX_TOV_SECONDS;
2250 rval = qla2x00_mailbox_command(vha, mcp);
2252 if (rval != QLA_SUCCESS) {
2254 ql_dbg(ql_dbg_mbx, vha, 0x1074,
2255 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2258 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2259 "Done %s.\n", __func__);
2266 * qla2x00_full_login_lip
2267 * Issue full login LIP mailbox command.
2270 * ha = adapter block pointer.
2271 * TARGET_QUEUE_LOCK must be released.
2272 * ADAPTER_STATE_LOCK must be released.
2275 * qla2x00 local function return status code.
/*
 * Issue a full-login LIP (MBC_LIP_FULL_LOGIN).  On FWI2 parts mb[1]
 * carries BIT_3; on legacy parts it is zero.
 */
2281 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2285 mbx_cmd_t *mcp = &mc;
2287 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2288 "Entered %s.\n", __func__);
2290 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2291 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
2294 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2296 mcp->tov = MBX_TOV_SECONDS;
2298 rval = qla2x00_mailbox_command(vha, mcp);
2300 if (rval != QLA_SUCCESS) {
2302 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2305 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2306 "Done %s.\n", __func__);
2313 * qla2x00_get_id_list
2316 * ha = adapter block pointer.
2319 * qla2x00 local function return status code.
/*
 * Fetch the firmware's loop-ID list into @id_list (pre-allocated DMA
 * buffer at @id_list_dma); the entry count is returned via *entries.
 * The DMA-address register layout differs between FWI2 and legacy parts.
 */
2325 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2330 mbx_cmd_t *mcp = &mc;
2332 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2333 "Entered %s.\n", __func__);
2335 if (id_list == NULL)
2336 return QLA_FUNCTION_FAILED;
2338 mcp->mb[0] = MBC_GET_ID_LIST;
2339 mcp->out_mb = MBX_0;
2340 if (IS_FWI2_CAPABLE(vha->hw)) {
2341 mcp->mb[2] = MSW(id_list_dma);
2342 mcp->mb[3] = LSW(id_list_dma);
2343 mcp->mb[6] = MSW(MSD(id_list_dma));
2344 mcp->mb[7] = LSW(MSD(id_list_dma));
2346 mcp->mb[9] = vha->vp_idx;
2347 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
/* Legacy parts use a different (mb[1..3,6]) register packing. */
2349 mcp->mb[1] = MSW(id_list_dma);
2350 mcp->mb[2] = LSW(id_list_dma);
2351 mcp->mb[3] = MSW(MSD(id_list_dma));
2352 mcp->mb[6] = LSW(MSD(id_list_dma));
2353 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2355 mcp->in_mb = MBX_1|MBX_0;
2356 mcp->tov = MBX_TOV_SECONDS;
2358 rval = qla2x00_mailbox_command(vha, mcp);
2360 if (rval != QLA_SUCCESS) {
2362 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2364 *entries = mcp->mb[1];
2365 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2366 "Done %s.\n", __func__);
2373 * qla2x00_get_resource_cnts
2374 * Get current firmware resource counts.
2377 * ha = adapter block pointer.
2380 * qla2x00 local function return status code.
/*
 * Query current firmware resource counts (exchanges, IOCBs, NPIV vports,
 * FCFs).  Each output pointer is optional except where guarded by a
 * feature flag; unused outputs may be NULL (only guarded writes are
 * visible here — unguarded writes to cur/orig counts assume non-NULL).
 */
2386 qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2387 uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
2388 uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports, uint16_t *max_fcfs)
2392 mbx_cmd_t *mcp = &mc;
2394 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2395 "Entered %s.\n", __func__);
2397 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2398 mcp->out_mb = MBX_0;
2399 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
/* mb[12] (max FCFs) only exists on 81xx/83xx/27xx firmware. */
2400 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
2401 mcp->in_mb |= MBX_12;
2402 mcp->tov = MBX_TOV_SECONDS;
2404 rval = qla2x00_mailbox_command(vha, mcp);
2406 if (rval != QLA_SUCCESS) {
2408 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2409 "Failed mb[0]=%x.\n", mcp->mb[0]);
2411 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2412 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2413 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2414 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2415 mcp->mb[11], mcp->mb[12]);
2418 *cur_xchg_cnt = mcp->mb[3];
2420 *orig_xchg_cnt = mcp->mb[6];
2422 *cur_iocb_cnt = mcp->mb[7];
2424 *orig_iocb_cnt = mcp->mb[10];
2425 if (vha->hw->flags.npiv_supported && max_npiv_vports)
2426 *max_npiv_vports = mcp->mb[11];
2427 if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) ||
2428 IS_QLA27XX(vha->hw)) && max_fcfs)
2429 *max_fcfs = mcp->mb[12];
2436 * qla2x00_get_fcal_position_map
2437 * Get FCAL (LILP) position map using mailbox command
2440 * ha = adapter state pointer.
2441 * pos_map = buffer pointer (can be NULL).
2444 * qla2x00 local function return status code.
/*
 * Retrieve the FC-AL (LILP) position map into a pool-allocated DMA
 * buffer and optionally copy it to @pos_map (FCAL_MAP_SIZE bytes).
 */
2450 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2454 mbx_cmd_t *mcp = &mc;
2456 dma_addr_t pmap_dma;
2457 struct qla_hw_data *ha = vha->hw;
2459 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2460 "Entered %s.\n", __func__);
2462 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2464 ql_log(ql_log_warn, vha, 0x1080,
2465 "Memory alloc failed.\n");
2466 return QLA_MEMORY_ALLOC_FAILED;
2468 memset(pmap, 0, FCAL_MAP_SIZE);
2470 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2471 mcp->mb[2] = MSW(pmap_dma);
2472 mcp->mb[3] = LSW(pmap_dma);
2473 mcp->mb[6] = MSW(MSD(pmap_dma));
2474 mcp->mb[7] = LSW(MSD(pmap_dma));
2475 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2476 mcp->in_mb = MBX_1|MBX_0;
2477 mcp->buf_size = FCAL_MAP_SIZE;
2478 mcp->flags = MBX_DMA_IN;
2479 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2480 rval = qla2x00_mailbox_command(vha, mcp);
2482 if (rval == QLA_SUCCESS) {
/* pmap[0] holds the map size per firmware layout. */
2483 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2484 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2485 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2486 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2490 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2492 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2494 if (rval != QLA_SUCCESS) {
2495 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2497 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2498 "Done %s.\n", __func__);
2505 * qla2x00_get_link_status
2508 * ha = adapter block pointer.
2509 * loop_id = device loop ID.
2510 * ret_buf = pointer to link status return buffer.
2514 * BIT_0 = mem alloc error.
2515 * BIT_1 = mailbox error.
/*
 * Fetch link statistics for @loop_id into @stats (DMA buffer supplied
 * by the caller).  Firmware writes little-endian data; the buffer is
 * byte-swapped in place up to the 'unused1' member.
 */
2518 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2519 struct link_statistics *stats, dma_addr_t stats_dma)
2523 mbx_cmd_t *mcp = &mc;
2524 uint32_t *siter, *diter, dwords;
2525 struct qla_hw_data *ha = vha->hw;
2527 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
2528 "Entered %s.\n", __func__);
2530 mcp->mb[0] = MBC_GET_LINK_STATUS;
2531 mcp->mb[2] = MSW(stats_dma);
2532 mcp->mb[3] = LSW(stats_dma);
2533 mcp->mb[6] = MSW(MSD(stats_dma));
2534 mcp->mb[7] = LSW(MSD(stats_dma));
2535 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
/* Loop-ID register usage varies by hardware generation. */
2537 if (IS_FWI2_CAPABLE(ha)) {
2538 mcp->mb[1] = loop_id;
2541 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
2542 mcp->in_mb |= MBX_1;
2543 } else if (HAS_EXTENDED_IDS(ha)) {
2544 mcp->mb[1] = loop_id;
2546 mcp->out_mb |= MBX_10|MBX_1;
2548 mcp->mb[1] = loop_id << 8;
2549 mcp->out_mb |= MBX_1;
2551 mcp->tov = MBX_TOV_SECONDS;
2552 mcp->flags = IOCTL_CMD;
2553 rval = qla2x00_mailbox_command(vha, mcp);
2555 if (rval == QLA_SUCCESS) {
2556 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2557 ql_dbg(ql_dbg_mbx, vha, 0x1085,
2558 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2559 rval = QLA_FUNCTION_FAILED;
2561 /* Copy over data -- firmware data is LE. */
2562 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
2563 "Done %s.\n", __func__);
2564 dwords = offsetof(struct link_statistics, unused1) / 4;
2565 siter = diter = &stats->link_fail_cnt;
2567 *diter++ = le32_to_cpu(*siter++);
2571 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
/*
 * ISP24xx variant of link-statistics retrieval (MBC_GET_LINK_PRIV_STATS).
 * The full link_statistics structure is DMA'd in and byte-swapped from
 * little-endian in place.
 */
2578 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2579 dma_addr_t stats_dma)
2583 mbx_cmd_t *mcp = &mc;
2584 uint32_t *siter, *diter, dwords;
2586 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
2587 "Entered %s.\n", __func__);
2589 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
2590 mcp->mb[2] = MSW(stats_dma);
2591 mcp->mb[3] = LSW(stats_dma);
2592 mcp->mb[6] = MSW(MSD(stats_dma));
2593 mcp->mb[7] = LSW(MSD(stats_dma));
/* mb[8] carries the transfer length in 32-bit words. */
2594 mcp->mb[8] = sizeof(struct link_statistics) / 4;
2595 mcp->mb[9] = vha->vp_idx;
2597 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2598 mcp->in_mb = MBX_2|MBX_1|MBX_0;
2599 mcp->tov = MBX_TOV_SECONDS;
2600 mcp->flags = IOCTL_CMD;
2601 rval = qla2x00_mailbox_command(vha, mcp);
2603 if (rval == QLA_SUCCESS) {
2604 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2605 ql_dbg(ql_dbg_mbx, vha, 0x1089,
2606 "Failed mb[0]=%x.\n", mcp->mb[0]);
2607 rval = QLA_FUNCTION_FAILED;
2609 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
2610 "Done %s.\n", __func__);
2611 /* Copy over data -- firmware data is LE. */
2612 dwords = sizeof(struct link_statistics) / 4;
2613 siter = diter = &stats->link_fail_cnt;
2615 *diter++ = le32_to_cpu(*siter++);
2619 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
/*
 * Abort an outstanding SRB on ISP24xx-class hardware using an abort
 * IOCB.  Locates the command's handle in the request queue's outstanding
 * array (under hardware_lock), then issues ABORT_IOCB_TYPE.  On success
 * the firmware echoes 0 in nport_handle; any other value is a failure.
 */
2626 qla24xx_abort_command(srb_t *sp)
2629 unsigned long flags = 0;
2631 struct abort_entry_24xx *abt;
2634 fc_port_t *fcport = sp->fcport;
2635 struct scsi_qla_host *vha = fcport->vha;
2636 struct qla_hw_data *ha = vha->hw;
2637 struct req_que *req = vha->req;
2639 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
2640 "Entered %s.\n", __func__);
/* Async-TMF mode delegates to the interrupt-driven abort path. */
2642 if (ql2xasynctmfenable)
2643 return qla24xx_async_abort_command(sp);
/* Find the outstanding-command handle for this SRB. */
2645 spin_lock_irqsave(&ha->hardware_lock, flags);
2646 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
2647 if (req->outstanding_cmds[handle] == sp)
2650 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2651 if (handle == req->num_outstanding_cmds) {
2652 /* Command not found. */
2653 return QLA_FUNCTION_FAILED;
2656 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
2658 ql_log(ql_log_warn, vha, 0x108d,
2659 "Failed to allocate abort IOCB.\n");
2660 return QLA_MEMORY_ALLOC_FAILED;
2662 memset(abt, 0, sizeof(struct abort_entry_24xx));
2664 abt->entry_type = ABORT_IOCB_TYPE;
2665 abt->entry_count = 1;
2666 abt->handle = MAKE_HANDLE(req->id, abt->handle);
2667 abt->nport_handle = cpu_to_le16(fcport->loop_id);
2668 abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
2669 abt->port_id[0] = fcport->d_id.b.al_pa;
2670 abt->port_id[1] = fcport->d_id.b.area;
2671 abt->port_id[2] = fcport->d_id.b.domain;
2672 abt->vp_index = fcport->vha->vp_idx;
2674 abt->req_que_no = cpu_to_le16(req->id);
2676 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
2677 if (rval != QLA_SUCCESS) {
2678 ql_dbg(ql_dbg_mbx, vha, 0x108e,
2679 "Failed to issue IOCB (%x).\n", rval);
2680 } else if (abt->entry_status != 0) {
2681 ql_dbg(ql_dbg_mbx, vha, 0x108f,
2682 "Failed to complete IOCB -- error status (%x).\n",
2684 rval = QLA_FUNCTION_FAILED;
/* Firmware reuses nport_handle as the completion status field. */
2685 } else if (abt->nport_handle != cpu_to_le16(0)) {
2686 ql_dbg(ql_dbg_mbx, vha, 0x1090,
2687 "Failed to complete IOCB -- completion status (%x).\n",
2688 le16_to_cpu(abt->nport_handle));
/*
 * NOTE(review): this compares an __le16 field against a host-order
 * constant without cpu_to_le16() — works on little-endian hosts only;
 * verify on big-endian targets.
 */
2689 if (abt->nport_handle == CS_IOCB_ERROR)
2690 rval = QLA_FUNCTION_PARAMETER_ERROR;
2692 rval = QLA_FUNCTION_FAILED;
2694 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
2695 "Done %s.\n", __func__);
2698 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
/*
 * Overlay of the task-management request IOCB and its status response —
 * the firmware rewrites the same buffer with a sts_entry_24xx on
 * completion (presumably via an elided union member; confirm in full
 * source where tsk->p.tsk / tsk->p.sts accessors are used).
 */
2703 struct tsk_mgmt_cmd {
2705 struct tsk_mgmt_entry tsk;
2706 struct sts_entry_24xx sts;
/*
 * Common worker for 24xx task-management functions (target reset / LUN
 * reset).  Builds a TSK_MGMT IOCB, issues it, validates the in-place
 * status response, and finishes with a marker IOCB to resynchronize the
 * firmware's command stream.  @name is used only for log messages.
 */
2711 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2712 uint64_t l, int tag)
2715 struct tsk_mgmt_cmd *tsk;
2716 struct sts_entry_24xx *sts;
2718 scsi_qla_host_t *vha;
2719 struct qla_hw_data *ha;
2720 struct req_que *req;
2721 struct rsp_que *rsp;
2727 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
2728 "Entered %s.\n", __func__);
/* With CPU affinity the response queue is selected by @tag. */
2730 if (ha->flags.cpu_affinity_enabled)
2731 rsp = ha->rsp_q_map[tag + 1];
2734 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2736 ql_log(ql_log_warn, vha, 0x1093,
2737 "Failed to allocate task management IOCB.\n");
2738 return QLA_MEMORY_ALLOC_FAILED;
2740 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
2742 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
2743 tsk->p.tsk.entry_count = 1;
2744 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
2745 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
2746 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2747 tsk->p.tsk.control_flags = cpu_to_le32(type);
2748 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
2749 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
2750 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
2751 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
/* LUN resets carry the LUN in FCP (big-endian-swapped) format. */
2752 if (type == TCF_LUN_RESET) {
2753 int_to_scsilun(l, &tsk->p.tsk.lun);
2754 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
2755 sizeof(tsk->p.tsk.lun));
2759 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
2760 if (rval != QLA_SUCCESS) {
2761 ql_dbg(ql_dbg_mbx, vha, 0x1094,
2762 "Failed to issue %s reset IOCB (%x).\n", name, rval);
2763 } else if (sts->entry_status != 0) {
2764 ql_dbg(ql_dbg_mbx, vha, 0x1095,
2765 "Failed to complete IOCB -- error status (%x).\n",
2767 rval = QLA_FUNCTION_FAILED;
2768 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
2769 ql_dbg(ql_dbg_mbx, vha, 0x1096,
2770 "Failed to complete IOCB -- completion status (%x).\n",
2771 le16_to_cpu(sts->comp_status));
2772 rval = QLA_FUNCTION_FAILED;
/* FCP response-info byte 3 is the task-management response code. */
2773 } else if (le16_to_cpu(sts->scsi_status) &
2774 SS_RESPONSE_INFO_LEN_VALID) {
2775 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2776 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
2777 "Ignoring inconsistent data length -- not enough "
2778 "response info (%d).\n",
2779 le32_to_cpu(sts->rsp_data_len));
2780 } else if (sts->data[3]) {
2781 ql_dbg(ql_dbg_mbx, vha, 0x1098,
2782 "Failed to complete IOCB -- response (%x).\n",
2784 rval = QLA_FUNCTION_FAILED;
2788 /* Issue marker IOCB. */
2789 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
2790 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
2791 if (rval2 != QLA_SUCCESS) {
2792 ql_dbg(ql_dbg_mbx, vha, 0x1099,
2793 "Failed to issue marker IOCB (%x).\n", rval2);
2795 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
2796 "Done %s.\n", __func__);
2799 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
/*
 * qla24xx_abort_target() - Target-reset entry point.
 * Uses the async TM path when ql2xasynctmfenable is set on FWI2-capable
 * adapters; otherwise falls back to the synchronous IOCB path.
 */
2805 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
2807 struct qla_hw_data *ha = fcport->vha->hw;
2809 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
2810 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
2812 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
/*
 * qla24xx_lun_reset() - LUN-reset entry point; mirrors
 * qla24xx_abort_target() but issues TCF_LUN_RESET.
 */
2816 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
2818 struct qla_hw_data *ha = fcport->vha->hw;
2820 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
2821 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
2823 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
/*
 * qla2x00_system_error() - Issue MBC_GEN_SYSTEM_ERROR to deliberately
 * force a firmware system error (diagnostic aid).
 * Supported only on 23xx and FWI2-capable adapters.
 */
2827 qla2x00_system_error(scsi_qla_host_t *vha)
2831 mbx_cmd_t *mcp = &mc;
2832 struct qla_hw_data *ha = vha->hw;
2834 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
2835 return QLA_FUNCTION_FAILED;
2837 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
2838 "Entered %s.\n", __func__);
2840 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
2841 mcp->out_mb = MBX_0;
2845 rval = qla2x00_mailbox_command(vha, mcp);
2847 if (rval != QLA_SUCCESS) {
2848 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
2850 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
2851 "Done %s.\n", __func__);
/*
 * qla2x00_write_serdes_word() - Write one serdes register via the
 * MBC_WRITE_SERDES mailbox command (25xx/2031/27xx only).
 * On 2031 only the low byte of @data is written.
 */
2858 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
2862 mbx_cmd_t *mcp = &mc;
2864 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
2865 !IS_QLA27XX(vha->hw))
2866 return QLA_FUNCTION_FAILED;
2868 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
2869 "Entered %s.\n", __func__);
2871 mcp->mb[0] = MBC_WRITE_SERDES;
2873 if (IS_QLA2031(vha->hw))
2874 mcp->mb[2] = data & 0xff;
2879 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2881 mcp->tov = MBX_TOV_SECONDS;
2883 rval = qla2x00_mailbox_command(vha, mcp);
2885 if (rval != QLA_SUCCESS) {
2886 ql_dbg(ql_dbg_mbx, vha, 0x1183,
2887 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2889 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
2890 "Done %s.\n", __func__);
/*
 * qla2x00_read_serdes_word() - Read one serdes register via the
 * MBC_READ_SERDES mailbox command (25xx/2031/27xx only).
 * Result is returned in *data (low byte only on 2031); note *data is
 * written even when the mailbox command fails.
 */
2897 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
2901 mbx_cmd_t *mcp = &mc;
2903 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
2904 !IS_QLA27XX(vha->hw))
2905 return QLA_FUNCTION_FAILED;
2907 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
2908 "Entered %s.\n", __func__);
2910 mcp->mb[0] = MBC_READ_SERDES;
2913 mcp->out_mb = MBX_3|MBX_1|MBX_0;
2914 mcp->in_mb = MBX_1|MBX_0;
2915 mcp->tov = MBX_TOV_SECONDS;
2917 rval = qla2x00_mailbox_command(vha, mcp);
2919 if (IS_QLA2031(vha->hw))
2920 *data = mcp->mb[1] & 0xff;
2924 if (rval != QLA_SUCCESS) {
2925 ql_dbg(ql_dbg_mbx, vha, 0x1186,
2926 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2928 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
2929 "Done %s.\n", __func__);
/*
 * qla8044_write_serdes_word() - Write a 32-bit serdes register on
 * ISP8044 via MBC_SET_GET_ETH_SERDES_REG (HCS_WRITE_SERDES sub-op).
 * Address and data are split LSW/MSW across mb[3..6].
 */
2936 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
2940 mbx_cmd_t *mcp = &mc;
2942 if (!IS_QLA8044(vha->hw))
2943 return QLA_FUNCTION_FAILED;
2945 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1186,
2946 "Entered %s.\n", __func__);
2948 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
2949 mcp->mb[1] = HCS_WRITE_SERDES;
2950 mcp->mb[3] = LSW(addr);
2951 mcp->mb[4] = MSW(addr);
2952 mcp->mb[5] = LSW(data);
2953 mcp->mb[6] = MSW(data);
2954 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
2956 mcp->tov = MBX_TOV_SECONDS;
2958 rval = qla2x00_mailbox_command(vha, mcp);
2960 if (rval != QLA_SUCCESS) {
2961 ql_dbg(ql_dbg_mbx, vha, 0x1187,
2962 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2964 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
2965 "Done %s.\n", __func__);
/*
 * qla8044_read_serdes_word() - Read a 32-bit serdes register on ISP8044
 * via MBC_SET_GET_ETH_SERDES_REG (HCS_READ_SERDES sub-op).
 * Result assembled from mb[2]:mb[1]; note *data is written even when
 * the mailbox command fails.
 */
2972 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
2976 mbx_cmd_t *mcp = &mc;
2978 if (!IS_QLA8044(vha->hw))
2979 return QLA_FUNCTION_FAILED;
2981 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
2982 "Entered %s.\n", __func__);
2984 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
2985 mcp->mb[1] = HCS_READ_SERDES;
2986 mcp->mb[3] = LSW(addr);
2987 mcp->mb[4] = MSW(addr);
2988 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
2989 mcp->in_mb = MBX_2|MBX_1|MBX_0;
2990 mcp->tov = MBX_TOV_SECONDS;
2992 rval = qla2x00_mailbox_command(vha, mcp);
2994 *data = mcp->mb[2] << 16 | mcp->mb[1];
2996 if (rval != QLA_SUCCESS) {
2997 ql_dbg(ql_dbg_mbx, vha, 0x118a,
2998 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3000 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3001 "Done %s.\n", __func__);
3008 * qla2x00_set_serdes_params() - Set serdes transmitter emphasis
 *	parameters (1G/2G/4G software emphasis values).
/*
 * Issue MBC_SERDES_PARAMS with the three per-rate emphasis values;
 * BIT_15 marks each value as valid/override (presumably an "apply"
 * flag -- TODO confirm against mailbox spec).
 */
3014 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3015 uint16_t sw_em_2g, uint16_t sw_em_4g)
3019 mbx_cmd_t *mcp = &mc;
3021 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3022 "Entered %s.\n", __func__);
3024 mcp->mb[0] = MBC_SERDES_PARAMS;
3026 mcp->mb[2] = sw_em_1g | BIT_15;
3027 mcp->mb[3] = sw_em_2g | BIT_15;
3028 mcp->mb[4] = sw_em_4g | BIT_15;
3029 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3031 mcp->tov = MBX_TOV_SECONDS;
3033 rval = qla2x00_mailbox_command(vha, mcp);
3035 if (rval != QLA_SUCCESS) {
3037 ql_dbg(ql_dbg_mbx, vha, 0x109f,
3038 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3041 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3042 "Done %s.\n", __func__);
/*
 * qla2x00_stop_firmware() - Issue MBC_STOP_FIRMWARE (FWI2-capable only).
 * Maps MBS_INVALID_COMMAND to QLA_INVALID_COMMAND so callers can tell
 * "unsupported" apart from a genuine failure.
 */
3049 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3053 mbx_cmd_t *mcp = &mc;
3055 if (!IS_FWI2_CAPABLE(vha->hw))
3056 return QLA_FUNCTION_FAILED;
3058 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3059 "Entered %s.\n", __func__);
3061 mcp->mb[0] = MBC_STOP_FIRMWARE;
3063 mcp->out_mb = MBX_1|MBX_0;
3067 rval = qla2x00_mailbox_command(vha, mcp);
3069 if (rval != QLA_SUCCESS) {
3070 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3071 if (mcp->mb[0] == MBS_INVALID_COMMAND)
3072 rval = QLA_INVALID_COMMAND;
3074 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3075 "Done %s.\n", __func__);
/*
 * qla2x00_enable_eft_trace() - Enable Extended Firmware Trace into a
 * pre-allocated DMA buffer via MBC_TRACE_CONTROL/TC_EFT_ENABLE.
 * The 64-bit buffer address is split across mb[2..5]; AEN capture is
 * explicitly disabled (TC_AEN_DISABLE).
 */
3082 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3087 mbx_cmd_t *mcp = &mc;
3089 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3090 "Entered %s.\n", __func__);
3092 if (!IS_FWI2_CAPABLE(vha->hw))
3093 return QLA_FUNCTION_FAILED;
3095 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3096 return QLA_FUNCTION_FAILED;
3098 mcp->mb[0] = MBC_TRACE_CONTROL;
3099 mcp->mb[1] = TC_EFT_ENABLE;
3100 mcp->mb[2] = LSW(eft_dma);
3101 mcp->mb[3] = MSW(eft_dma);
3102 mcp->mb[4] = LSW(MSD(eft_dma));
3103 mcp->mb[5] = MSW(MSD(eft_dma));
3104 mcp->mb[6] = buffers;
3105 mcp->mb[7] = TC_AEN_DISABLE;
3106 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3107 mcp->in_mb = MBX_1|MBX_0;
3108 mcp->tov = MBX_TOV_SECONDS;
3110 rval = qla2x00_mailbox_command(vha, mcp);
3111 if (rval != QLA_SUCCESS) {
3112 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3113 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3114 rval, mcp->mb[0], mcp->mb[1]);
3116 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3117 "Done %s.\n", __func__);
/*
 * qla2x00_disable_eft_trace() - Turn off Extended Firmware Trace via
 * MBC_TRACE_CONTROL/TC_EFT_DISABLE (FWI2-capable, PCI channel online).
 */
3124 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3128 mbx_cmd_t *mcp = &mc;
3130 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3131 "Entered %s.\n", __func__);
3133 if (!IS_FWI2_CAPABLE(vha->hw))
3134 return QLA_FUNCTION_FAILED;
3136 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3137 return QLA_FUNCTION_FAILED;
3139 mcp->mb[0] = MBC_TRACE_CONTROL;
3140 mcp->mb[1] = TC_EFT_DISABLE;
3141 mcp->out_mb = MBX_1|MBX_0;
3142 mcp->in_mb = MBX_1|MBX_0;
3143 mcp->tov = MBX_TOV_SECONDS;
3145 rval = qla2x00_mailbox_command(vha, mcp);
3146 if (rval != QLA_SUCCESS) {
3147 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3148 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3149 rval, mcp->mb[0], mcp->mb[1]);
3151 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3152 "Done %s.\n", __func__);
/*
 * qla2x00_enable_fce_trace() - Enable Fibre Channel Event tracing via
 * MBC_TRACE_CONTROL/TC_FCE_ENABLE (25xx/81xx/83xx/27xx only).
 * On success the first 8 returned mailbox registers are copied to @mb
 * for the caller.
 */
3159 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3160 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3164 mbx_cmd_t *mcp = &mc;
3166 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3167 "Entered %s.\n", __func__);
3169 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3170 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
3171 return QLA_FUNCTION_FAILED;
3173 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3174 return QLA_FUNCTION_FAILED;
3176 mcp->mb[0] = MBC_TRACE_CONTROL;
3177 mcp->mb[1] = TC_FCE_ENABLE;
3178 mcp->mb[2] = LSW(fce_dma);
3179 mcp->mb[3] = MSW(fce_dma);
3180 mcp->mb[4] = LSW(MSD(fce_dma));
3181 mcp->mb[5] = MSW(MSD(fce_dma));
3182 mcp->mb[6] = buffers;
3183 mcp->mb[7] = TC_AEN_DISABLE;
3185 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3186 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3187 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3189 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3190 mcp->tov = MBX_TOV_SECONDS;
3192 rval = qla2x00_mailbox_command(vha, mcp);
3193 if (rval != QLA_SUCCESS) {
3194 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3195 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3196 rval, mcp->mb[0], mcp->mb[1]);
3198 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3199 "Done %s.\n", __func__);
/* Hand back mb[0..7] so the caller can record trace parameters. */
3202 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
/*
 * qla2x00_disable_fce_trace() - Disable FCE tracing via
 * MBC_TRACE_CONTROL/TC_FCE_DISABLE and return the final 64-bit write
 * (*wr, from mb[5..2]) and read (*rd, from mb[9..6]) trace pointers.
 */
3211 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3215 mbx_cmd_t *mcp = &mc;
3217 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3218 "Entered %s.\n", __func__);
3220 if (!IS_FWI2_CAPABLE(vha->hw))
3221 return QLA_FUNCTION_FAILED;
3223 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3224 return QLA_FUNCTION_FAILED;
3226 mcp->mb[0] = MBC_TRACE_CONTROL;
3227 mcp->mb[1] = TC_FCE_DISABLE;
3228 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3229 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3230 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3232 mcp->tov = MBX_TOV_SECONDS;
3234 rval = qla2x00_mailbox_command(vha, mcp);
3235 if (rval != QLA_SUCCESS) {
3236 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3237 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3238 rval, mcp->mb[0], mcp->mb[1]);
3240 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3241 "Done %s.\n", __func__);
/* Assemble 64-bit pointers from four 16-bit mailbox registers each. */
3244 *wr = (uint64_t) mcp->mb[5] << 48 |
3245 (uint64_t) mcp->mb[4] << 32 |
3246 (uint64_t) mcp->mb[3] << 16 |
3247 (uint64_t) mcp->mb[2];
3249 *rd = (uint64_t) mcp->mb[9] << 48 |
3250 (uint64_t) mcp->mb[8] << 32 |
3251 (uint64_t) mcp->mb[7] << 16 |
3252 (uint64_t) mcp->mb[6];
/*
 * qla2x00_get_idma_speed() - Query the iIDMA port speed for @loop_id
 * via MBC_PORT_PARAMS (mb[2]/mb[3] zero => "get" operation).
 * *port_speed is set from mb[3]; mailbox statuses go to @mb.
 */
3259 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3260 uint16_t *port_speed, uint16_t *mb)
3264 mbx_cmd_t *mcp = &mc;
3266 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3267 "Entered %s.\n", __func__);
3269 if (!IS_IIDMA_CAPABLE(vha->hw))
3270 return QLA_FUNCTION_FAILED;
3272 mcp->mb[0] = MBC_PORT_PARAMS;
3273 mcp->mb[1] = loop_id;
3274 mcp->mb[2] = mcp->mb[3] = 0;
3275 mcp->mb[9] = vha->vp_idx;
3276 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3277 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3278 mcp->tov = MBX_TOV_SECONDS;
3280 rval = qla2x00_mailbox_command(vha, mcp);
3282 /* Return mailbox statuses. */
3289 if (rval != QLA_SUCCESS) {
3290 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3292 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3293 "Done %s.\n", __func__);
3295 *port_speed = mcp->mb[3];
/*
 * qla2x00_set_idma_speed() - Set the iIDMA port speed for @loop_id via
 * MBC_PORT_PARAMS; only the low 6 bits of @port_speed are sent.
 * Mailbox statuses are returned through @mb.
 */
3302 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3303 uint16_t port_speed, uint16_t *mb)
3307 mbx_cmd_t *mcp = &mc;
3309 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3310 "Entered %s.\n", __func__);
3312 if (!IS_IIDMA_CAPABLE(vha->hw))
3313 return QLA_FUNCTION_FAILED;
3315 mcp->mb[0] = MBC_PORT_PARAMS;
3316 mcp->mb[1] = loop_id;
3318 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
3319 mcp->mb[9] = vha->vp_idx;
3320 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3321 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3322 mcp->tov = MBX_TOV_SECONDS;
3324 rval = qla2x00_mailbox_command(vha, mcp);
3326 /* Return mailbox statuses. */
3333 if (rval != QLA_SUCCESS) {
3334 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3335 "Failed=%x.\n", rval);
3337 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3338 "Done %s.\n", __func__);
/*
 * qla24xx_report_id_acquisition() - Handle a Report-ID-Acquisition
 * IOCB from the response queue.
 *
 * Format 0 entries just log VP counts and the primary port id.
 * Format 1 entries carry a VP's acquired port id: update the matching
 * vport's d_id, optionally pick up the fabric-assigned WWPN (FA-WWN,
 * physical port only), and defer re-registration to the DPC thread --
 * this runs in response-queue context and must not block.
 *
 * NOTE(review): several lines (locals, else branches, braces) are
 * elided from this view; code left byte-identical.
 */
3345 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3346 struct vp_rpt_id_entry_24xx *rptid_entry)
3349 uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
3350 struct qla_hw_data *ha = vha->hw;
3351 scsi_qla_host_t *vp;
3352 unsigned long flags;
3355 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3356 "Entered %s.\n", __func__);
3358 if (rptid_entry->entry_status != 0)
3361 if (rptid_entry->format == 0) {
3362 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
3363 "Format 0 : Number of VPs setup %d, number of "
3364 "VPs acquired %d.\n",
3365 MSB(le16_to_cpu(rptid_entry->vp_count)),
3366 LSB(le16_to_cpu(rptid_entry->vp_count)));
3367 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
3368 "Primary port id %02x%02x%02x.\n",
3369 rptid_entry->port_id[2], rptid_entry->port_id[1],
3370 rptid_entry->port_id[0]);
3371 } else if (rptid_entry->format == 1) {
3373 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
3374 "Format 1: VP[%d] enabled - status %d - with "
3375 "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
3376 rptid_entry->port_id[2], rptid_entry->port_id[1],
3377 rptid_entry->port_id[0]);
3379 /* FA-WWN is only for physical port */
3381 void *wwpn = ha->init_cb->port_name;
/* BIT_6 in vp_idx_map[1] signals a fabric-supplied WWPN carried
 * in the entry; otherwise fall back to the init-cb port name. */
3384 if (rptid_entry->vp_idx_map[1] & BIT_6)
3385 wwpn = rptid_entry->reserved_4 + 8;
3387 memcpy(vha->port_name, wwpn, WWN_SIZE);
3388 fc_host_port_name(vha->host) =
3389 wwn_to_u64(vha->port_name);
3390 ql_dbg(ql_dbg_mbx, vha, 0x1018,
3391 "FA-WWN portname %016llx (%x)\n",
3392 fc_host_port_name(vha->host), MSB(stat));
/* MSB(stat) 0 and 2 are the acceptable acquisition statuses --
 * presumably "acquired" and "acquired, id changed"; TODO confirm. */
3399 if (MSB(stat) != 0 && MSB(stat) != 2) {
3400 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
3401 "Could not acquire ID for VP[%d].\n", vp_idx);
/* Find the vport matching this VP index. */
3406 spin_lock_irqsave(&ha->vport_slock, flags);
3407 list_for_each_entry(vp, &ha->vp_list, list) {
3408 if (vp_idx == vp->vp_idx) {
3413 spin_unlock_irqrestore(&ha->vport_slock, flags);
3418 vp->d_id.b.domain = rptid_entry->port_id[2];
3419 vp->d_id.b.area = rptid_entry->port_id[1];
3420 vp->d_id.b.al_pa = rptid_entry->port_id[0];
3423 * Cannot configure here as we are still sitting on the
3424 * response queue. Handle it in dpc context.
3426 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
3429 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
3430 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
3431 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
3432 qla2xxx_wake_dpc(vha);
3437 * qla24xx_modify_vp_config
3438 * Change VP configuration for vha
3441 * vha = adapter block pointer.
3444 * qla2xxx local function return status code.
/*
 * Build a VP-config IOCB (VCT_COMMAND_MOD_ENABLE_VPS) for this vport's
 * index/WWNs and issue it through the base (physical) vha, then move
 * the fc_vport to INITIALIZING on success.
 */
3450 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
3453 struct vp_config_entry_24xx *vpmod;
3454 dma_addr_t vpmod_dma;
3455 struct qla_hw_data *ha = vha->hw;
3456 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3458 /* This can be called by the parent */
3460 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
3461 "Entered %s.\n", __func__);
3463 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
3465 ql_log(ql_log_warn, vha, 0x10bc,
3466 "Failed to allocate modify VP IOCB.\n");
3467 return QLA_MEMORY_ALLOC_FAILED;
3470 memset(vpmod, 0, sizeof(struct vp_config_entry_24xx));
3471 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
3472 vpmod->entry_count = 1;
3473 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
3474 vpmod->vp_count = 1;
3475 vpmod->vp_index1 = vha->vp_idx;
3476 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
/* Let target mode adjust the VP options before sending. */
3478 qlt_modify_vp_config(vha, vpmod);
3480 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
3481 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
3482 vpmod->entry_count = 1;
3484 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
3485 if (rval != QLA_SUCCESS) {
3486 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
3487 "Failed to issue VP config IOCB (%x).\n", rval);
/* NOTE(review): comp_status is tested twice below. The first test was
 * presumably meant for entry_status (cf. qla24xx_control_vp), and if
 * CS_COMPLETE is 0 the second branch is unreachable -- TODO confirm;
 * left byte-identical since this view is elided. */
3488 } else if (vpmod->comp_status != 0) {
3489 ql_dbg(ql_dbg_mbx, vha, 0x10be,
3490 "Failed to complete IOCB -- error status (%x).\n",
3491 vpmod->comp_status);
3492 rval = QLA_FUNCTION_FAILED;
3493 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
3494 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
3495 "Failed to complete IOCB -- completion status (%x).\n",
3496 le16_to_cpu(vpmod->comp_status));
3497 rval = QLA_FUNCTION_FAILED;
3500 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
3501 "Done %s.\n", __func__);
3502 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
3504 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
3510 * qla24xx_control_vp
3511 * Enable or disable a virtual port for the given host
3514 * vha = virtual adapter block pointer.
3515 * cmd = VP control command (enable/disable)
3519 * qla2xxx local function return status code.
/*
 * Build a VP-control IOCB for this vport's index and issue it through
 * the base (physical) vha. VP index 0 (physical port) and out-of-range
 * indices are rejected.
 */
3525 qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3529 struct vp_ctrl_entry_24xx *vce;
3531 struct qla_hw_data *ha = vha->hw;
3532 int vp_index = vha->vp_idx;
3533 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3535 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
3536 "Entered %s enabling index %d.\n", __func__, vp_index);
3538 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
3539 return QLA_PARAMETER_ERROR;
3541 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
3543 ql_log(ql_log_warn, vha, 0x10c2,
3544 "Failed to allocate VP control IOCB.\n");
3545 return QLA_MEMORY_ALLOC_FAILED;
3547 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
3549 vce->entry_type = VP_CTRL_IOCB_TYPE;
3550 vce->entry_count = 1;
3551 vce->command = cpu_to_le16(cmd);
3552 vce->vp_count = cpu_to_le16(1);
3554 /* index map in firmware starts with 1; decrement index
3555 * this is ok as we never use index 0
/* Set the bit for this VP in the firmware's index bitmap; the map is
 * shared state, so guard it with vport_lock. */
3557 map = (vp_index - 1) / 8;
3558 pos = (vp_index - 1) & 7;
3559 mutex_lock(&ha->vport_lock);
3560 vce->vp_idx_map[map] |= 1 << pos;
3561 mutex_unlock(&ha->vport_lock);
3563 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
3564 if (rval != QLA_SUCCESS) {
3565 ql_dbg(ql_dbg_mbx, vha, 0x10c3,
3566 "Failed to issue VP control IOCB (%x).\n", rval);
3567 } else if (vce->entry_status != 0) {
3568 ql_dbg(ql_dbg_mbx, vha, 0x10c4,
3569 "Failed to complete IOCB -- error status (%x).\n",
3571 rval = QLA_FUNCTION_FAILED;
/* NOTE(review): "complet" typo in the runtime message below; left
 * as-is (runtime strings must not change in a doc-only pass). */
3572 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
3573 ql_dbg(ql_dbg_mbx, vha, 0x10c5,
3574 "Failed to complet IOCB -- completion status (%x).\n",
3575 le16_to_cpu(vce->comp_status));
3576 rval = QLA_FUNCTION_FAILED;
3578 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
3579 "Done %s.\n", __func__);
3582 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
3588 * qla2x00_send_change_request
3589 * Receive or disable RSCN request from fabric controller
3592 * ha = adapter block pointer
3593 * format = registration format:
3595 * 1 - Fabric detected registration
3596 * 2 - N_port detected registration
3597 * 3 - Full registration
3598 * FF - clear registration
3599 * vp_idx = Virtual port index
3602 * qla2x00 local function return status code.
/*
 * Issue MBC_SEND_CHANGE_REQUEST for the given registration @format on
 * virtual port @vp_idx (see the format legend in the header comment
 * above this function). Tail of this function is elided from view.
 */
3609 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
3614 mbx_cmd_t *mcp = &mc;
3616 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
3617 "Entered %s.\n", __func__);
3619 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
3620 mcp->mb[1] = format;
3621 mcp->mb[9] = vp_idx;
3622 mcp->out_mb = MBX_9|MBX_1|MBX_0;
3623 mcp->in_mb = MBX_0|MBX_1;
3624 mcp->tov = MBX_TOV_SECONDS;
3626 rval = qla2x00_mailbox_command(vha, mcp);
3628 if (rval == QLA_SUCCESS) {
3629 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
/*
 * qla2x00_dump_ram() - Dump RISC RAM starting at @addr into the DMA
 * buffer at @req_dma. Uses the extended command (32-bit address and
 * 32-bit size split across registers) when the address needs more than
 * 16 bits or the adapter is FWI2-capable; otherwise the legacy command.
 */
3639 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3644 mbx_cmd_t *mcp = &mc;
3646 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
3647 "Entered %s.\n", __func__);
3649 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
3650 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
3651 mcp->mb[8] = MSW(addr);
3652 mcp->out_mb = MBX_8|MBX_0;
3654 mcp->mb[0] = MBC_DUMP_RISC_RAM;
3655 mcp->out_mb = MBX_0;
3657 mcp->mb[1] = LSW(addr);
3658 mcp->mb[2] = MSW(req_dma);
3659 mcp->mb[3] = LSW(req_dma);
3660 mcp->mb[6] = MSW(MSD(req_dma));
3661 mcp->mb[7] = LSW(MSD(req_dma));
3662 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
3663 if (IS_FWI2_CAPABLE(vha->hw)) {
3664 mcp->mb[4] = MSW(size);
3665 mcp->mb[5] = LSW(size);
3666 mcp->out_mb |= MBX_5|MBX_4;
3668 mcp->mb[4] = LSW(size);
3669 mcp->out_mb |= MBX_4;
3673 mcp->tov = MBX_TOV_SECONDS;
3675 rval = qla2x00_mailbox_command(vha, mcp);
3677 if (rval != QLA_SUCCESS) {
3678 ql_dbg(ql_dbg_mbx, vha, 0x1008,
3679 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3681 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
3682 "Done %s.\n", __func__);
3687 /* 84XX Support **************************************************************/
/*
 * CS84xx verify-chip buffer: request and response entries overlay one
 * DMA allocation (enclosing union/member wrapper elided from view;
 * accessed as ->p.req/->p.rsp below).
 */
3689 struct cs84xx_mgmt_cmd {
3691 struct verify_chip_entry_84xx req;
3692 struct verify_chip_rsp_84xx rsp;
/*
 * qla84xx_verify_chip() - Issue a Verify-Chip IOCB to the CS84xx
 * (Menlo) companion chip, optionally updating its firmware.
 *
 * status[0] receives the completion status, status[1] the failure code
 * when status[0] == CS_VCS_CHIP_FAILURE. On a failed forced update the
 * command is retried with VCO_DONT_UPDATE_FW (retry loop braces elided
 * from this view). On success the operational firmware version is
 * cached in ha->cs84xx under access_lock.
 */
3697 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3700 struct cs84xx_mgmt_cmd *mn;
3703 unsigned long flags;
3704 struct qla_hw_data *ha = vha->hw;
3706 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
3707 "Entered %s.\n", __func__);
3709 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
3711 return QLA_MEMORY_ALLOC_FAILED;
3715 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
3716 /* Diagnostic firmware? */
3717 /* options |= MENLO_DIAG_FW; */
3718 /* We update the firmware with only one data sequence. */
3719 options |= VCO_END_OF_DATA;
3723 memset(mn, 0, sizeof(*mn));
3724 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
3725 mn->p.req.entry_count = 1;
3726 mn->p.req.options = cpu_to_le16(options);
3728 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
3729 "Dump of Verify Request.\n");
3730 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
3731 (uint8_t *)mn, sizeof(*mn));
/* 120 s timeout: firmware update can be slow. */
3733 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
3734 if (rval != QLA_SUCCESS) {
3735 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
3736 "Failed to issue verify IOCB (%x).\n", rval);
3740 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
3741 "Dump of Verify Response.\n");
3742 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
3743 (uint8_t *)mn, sizeof(*mn));
3745 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
3746 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
3747 le16_to_cpu(mn->p.rsp.failure_code) : 0;
3748 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
3749 "cs=%x fc=%x.\n", status[0], status[1]);
3751 if (status[0] != CS_COMPLETE) {
3752 rval = QLA_FUNCTION_FAILED;
3753 if (!(options & VCO_DONT_UPDATE_FW)) {
3754 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
3755 "Firmware update failed. Retrying "
3756 "without update firmware.\n");
3757 options |= VCO_DONT_UPDATE_FW;
3758 options &= ~VCO_FORCE_UPDATE;
3762 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
3763 "Firmware updated to %x.\n",
3764 le32_to_cpu(mn->p.rsp.fw_ver));
3766 /* NOTE: we only update OP firmware. */
3767 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
3768 ha->cs84xx->op_fw_version =
3769 le32_to_cpu(mn->p.rsp.fw_ver);
3770 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
3776 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
3778 if (rval != QLA_SUCCESS) {
3779 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
3780 "Failed=%x.\n", rval);
3782 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
3783 "Done %s.\n", __func__);
/*
 * qla25xx_init_req_que() - Initialize a multiqueue request queue via
 * MBC_INITIALIZE_MULTIQ: program queue DMA address, length, ids and
 * options, then zero the hardware in/out pointers (unless BIT_0 --
 * presumably "queue already enabled"; TODO confirm) before issuing
 * the mailbox command.
 */
3790 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3793 unsigned long flags;
3795 mbx_cmd_t *mcp = &mc;
3796 struct qla_hw_data *ha = vha->hw;
3798 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
3799 "Entered %s.\n", __func__);
/* BIT_13: shadow-register mode on capable adapters. */
3801 if (IS_SHADOW_REG_CAPABLE(ha))
3802 req->options |= BIT_13;
3804 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3805 mcp->mb[1] = req->options;
3806 mcp->mb[2] = MSW(LSD(req->dma));
3807 mcp->mb[3] = LSW(LSD(req->dma));
3808 mcp->mb[6] = MSW(MSD(req->dma));
3809 mcp->mb[7] = LSW(MSD(req->dma));
3810 mcp->mb[5] = req->length;
3812 mcp->mb[10] = req->rsp->id;
3813 mcp->mb[12] = req->qos;
3814 mcp->mb[11] = req->vp_idx;
3815 mcp->mb[13] = req->rid;
3816 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3819 mcp->mb[4] = req->id;
3820 /* que in ptr index */
3822 /* que out ptr index */
3823 mcp->mb[9] = *req->out_ptr = 0;
3824 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
3825 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3827 mcp->flags = MBX_DMA_OUT;
3828 mcp->tov = MBX_TOV_SECONDS * 2;
3830 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
3831 mcp->in_mb |= MBX_1;
3832 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3833 mcp->out_mb |= MBX_15;
3834 /* debug q create issue in SR-IOV */
3835 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
3838 spin_lock_irqsave(&ha->hardware_lock, flags);
3839 if (!(req->options & BIT_0)) {
3840 WRT_REG_DWORD(req->req_q_in, 0);
3841 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
3842 WRT_REG_DWORD(req->req_q_out, 0);
3844 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3846 rval = qla2x00_mailbox_command(vha, mcp);
3847 if (rval != QLA_SUCCESS) {
3848 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
3849 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3851 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
3852 "Done %s.\n", __func__);
/*
 * qla25xx_init_rsp_que() - Initialize a multiqueue response queue via
 * MBC_INITIALIZE_MULTIQ; mirror of qla25xx_init_req_que() for the
 * response side (programs MSI-X entry, zeroes the hardware queue
 * pointers unless BIT_0 is set in options).
 */
3859 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3862 unsigned long flags;
3864 mbx_cmd_t *mcp = &mc;
3865 struct qla_hw_data *ha = vha->hw;
3867 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
3868 "Entered %s.\n", __func__);
/* BIT_13: shadow-register mode on capable adapters. */
3870 if (IS_SHADOW_REG_CAPABLE(ha))
3871 rsp->options |= BIT_13;
3873 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3874 mcp->mb[1] = rsp->options;
3875 mcp->mb[2] = MSW(LSD(rsp->dma));
3876 mcp->mb[3] = LSW(LSD(rsp->dma));
3877 mcp->mb[6] = MSW(MSD(rsp->dma));
3878 mcp->mb[7] = LSW(MSD(rsp->dma));
3879 mcp->mb[5] = rsp->length;
3880 mcp->mb[14] = rsp->msix->entry;
3881 mcp->mb[13] = rsp->rid;
3882 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3885 mcp->mb[4] = rsp->id;
3886 /* que in ptr index */
3887 mcp->mb[8] = *rsp->in_ptr = 0;
3888 /* que out ptr index */
3890 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
3891 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3893 mcp->flags = MBX_DMA_OUT;
3894 mcp->tov = MBX_TOV_SECONDS * 2;
3896 if (IS_QLA81XX(ha)) {
3897 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
3898 mcp->in_mb |= MBX_1;
3899 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3900 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
3901 mcp->in_mb |= MBX_1;
3902 /* debug q create issue in SR-IOV */
3903 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
3906 spin_lock_irqsave(&ha->hardware_lock, flags);
3907 if (!(rsp->options & BIT_0)) {
3908 WRT_REG_DWORD(rsp->rsp_q_out, 0);
3909 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
3910 WRT_REG_DWORD(rsp->rsp_q_in, 0);
3913 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3915 rval = qla2x00_mailbox_command(vha, mcp);
3916 if (rval != QLA_SUCCESS) {
3917 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
3918 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3920 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
3921 "Done %s.\n", __func__);
/*
 * qla81xx_idc_ack() - Acknowledge an Inter-Driver-Communication event
 * by echoing QLA_IDC_ACK_REGS registers back in MBC_IDC_ACK.
 */
3928 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3932 mbx_cmd_t *mcp = &mc;
3934 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
3935 "Entered %s.\n", __func__);
3937 mcp->mb[0] = MBC_IDC_ACK;
3938 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
3939 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3941 mcp->tov = MBX_TOV_SECONDS;
3943 rval = qla2x00_mailbox_command(vha, mcp);
3945 if (rval != QLA_SUCCESS) {
3946 ql_dbg(ql_dbg_mbx, vha, 0x10da,
3947 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3949 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
3950 "Done %s.\n", __func__);
/*
 * qla81xx_fac_get_sector_size() - Query flash sector size via the
 * Flash Access Control mailbox command (81xx/83xx/27xx only).
 * On success *sector_size is set from mb[1].
 */
3957 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3961 mbx_cmd_t *mcp = &mc;
3963 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
3964 "Entered %s.\n", __func__);
3966 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
3967 !IS_QLA27XX(vha->hw))
3968 return QLA_FUNCTION_FAILED;
3970 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3971 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
3972 mcp->out_mb = MBX_1|MBX_0;
3973 mcp->in_mb = MBX_1|MBX_0;
3974 mcp->tov = MBX_TOV_SECONDS;
3976 rval = qla2x00_mailbox_command(vha, mcp);
3978 if (rval != QLA_SUCCESS) {
3979 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
3980 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3981 rval, mcp->mb[0], mcp->mb[1]);
3983 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
3984 "Done %s.\n", __func__);
3985 *sector_size = mcp->mb[1];
/*
 * qla81xx_fac_do_write_enable() - Toggle flash write access via the
 * Flash Access Control command: @enable non-zero => write enable,
 * zero => write protect (81xx/83xx/27xx only).
 */
3992 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3996 mbx_cmd_t *mcp = &mc;
3998 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
3999 !IS_QLA27XX(vha->hw))
4000 return QLA_FUNCTION_FAILED;
4002 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4003 "Entered %s.\n", __func__);
4005 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4006 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4007 FAC_OPT_CMD_WRITE_PROTECT;
4008 mcp->out_mb = MBX_1|MBX_0;
4009 mcp->in_mb = MBX_1|MBX_0;
4010 mcp->tov = MBX_TOV_SECONDS;
4012 rval = qla2x00_mailbox_command(vha, mcp);
4014 if (rval != QLA_SUCCESS) {
4015 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4016 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4017 rval, mcp->mb[0], mcp->mb[1]);
4019 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4020 "Done %s.\n", __func__);
/*
 * qla81xx_fac_erase_sector() - Erase flash sectors from @start to
 * @finish via the Flash Access Control command (81xx/83xx/27xx only).
 * Addresses are split LSW/MSW across mb[2..5].
 */
4027 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4031 mbx_cmd_t *mcp = &mc;
4033 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4034 !IS_QLA27XX(vha->hw))
4035 return QLA_FUNCTION_FAILED;
4037 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4038 "Entered %s.\n", __func__);
4040 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4041 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4042 mcp->mb[2] = LSW(start);
4043 mcp->mb[3] = MSW(start);
4044 mcp->mb[4] = LSW(finish);
4045 mcp->mb[5] = MSW(finish);
4046 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4047 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4048 mcp->tov = MBX_TOV_SECONDS;
4050 rval = qla2x00_mailbox_command(vha, mcp);
4052 if (rval != QLA_SUCCESS) {
4053 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4054 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4055 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4057 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4058 "Done %s.\n", __func__);
/*
 * qla81xx_restart_mpi_firmware() - Restart the MPI (management
 * processor) firmware via MBC_RESTART_MPI_FW.
 */
4065 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4069 mbx_cmd_t *mcp = &mc;
4071 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4072 "Entered %s.\n", __func__);
4074 mcp->mb[0] = MBC_RESTART_MPI_FW;
4075 mcp->out_mb = MBX_0;
4076 mcp->in_mb = MBX_0|MBX_1;
4077 mcp->tov = MBX_TOV_SECONDS;
4079 rval = qla2x00_mailbox_command(vha, mcp);
4081 if (rval != QLA_SUCCESS) {
4082 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4083 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4084 rval, mcp->mb[0], mcp->mb[1]);
4086 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4087 "Done %s.\n", __func__);
/*
 * qla82xx_set_driver_version() - Report the driver version string to
 * P3P-type (82xx-family) firmware via MBC_SET_RNID_PARAMS /
 * RNID_TYPE_SET_VERSION, packing the string two bytes per mailbox
 * register in mb[4..15] (remaining registers are still flagged out --
 * presumably zero-padded in the elided loop body).
 */
4094 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4098 mbx_cmd_t *mcp = &mc;
4102 struct qla_hw_data *ha = vha->hw;
4104 if (!IS_P3P_TYPE(ha))
4105 return QLA_FUNCTION_FAILED;
4107 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4108 "Entered %s.\n", __func__);
4110 str = (void *)version;
4111 len = strlen(version);
4113 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4114 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4115 mcp->out_mb = MBX_1|MBX_0;
4116 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4117 mcp->mb[i] = cpu_to_le16p(str);
4118 mcp->out_mb |= 1<<i;
4120 for (; i < 16; i++) {
4122 mcp->out_mb |= 1<<i;
4124 mcp->in_mb = MBX_1|MBX_0;
4125 mcp->tov = MBX_TOV_SECONDS;
4127 rval = qla2x00_mailbox_command(vha, mcp);
4129 if (rval != QLA_SUCCESS) {
4130 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4131 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4133 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4134 "Done %s.\n", __func__);
/*
 * qla25xx_set_driver_version
 *	Report the driver version to ISP25xx-and-later firmware via a
 *	DMA buffer (dma_pool) rather than inline mailbox registers:
 *	the buffer starts with a 4-byte RNID header then the truncated
 *	version string, and its 64-bit DMA address goes in mb[2,3,6,7].
 *	The pool buffer is freed on all paths after the command completes.
 */
4141 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4145 	mbx_cmd_t *mcp = &mc;
4150 	struct qla_hw_data *ha = vha->hw;
4152 	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4154 		return QLA_FUNCTION_FAILED;
4156 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4157 	    "Entered %s.\n", __func__);
4159 	str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4161 		ql_log(ql_log_warn, vha, 0x117f,
4162 		    "Failed to allocate driver version param.\n");
4163 		return QLA_MEMORY_ALLOC_FAILED;
	/* 4-byte RNID "set version" header preceding the string payload. */
4166 	memcpy(str, "\x7\x3\x11\x0", 4);
4168 	len = dwlen * 4 - 4;
4169 	memset(str + 4, 0, len);
	/* Truncate to the available space if the version string is longer. */
4170 	if (len > strlen(version))
4171 		len = strlen(version);
4172 	memcpy(str + 4, version, len);
4174 	mcp->mb[0] = MBC_SET_RNID_PARAMS;
4175 	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4176 	mcp->mb[2] = MSW(LSD(str_dma));
4177 	mcp->mb[3] = LSW(LSD(str_dma));
4178 	mcp->mb[6] = MSW(MSD(str_dma));
4179 	mcp->mb[7] = LSW(MSD(str_dma));
4180 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4181 	mcp->in_mb = MBX_1|MBX_0;
4182 	mcp->tov = MBX_TOV_SECONDS;
4184 	rval = qla2x00_mailbox_command(vha, mcp);
4186 	if (rval != QLA_SUCCESS) {
4187 		ql_dbg(ql_dbg_mbx, vha, 0x1180,
4188 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4190 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4191 		    "Done %s.\n", __func__);
4194 	dma_pool_free(ha->s_dma_pool, str, str_dma);
/*
 * qla2x00_read_asic_temperature
 *	Query the ASIC temperature via MBC_GET_RNID_PARAMS with the
 *	RNID_TYPE_ASIC_TEMP subcode (FWI2-capable adapters only).
 *	On success the temperature is returned by firmware in mb[1]
 *	(copied to *temp in lines outside this extract).
 */
4200 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
4204 	mbx_cmd_t *mcp = &mc;
4206 	if (!IS_FWI2_CAPABLE(vha->hw))
4207 		return QLA_FUNCTION_FAILED;
4209 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4210 	    "Entered %s.\n", __func__);
4212 	mcp->mb[0] = MBC_GET_RNID_PARAMS;
4213 	mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
4214 	mcp->out_mb = MBX_1|MBX_0;
4215 	mcp->in_mb = MBX_1|MBX_0;
4216 	mcp->tov = MBX_TOV_SECONDS;
4218 	rval = qla2x00_mailbox_command(vha, mcp);
4221 	if (rval != QLA_SUCCESS) {
4222 		ql_dbg(ql_dbg_mbx, vha, 0x115a,
4223 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4225 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4226 		    "Done %s.\n", __func__);
/*
 * qla2x00_read_sfp
 *	Read SFP transceiver data into a caller-supplied DMA buffer.
 *	@sfp_dma: bus address of the destination buffer (64-bit, split
 *	          across mb[2,3,6,7]).
 *	@dev/@off/@len/@opt: SFP device address, offset, byte count and
 *	          option bits (programmed in mailbox lines not visible in
 *	          this extract — mb[1,8,9,10] per out_mb mask below).
 *	FWI2-capable adapters only.
 */
4233 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4234 	uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4238 	mbx_cmd_t *mcp = &mc;
4239 	struct qla_hw_data *ha = vha->hw;
4241 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
4242 	    "Entered %s.\n", __func__);
4244 	if (!IS_FWI2_CAPABLE(ha))
4245 		return QLA_FUNCTION_FAILED;
4250 	mcp->mb[0] = MBC_READ_SFP;
4252 	mcp->mb[2] = MSW(sfp_dma);
4253 	mcp->mb[3] = LSW(sfp_dma);
4254 	mcp->mb[6] = MSW(MSD(sfp_dma));
4255 	mcp->mb[7] = LSW(MSD(sfp_dma));
4259 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4260 	mcp->in_mb = MBX_1|MBX_0;
4261 	mcp->tov = MBX_TOV_SECONDS;
4263 	rval = qla2x00_mailbox_command(vha, mcp);
4268 	if (rval != QLA_SUCCESS) {
4269 		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
4270 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4272 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
4273 		    "Done %s.\n", __func__);
/*
 * qla2x00_write_sfp
 *	Write SFP transceiver data from a caller-supplied DMA buffer —
 *	mirror image of qla2x00_read_sfp() using MBC_WRITE_SFP.
 *	The 64-bit buffer address is split across mb[2,3,6,7]; device,
 *	offset, length and options occupy mb[1,8,9,10] (set on lines not
 *	visible in this extract, per the out_mb mask).
 *	FWI2-capable adapters only.
 */
4280 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4281 	uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4285 	mbx_cmd_t *mcp = &mc;
4286 	struct qla_hw_data *ha = vha->hw;
4288 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
4289 	    "Entered %s.\n", __func__);
4291 	if (!IS_FWI2_CAPABLE(ha))
4292 		return QLA_FUNCTION_FAILED;
4300 	mcp->mb[0] = MBC_WRITE_SFP;
4302 	mcp->mb[2] = MSW(sfp_dma);
4303 	mcp->mb[3] = LSW(sfp_dma);
4304 	mcp->mb[6] = MSW(MSD(sfp_dma));
4305 	mcp->mb[7] = LSW(MSD(sfp_dma));
4309 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4310 	mcp->in_mb = MBX_1|MBX_0;
4311 	mcp->tov = MBX_TOV_SECONDS;
4313 	rval = qla2x00_mailbox_command(vha, mcp);
4315 	if (rval != QLA_SUCCESS) {
4316 		ql_dbg(ql_dbg_mbx, vha, 0x10ec,
4317 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4319 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
4320 		    "Done %s.\n", __func__);
/*
 * qla2x00_get_xgmac_stats
 *	Retrieve XGMAC (10G Ethernet MAC) statistics into a DMA buffer on
 *	CNA-capable adapters.
 *	@size_in_bytes is converted to dwords for mb[8]; on success
 *	*actual_size is the byte count firmware actually wrote (mb[2],
 *	dwords, converted back with << 2).
 */
4327 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
4328     uint16_t size_in_bytes, uint16_t *actual_size)
4332 	mbx_cmd_t *mcp = &mc;
4334 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
4335 	    "Entered %s.\n", __func__);
4337 	if (!IS_CNA_CAPABLE(vha->hw))
4338 		return QLA_FUNCTION_FAILED;
4340 	mcp->mb[0] = MBC_GET_XGMAC_STATS;
4341 	mcp->mb[2] = MSW(stats_dma);
4342 	mcp->mb[3] = LSW(stats_dma);
4343 	mcp->mb[6] = MSW(MSD(stats_dma));
4344 	mcp->mb[7] = LSW(MSD(stats_dma));
	/* Buffer size in dwords. */
4345 	mcp->mb[8] = size_in_bytes >> 2;
4346 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
4347 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
4348 	mcp->tov = MBX_TOV_SECONDS;
4350 	rval = qla2x00_mailbox_command(vha, mcp);
4352 	if (rval != QLA_SUCCESS) {
4353 		ql_dbg(ql_dbg_mbx, vha, 0x10ef,
4354 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4355 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4357 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
4358 		    "Done %s.\n", __func__);
	/* Firmware reports dwords written; convert back to bytes. */
4361 		*actual_size = mcp->mb[2] << 2;
/*
 * qla2x00_get_dcbx_params
 *	Fetch DCBX (Data Center Bridging eXchange) TLV parameters into a
 *	DMA buffer on CNA-capable adapters via MBC_GET_DCBX_PARAMS.
 *	The 64-bit TLV buffer address is split across mb[2,3,6,7]; the
 *	buffer size goes in mb[1]/mb[8] (set on lines not visible here,
 *	per the out_mb mask).
 */
4368 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
4373 	mbx_cmd_t *mcp = &mc;
4375 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
4376 	    "Entered %s.\n", __func__);
4378 	if (!IS_CNA_CAPABLE(vha->hw))
4379 		return QLA_FUNCTION_FAILED;
4381 	mcp->mb[0] = MBC_GET_DCBX_PARAMS;
4383 	mcp->mb[2] = MSW(tlv_dma);
4384 	mcp->mb[3] = LSW(tlv_dma);
4385 	mcp->mb[6] = MSW(MSD(tlv_dma));
4386 	mcp->mb[7] = LSW(MSD(tlv_dma));
4388 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4389 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
4390 	mcp->tov = MBX_TOV_SECONDS;
4392 	rval = qla2x00_mailbox_command(vha, mcp);
4394 	if (rval != QLA_SUCCESS) {
4395 		ql_dbg(ql_dbg_mbx, vha, 0x10f2,
4396 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4397 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4399 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
4400 		    "Done %s.\n", __func__);
/*
 * qla2x00_read_ram_word
 *	Read one 32-bit word of RISC RAM at @risc_addr via
 *	MBC_READ_RAM_EXTENDED (FWI2-capable adapters only).
 *	The 32-bit address is split LSW->mb[1], MSW->mb[8]; on success the
 *	word comes back as mb[3]:mb[2] and is stored into *data.
 */
4407 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
4411 	mbx_cmd_t *mcp = &mc;
4413 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
4414 	    "Entered %s.\n", __func__);
4416 	if (!IS_FWI2_CAPABLE(vha->hw))
4417 		return QLA_FUNCTION_FAILED;
4419 	mcp->mb[0] = MBC_READ_RAM_EXTENDED;
4420 	mcp->mb[1] = LSW(risc_addr);
4421 	mcp->mb[8] = MSW(risc_addr);
4422 	mcp->out_mb = MBX_8|MBX_1|MBX_0;
4423 	mcp->in_mb = MBX_3|MBX_2|MBX_0;
4426 	rval = qla2x00_mailbox_command(vha, mcp);
4427 	if (rval != QLA_SUCCESS) {
4428 		ql_dbg(ql_dbg_mbx, vha, 0x10f5,
4429 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4431 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
4432 		    "Done %s.\n", __func__);
	/* Reassemble the 32-bit word: high half in mb[3], low in mb[2]. */
4433 		*data = mcp->mb[3] << 16 | mcp->mb[2];
/*
 * qla2x00_loopback_test
 *	Run the firmware diagnostic loopback: transmit @mreq->transfer_size
 *	bytes from send_dma, receive into rcv_dma, repeated
 *	iteration_count times.  BIT_6 in mb[1] selects 64-bit addressing.
 *	On return the first 64 bytes of mailbox state are copied to
 *	@mresp for the caller (BSG interface) to inspect.
 */
4440 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4445 	mbx_cmd_t *mcp = &mc;
4447 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
4448 	    "Entered %s.\n", __func__);
4450 	memset(mcp->mb, 0 , sizeof(mcp->mb));
4451 	mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
4452 	mcp->mb[1] = mreq->options | BIT_6;	// BIT_6 specifies 64 bit addressing
4454 	/* transfer count */
4455 	mcp->mb[10] = LSW(mreq->transfer_size);
4456 	mcp->mb[11] = MSW(mreq->transfer_size);
4458 	/* send data address */
4459 	mcp->mb[14] = LSW(mreq->send_dma);
4460 	mcp->mb[15] = MSW(mreq->send_dma);
4461 	mcp->mb[20] = LSW(MSD(mreq->send_dma));
4462 	mcp->mb[21] = MSW(MSD(mreq->send_dma));
4464 	/* receive data address */
4465 	mcp->mb[16] = LSW(mreq->rcv_dma);
4466 	mcp->mb[17] = MSW(mreq->rcv_dma);
4467 	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
4468 	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
4470 	/* Iteration count */
4471 	mcp->mb[18] = LSW(mreq->iteration_count);
4472 	mcp->mb[19] = MSW(mreq->iteration_count);
4474 	mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
4475 	    MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
	/* CNA adapters additionally program mb[2] (FCoE options). */
4476 	if (IS_CNA_CAPABLE(vha->hw))
4477 		mcp->out_mb |= MBX_2;
4478 	mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
4480 	mcp->buf_size = mreq->transfer_size;
4481 	mcp->tov = MBX_TOV_SECONDS;
4482 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4484 	rval = qla2x00_mailbox_command(vha, mcp);
4486 	if (rval != QLA_SUCCESS) {
4487 		ql_dbg(ql_dbg_mbx, vha, 0x10f8,
4488 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
4489 		    "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
4490 		    mcp->mb[3], mcp->mb[18], mcp->mb[19]);
4492 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
4493 		    "Done %s.\n", __func__);
4496 	/* Copy mailbox information */
4497 	memcpy( mresp, mcp->mb, 64);
/*
 * qla2x00_echo_test
 *	Run the firmware diagnostic ECHO: transmit a buffer from send_dma
 *	and receive the echoed copy into rcv_dma.  BIT_6 in mb[1] selects
 *	64-bit addressing; CNA adapters also set BIT_15 and pass the FCF
 *	index in mb[2].  The first 64 mailbox bytes are copied to @mresp.
 *	NOTE(review): only LSW(transfer_size) is programmed (mb[10]) —
 *	unlike loopback_test there is no MSW; presumably ECHO transfers
 *	are limited to < 64K. Confirm against the firmware spec.
 */
4502 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4507 	mbx_cmd_t *mcp = &mc;
4508 	struct qla_hw_data *ha = vha->hw;
4510 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
4511 	    "Entered %s.\n", __func__);
4513 	memset(mcp->mb, 0 , sizeof(mcp->mb));
4514 	mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
4515 	mcp->mb[1] = mreq->options | BIT_6;	/* BIT_6 specifies 64bit address */
4516 	if (IS_CNA_CAPABLE(ha)) {
4517 		mcp->mb[1] |= BIT_15;
4518 		mcp->mb[2] = vha->fcoe_fcf_idx;
4520 	mcp->mb[16] = LSW(mreq->rcv_dma);
4521 	mcp->mb[17] = MSW(mreq->rcv_dma);
4522 	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
4523 	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
4525 	mcp->mb[10] = LSW(mreq->transfer_size);
4527 	mcp->mb[14] = LSW(mreq->send_dma);
4528 	mcp->mb[15] = MSW(mreq->send_dma);
4529 	mcp->mb[20] = LSW(MSD(mreq->send_dma));
4530 	mcp->mb[21] = MSW(MSD(mreq->send_dma));
4532 	mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
4533 	    MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
4534 	if (IS_CNA_CAPABLE(ha))
4535 		mcp->out_mb |= MBX_2;
	/* Incoming-register mask varies by chip generation. */
4538 	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
4539 	    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
4540 		mcp->in_mb |= MBX_1;
4541 	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
4542 		mcp->in_mb |= MBX_3;
4544 	mcp->tov = MBX_TOV_SECONDS;
4545 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4546 	mcp->buf_size = mreq->transfer_size;
4548 	rval = qla2x00_mailbox_command(vha, mcp);
4550 	if (rval != QLA_SUCCESS) {
4551 		ql_dbg(ql_dbg_mbx, vha, 0x10fb,
4552 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
4553 		    rval, mcp->mb[0], mcp->mb[1]);
4555 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
4556 		    "Done %s.\n", __func__);
4559 	/* Copy mailbox information */
4560 	memcpy(mresp, mcp->mb, 64);
/*
 * qla84xx_reset_chip
 *	Reset the ISP84xx companion chip via MBC_ISP84XX_RESET.
 *	@enable_diagnostic: passed through in mb[1]; non-zero leaves the
 *	chip in diagnostic mode after reset.
 */
4565 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
4569 	mbx_cmd_t *mcp = &mc;
4571 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
4572 	    "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
4574 	mcp->mb[0] = MBC_ISP84XX_RESET;
4575 	mcp->mb[1] = enable_diagnostic;
4576 	mcp->out_mb = MBX_1|MBX_0;
4577 	mcp->in_mb = MBX_1|MBX_0;
4578 	mcp->tov = MBX_TOV_SECONDS;
4579 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4580 	rval = qla2x00_mailbox_command(vha, mcp);
4582 	if (rval != QLA_SUCCESS)
4583 		ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
4585 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
4586 		    "Done %s.\n", __func__);
/*
 * qla2x00_write_ram_word
 *	Write one 32-bit word @data to RISC RAM at @risc_addr via
 *	MBC_WRITE_RAM_WORD_EXTENDED (FWI2-capable adapters only).
 *	Address split LSW->mb[1], MSW->mb[8]; data split mb[2]/mb[3].
 */
4592 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
4596 	mbx_cmd_t *mcp = &mc;
4598 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
4599 	    "Entered %s.\n", __func__);
4601 	if (!IS_FWI2_CAPABLE(vha->hw))
4602 		return QLA_FUNCTION_FAILED;
4604 	mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
4605 	mcp->mb[1] = LSW(risc_addr);
4606 	mcp->mb[2] = LSW(data);
4607 	mcp->mb[3] = MSW(data);
4608 	mcp->mb[8] = MSW(risc_addr);
4609 	mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
4613 	rval = qla2x00_mailbox_command(vha, mcp);
4614 	if (rval != QLA_SUCCESS) {
4615 		ql_dbg(ql_dbg_mbx, vha, 0x1101,
4616 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4618 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
4619 		    "Done %s.\n", __func__);
/*
 * qla81xx_write_mpi_register
 *	Issue MBC_WRITE_MPI_REGISTER by banging the mailbox registers
 *	directly (bypassing qla2x00_mailbox_command) and polling the host
 *	status register for the completion interrupt — used when the
 *	normal interrupt-driven mailbox path is unavailable.
 *	@mb: four 16-bit values written to mailbox1..4.
 *	Returns mb0 & MBS_MASK on completion, QLA_FUNCTION_FAILED if the
 *	poll loop expires without an MBX interrupt.
 */
4626 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
4629 	uint32_t stat, timer;
4631 	struct qla_hw_data *ha = vha->hw;
4632 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
4636 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
4637 	    "Entered %s.\n", __func__);
4639 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
4641 	/* Write the MBC data to the registers */
4642 	WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
4643 	WRT_REG_WORD(&reg->mailbox1, mb[0]);
4644 	WRT_REG_WORD(&reg->mailbox2, mb[1]);
4645 	WRT_REG_WORD(&reg->mailbox3, mb[2]);
4646 	WRT_REG_WORD(&reg->mailbox4, mb[3]);
	/* Ring the doorbell: tell the RISC a command is waiting. */
4648 	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
4650 	/* Poll for MBC interrupt */
4651 	for (timer = 6000000; timer; timer--) {
4652 		/* Check for pending interrupts. */
4653 		stat = RD_REG_DWORD(&reg->host_status);
4654 		if (stat & HSRX_RISC_INT) {
			/* Only mailbox-completion status codes count. */
4657 			if (stat == 0x1 || stat == 0x2 ||
4658 			    stat == 0x10 || stat == 0x11) {
4659 				set_bit(MBX_INTERRUPT,
4660 				    &ha->mbx_cmd_flags);
4661 				mb0 = RD_REG_WORD(&reg->mailbox0);
4662 				WRT_REG_DWORD(&reg->hccr,
4663 				    HCCRX_CLR_RISC_INT);
				/* Read-back flushes the posted write. */
4664 				RD_REG_DWORD(&reg->hccr);
4671 	if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
4672 		rval = mb0 & MBS_MASK;
4674 		rval = QLA_FUNCTION_FAILED;
4676 	if (rval != QLA_SUCCESS) {
4677 		ql_dbg(ql_dbg_mbx, vha, 0x1104,
4678 		    "Failed=%x mb[0]=%x.\n", rval, mb[0]);
4680 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
4681 		    "Done %s.\n", __func__);
/*
 * qla2x00_get_data_rate
 *	Query the current link data rate via MBC_DATA_RATE (FWI2-capable
 *	adapters only).  On success, ha->link_data_rate is updated from
 *	mb[1] unless firmware reports the "unknown" sentinel 0x7.
 */
4688 qla2x00_get_data_rate(scsi_qla_host_t *vha)
4692 	mbx_cmd_t *mcp = &mc;
4693 	struct qla_hw_data *ha = vha->hw;
4695 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
4696 	    "Entered %s.\n", __func__);
4698 	if (!IS_FWI2_CAPABLE(ha))
4699 		return QLA_FUNCTION_FAILED;
4701 	mcp->mb[0] = MBC_DATA_RATE;
4703 	mcp->out_mb = MBX_1|MBX_0;
4704 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	/* Newer chips return additional status in mb[3]. */
4705 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4706 		mcp->in_mb |= MBX_3;
4707 	mcp->tov = MBX_TOV_SECONDS;
4709 	rval = qla2x00_mailbox_command(vha, mcp);
4710 	if (rval != QLA_SUCCESS) {
4711 		ql_dbg(ql_dbg_mbx, vha, 0x1107,
4712 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4714 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
4715 		    "Done %s.\n", __func__);
	/* 0x7 means rate unknown/not applicable — keep the cached value. */
4716 		if (mcp->mb[1] != 0x7)
4717 			ha->link_data_rate = mcp->mb[1];
/*
 * qla81xx_get_port_config
 *	Read the four port-configuration words via MBC_GET_PORT_CONFIG
 *	(ISP81xx/83xx/8044-class adapters).  On success mb[1..4] are
 *	copied verbatim into @mb so the caller can modify and write them
 *	back with qla81xx_set_port_config().
 */
4724 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4728 	mbx_cmd_t *mcp = &mc;
4729 	struct qla_hw_data *ha = vha->hw;
4731 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
4732 	    "Entered %s.\n", __func__);
4734 	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
4736 		return QLA_FUNCTION_FAILED;
4737 	mcp->mb[0] = MBC_GET_PORT_CONFIG;
4738 	mcp->out_mb = MBX_0;
4739 	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4740 	mcp->tov = MBX_TOV_SECONDS;
4743 	rval = qla2x00_mailbox_command(vha, mcp);
4745 	if (rval != QLA_SUCCESS) {
4746 		ql_dbg(ql_dbg_mbx, vha, 0x110a,
4747 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4749 		/* Copy all bits to preserve original value */
4750 		memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
4752 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
4753 		    "Done %s.\n", __func__);
/*
 * qla81xx_set_port_config
 *	Write four port-configuration words (typically obtained from
 *	qla81xx_get_port_config) back to firmware via
 *	MBC_SET_PORT_CONFIG; @mb[0..3] map to mailbox registers 1..4.
 */
4759 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4763 	mbx_cmd_t *mcp = &mc;
4765 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
4766 	    "Entered %s.\n", __func__);
4768 	mcp->mb[0] = MBC_SET_PORT_CONFIG;
4769 	/* Copy all bits to preserve original setting */
4770 	memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
4771 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4773 	mcp->tov = MBX_TOV_SECONDS;
4775 	rval = qla2x00_mailbox_command(vha, mcp);
4777 	if (rval != QLA_SUCCESS) {
4778 		ql_dbg(ql_dbg_mbx, vha, 0x110d,
4779 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4781 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
4782 		    "Done %s.\n", __func__);
/*
 * qla24xx_set_fcp_prio
 *	Set the FCP priority (low nibble of @priority) for the remote
 *	port identified by @loop_id via MBC_PORT_PARAMS, on ISP24xx/25xx
 *	adapters.  mb[9] carries the virtual-port index.
 */
4789 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4794 	mbx_cmd_t *mcp = &mc;
4795 	struct qla_hw_data *ha = vha->hw;
4797 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
4798 	    "Entered %s.\n", __func__);
4800 	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
4801 		return QLA_FUNCTION_FAILED;
4803 	mcp->mb[0] = MBC_PORT_PARAMS;
4804 	mcp->mb[1] = loop_id;
	/* mb[2] subcommand depends on fcp_prio_enabled (line not visible). */
4805 	if (ha->flags.fcp_prio_enabled)
4809 	mcp->mb[4] = priority & 0xf;
4810 	mcp->mb[9] = vha->vp_idx;
4811 	mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4812 	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
4815 	rval = qla2x00_mailbox_command(vha, mcp);
4823 	if (rval != QLA_SUCCESS) {
4824 		ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
4826 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
4827 		    "Done %s.\n", __func__);
/*
 * qla2x00_get_thermal_temp
 *	Read the adapter temperature, dispatching per chip family:
 *	- ISP25xx with specific QLogic/HP subsystem IDs: read from the
 *	  SFP thermal sensor via qla2x00_read_sfp();
 *	- ISP82xx / ISP8044: read a chip-specific temperature register;
 *	- everything else FWI2-capable: MBC_GET_RNID_PARAMS ASIC query.
 *	Returns QLA_FUNCTION_FAILED for cards with no thermal support.
 */
4834 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
4836 	int rval = QLA_FUNCTION_FAILED;
4837 	struct qla_hw_data *ha = vha->hw;
4840 	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
4841 		ql_dbg(ql_dbg_mbx, vha, 0x1150,
4842 		    "Thermal not supported by this card.\n");
	/* ISP25xx: only certain OEM subsystem IDs wire up the sensor. */
4846 	if (IS_QLA25XX(ha)) {
4847 		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
4848 		    ha->pdev->subsystem_device == 0x0175) {
4849 			rval = qla2x00_read_sfp(vha, 0, &byte,
4850 			    0x98, 0x1, 1, BIT_13|BIT_0);
4854 		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
4855 		    ha->pdev->subsystem_device == 0x338e) {
4856 			rval = qla2x00_read_sfp(vha, 0, &byte,
4857 			    0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
4861 		ql_dbg(ql_dbg_mbx, vha, 0x10c9,
4862 		    "Thermal not supported by this card.\n");
	/* P3P parts expose temperature through a direct register read. */
4866 	if (IS_QLA82XX(ha)) {
4867 		*temp = qla82xx_read_temperature(vha);
4870 	} else if (IS_QLA8044(ha)) {
4871 		*temp = qla8044_read_temperature(vha);
4876 	rval = qla2x00_read_asic_temperature(vha, temp);
/*
 * qla82xx_mbx_intr_enable
 *	Ask ISP82xx firmware to enable interrupt delivery via
 *	MBC_TOGGLE_INTERRUPT (the enable/disable flag goes in mb[1] on a
 *	line not visible in this extract).
 */
4881 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4884 	struct qla_hw_data *ha = vha->hw;
4886 	mbx_cmd_t *mcp = &mc;
4888 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
4889 	    "Entered %s.\n", __func__);
4891 	if (!IS_FWI2_CAPABLE(ha))
4892 		return QLA_FUNCTION_FAILED;
4894 	memset(mcp, 0, sizeof(mbx_cmd_t));
4895 	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4898 	mcp->out_mb = MBX_1|MBX_0;
4903 	rval = qla2x00_mailbox_command(vha, mcp);
4904 	if (rval != QLA_SUCCESS) {
4905 		ql_dbg(ql_dbg_mbx, vha, 0x1016,
4906 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4908 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
4909 		    "Done %s.\n", __func__);
/*
 * qla82xx_mbx_intr_disable
 *	Counterpart of qla82xx_mbx_intr_enable(): ask P3P-type firmware
 *	to disable interrupt delivery via MBC_TOGGLE_INTERRUPT.
 */
4916 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4919 	struct qla_hw_data *ha = vha->hw;
4921 	mbx_cmd_t *mcp = &mc;
4923 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
4924 	    "Entered %s.\n", __func__);
4926 	if (!IS_P3P_TYPE(ha))
4927 		return QLA_FUNCTION_FAILED;
4929 	memset(mcp, 0, sizeof(mbx_cmd_t));
4930 	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4933 	mcp->out_mb = MBX_1|MBX_0;
4938 	rval = qla2x00_mailbox_command(vha, mcp);
4939 	if (rval != QLA_SUCCESS) {
4940 		ql_dbg(ql_dbg_mbx, vha, 0x100c,
4941 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4943 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
4944 		    "Done %s.\n", __func__);
/*
 * qla82xx_md_get_template_size
 *	Query the size of the ISP82xx minidump template.  The 32-bit
 *	command/subcode pair is split across mb[0..3]; on success the
 *	template size comes back in mb[3]:mb[2] and is cached in
 *	ha->md_template_size.  A zero size is treated as failure.
 */
4951 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
4953 	struct qla_hw_data *ha = vha->hw;
4955 	mbx_cmd_t *mcp = &mc;
4956 	int rval = QLA_FUNCTION_FAILED;
4958 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
4959 	    "Entered %s.\n", __func__);
4961 	memset(mcp->mb, 0 , sizeof(mcp->mb));
4962 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4963 	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4964 	mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
4965 	mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
4967 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
4968 	mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
4969 	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4971 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4972 	mcp->tov = MBX_TOV_SECONDS;
4973 	rval = qla2x00_mailbox_command(vha, mcp);
4975 	/* Always copy back return mailbox values. */
4976 	if (rval != QLA_SUCCESS) {
4977 		ql_dbg(ql_dbg_mbx, vha, 0x1120,
4978 		    "mailbox command FAILED=0x%x, subcode=%x.\n",
4979 		    (mcp->mb[1] << 16) | mcp->mb[0],
4980 		    (mcp->mb[3] << 16) | mcp->mb[2]);
4982 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
4983 		    "Done %s.\n", __func__);
	/* Template size returned as a 32-bit value in mb[3]:mb[2]. */
4984 		ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
4985 		if (!ha->md_template_size) {
4986 			ql_dbg(ql_dbg_mbx, vha, 0x1122,
4987 			    "Null template size obtained.\n");
4988 			rval = QLA_FUNCTION_FAILED;
/*
 * qla82xx_md_get_template
 *	Allocate a coherent DMA buffer of ha->md_template_size bytes and
 *	ask ISP82xx firmware to copy the minidump template into it in a
 *	single request (contrast qla8044_md_get_template, which fetches
 *	in chunks).  Buffer address/size are split across mb[4..9].
 */
4995 qla82xx_md_get_template(scsi_qla_host_t *vha)
4997 	struct qla_hw_data *ha = vha->hw;
4999 	mbx_cmd_t *mcp = &mc;
5000 	int rval = QLA_FUNCTION_FAILED;
5002 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5003 	    "Entered %s.\n", __func__);
5005 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5006 	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5007 	if (!ha->md_tmplt_hdr) {
5008 		ql_log(ql_log_warn, vha, 0x1124,
5009 		    "Unable to allocate memory for Minidump template.\n");
5013 	memset(mcp->mb, 0 , sizeof(mcp->mb));
5014 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5015 	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5016 	mcp->mb[2] = LSW(RQST_TMPLT);
5017 	mcp->mb[3] = MSW(RQST_TMPLT);
5018 	mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5019 	mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5020 	mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5021 	mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5022 	mcp->mb[8] = LSW(ha->md_template_size);
5023 	mcp->mb[9] = MSW(ha->md_template_size);
5025 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5026 	mcp->tov = MBX_TOV_SECONDS;
5027 	mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5028 	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5029 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5030 	rval = qla2x00_mailbox_command(vha, mcp);
5032 	if (rval != QLA_SUCCESS) {
5033 		ql_dbg(ql_dbg_mbx, vha, 0x1125,
5034 		    "mailbox command FAILED=0x%x, subcode=%x.\n",
5035 		    ((mcp->mb[1] << 16) | mcp->mb[0]),
5036 		    ((mcp->mb[3] << 16) | mcp->mb[2]));
5038 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5039 		    "Done %s.\n", __func__);
/*
 * qla8044_md_get_template
 *	Fetch the ISP8044 minidump template into a coherent DMA buffer in
 *	MINIDUMP_SIZE_36K-byte chunks, advancing @offset until the whole
 *	ha->md_template_size has been transferred.  Each chunk request
 *	carries the chunk's DMA address (mb[4..7]), size (mb[8..9]) and
 *	offset (mb[10..11]).
 */
5044 qla8044_md_get_template(scsi_qla_host_t *vha)
5046 	struct qla_hw_data *ha = vha->hw;
5048 	mbx_cmd_t *mcp = &mc;
5049 	int rval = QLA_FUNCTION_FAILED;
5050 	int offset = 0, size = MINIDUMP_SIZE_36K;
5051 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5052 	    "Entered %s.\n", __func__);
5054 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5055 	    ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5056 	if (!ha->md_tmplt_hdr) {
5057 		ql_log(ql_log_warn, vha, 0xb11b,
5058 		    "Unable to allocate memory for Minidump template.\n");
5062 	memset(mcp->mb, 0 , sizeof(mcp->mb));
5063 	while (offset < ha->md_template_size) {
5064 		mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5065 		mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5066 		mcp->mb[2] = LSW(RQST_TMPLT);
5067 		mcp->mb[3] = MSW(RQST_TMPLT);
5068 		mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5069 		mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
5070 		mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
5071 		mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
5072 		mcp->mb[8] = LSW(size);
5073 		mcp->mb[9] = MSW(size);
5074 		mcp->mb[10] = offset & 0x0000FFFF;
		/* NOTE(review): mb[] registers are 16-bit, so
		 * "offset & 0xFFFF0000" truncates to 0 — the high half of
		 * offset is likely meant to be shifted down (MSW(offset)).
		 * Confirm against the ISP8044 firmware spec before fixing. */
5075 		mcp->mb[11] = offset & 0xFFFF0000;
5076 		mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5077 		mcp->tov = MBX_TOV_SECONDS;
5078 		mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5079 			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5080 		mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5081 		rval = qla2x00_mailbox_command(vha, mcp);
5083 		if (rval != QLA_SUCCESS) {
5084 			ql_dbg(ql_dbg_mbx, vha, 0xb11c,
5085 			    "mailbox command FAILED=0x%x, subcode=%x.\n",
5086 			    ((mcp->mb[1] << 16) | mcp->mb[0]),
5087 			    ((mcp->mb[3] << 16) | mcp->mb[2]));
5090 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
5091 			    "Done %s.\n", __func__);
5092 		offset = offset + size;
/*
 * qla81xx_set_led_config
 *	Program the LED configuration words via MBC_SET_LED_CONFIG on
 *	ISP81xx/8031 adapters.  ISP81xx uses two words (mb[1..2]); 8031
 *	takes six (mb[1..6]), reflected in the extended out_mb mask.
 */
5098 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5101 	struct qla_hw_data *ha = vha->hw;
5103 	mbx_cmd_t *mcp = &mc;
5105 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5106 		return QLA_FUNCTION_FAILED;
5108 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
5109 	    "Entered %s.\n", __func__);
5111 	memset(mcp, 0, sizeof(mbx_cmd_t));
5112 	mcp->mb[0] = MBC_SET_LED_CONFIG;
5113 	mcp->mb[1] = led_cfg[0];
5114 	mcp->mb[2] = led_cfg[1];
5115 	if (IS_QLA8031(ha)) {
5116 		mcp->mb[3] = led_cfg[2];
5117 		mcp->mb[4] = led_cfg[3];
5118 		mcp->mb[5] = led_cfg[4];
5119 		mcp->mb[6] = led_cfg[5];
5122 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
5124 		mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5129 	rval = qla2x00_mailbox_command(vha, mcp);
5130 	if (rval != QLA_SUCCESS) {
5131 		ql_dbg(ql_dbg_mbx, vha, 0x1134,
5132 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5134 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
5135 		    "Done %s.\n", __func__);
/*
 * qla81xx_get_led_config
 *	Read the current LED configuration via MBC_GET_LED_CONFIG on
 *	ISP81xx/8031 adapters.  On success mb[1..2] (plus mb[3..6] on
 *	8031) are copied into @led_cfg — the counterpart of
 *	qla81xx_set_led_config().
 */
5142 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5145 	struct qla_hw_data *ha = vha->hw;
5147 	mbx_cmd_t *mcp = &mc;
5149 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5150 		return QLA_FUNCTION_FAILED;
5152 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
5153 	    "Entered %s.\n", __func__);
5155 	memset(mcp, 0, sizeof(mbx_cmd_t));
5156 	mcp->mb[0] = MBC_GET_LED_CONFIG;
5158 	mcp->out_mb = MBX_0;
5159 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5161 		mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5165 	rval = qla2x00_mailbox_command(vha, mcp);
5166 	if (rval != QLA_SUCCESS) {
5167 		ql_dbg(ql_dbg_mbx, vha, 0x1137,
5168 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5170 		led_cfg[0] = mcp->mb[1];
5171 		led_cfg[1] = mcp->mb[2];
5172 		if (IS_QLA8031(ha)) {
5173 			led_cfg[2] = mcp->mb[3];
5174 			led_cfg[3] = mcp->mb[4];
5175 			led_cfg[4] = mcp->mb[5];
5176 			led_cfg[5] = mcp->mb[6];
5178 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
5179 		    "Done %s.\n", __func__);
/*
 * qla82xx_mbx_beacon_ctl
 *	Turn the P3P-type adapter's beacon LED on or off via
 *	MBC_SET_LED_CONFIG (the @enable flag is programmed into mb[7] on
 *	a line not visible in this extract, per the MBX_7 out mask).
 */
5186 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
5189 	struct qla_hw_data *ha = vha->hw;
5191 	mbx_cmd_t *mcp = &mc;
5193 	if (!IS_P3P_TYPE(ha))
5194 		return QLA_FUNCTION_FAILED;
5196 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
5197 	    "Entered %s.\n", __func__);
5199 	memset(mcp, 0, sizeof(mbx_cmd_t));
5200 	mcp->mb[0] = MBC_SET_LED_CONFIG;
5206 	mcp->out_mb = MBX_7|MBX_0;
5208 	mcp->tov = MBX_TOV_SECONDS;
5211 	rval = qla2x00_mailbox_command(vha, mcp);
5212 	if (rval != QLA_SUCCESS) {
5213 		ql_dbg(ql_dbg_mbx, vha, 0x1128,
5214 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5216 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
5217 		    "Done %s.\n", __func__);
/*
 * qla83xx_wr_reg
 *	Write 32-bit @data to remote register @reg through firmware via
 *	MBC_WRITE_REMOTE_REG (ISP83xx/27xx).  Address split mb[1]/mb[2],
 *	data split mb[3]/mb[4].
 */
5224 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
5227 	struct qla_hw_data *ha = vha->hw;
5229 	mbx_cmd_t *mcp = &mc;
5231 	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5232 		return QLA_FUNCTION_FAILED;
5234 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
5235 	    "Entered %s.\n", __func__);
5237 	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
5238 	mcp->mb[1] = LSW(reg);
5239 	mcp->mb[2] = MSW(reg);
5240 	mcp->mb[3] = LSW(data);
5241 	mcp->mb[4] = MSW(data);
5242 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5244 	mcp->in_mb = MBX_1|MBX_0;
5245 	mcp->tov = MBX_TOV_SECONDS;
5247 	rval = qla2x00_mailbox_command(vha, mcp);
5249 	if (rval != QLA_SUCCESS) {
5250 		ql_dbg(ql_dbg_mbx, vha, 0x1131,
5251 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5253 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
5254 		    "Done %s.\n", __func__);
/*
 * qla2x00_port_logout
 *	Perform an implicit LOGO of @fcport via MBC_PORT_LOGOUT.
 *	BIT_15 in mb[10] requests the implicit (no ELS on the wire)
 *	variant.  Not supported on ISP2100/2200.
 */
5261 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
5264 	struct qla_hw_data *ha = vha->hw;
5266 	mbx_cmd_t *mcp = &mc;
5268 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
5269 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
5270 		    "Implicit LOGO Unsupported.\n");
5271 		return QLA_FUNCTION_FAILED;
5275 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
5276 	    "Entering %s.\n",  __func__);
5278 	/* Perform Implicit LOGO. */
5279 	mcp->mb[0] = MBC_PORT_LOGOUT;
5280 	mcp->mb[1] = fcport->loop_id;
5281 	mcp->mb[10] = BIT_15;
5282 	mcp->out_mb = MBX_10|MBX_1|MBX_0;
5284 	mcp->tov = MBX_TOV_SECONDS;
5286 	rval = qla2x00_mailbox_command(vha, mcp);
5287 	if (rval != QLA_SUCCESS)
5288 		ql_dbg(ql_dbg_mbx, vha, 0x113d,
5289 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5291 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
5292 	    "Done %s.\n", __func__);
/*
 * qla83xx_rd_reg
 *	Read remote register @reg through firmware via
 *	MBC_READ_REMOTE_REG (ISP83xx/27xx); the 32-bit value is returned
 *	in mb[4]:mb[3] and stored into *data.
 *	CAMRAM registers can transiently read 0xbad0bad0 during a soft
 *	reset, so such reads are retried for up to 2 seconds before the
 *	function gives up.
 */
5298 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
5302 	mbx_cmd_t *mcp = &mc;
5303 	struct qla_hw_data *ha = vha->hw;
5304 	unsigned long retry_max_time = jiffies + (2 * HZ);
5306 	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5307 		return QLA_FUNCTION_FAILED;
5309 	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
5312 	mcp->mb[0] = MBC_READ_REMOTE_REG;
5313 	mcp->mb[1] = LSW(reg);
5314 	mcp->mb[2] = MSW(reg);
5315 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
5316 	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5317 	mcp->tov = MBX_TOV_SECONDS;
5319 	rval = qla2x00_mailbox_command(vha, mcp);
5321 	if (rval != QLA_SUCCESS) {
5322 		ql_dbg(ql_dbg_mbx, vha, 0x114c,
5323 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
5324 		    rval, mcp->mb[0], mcp->mb[1]);
		/* Reassemble: low half in mb[3], high half in mb[4]. */
5326 		*data = (mcp->mb[3] | (mcp->mb[4] << 16));
5327 		if (*data == QLA8XXX_BAD_VALUE) {
5329 			 * During soft-reset CAMRAM register reads might
5330 			 * return 0xbad0bad0. So retry for MAX of 2 sec
5331 			 * while reading camram registers.
5333 			if (time_after(jiffies, retry_max_time)) {
5334 				ql_dbg(ql_dbg_mbx, vha, 0x1141,
5335 				    "Failure to read CAMRAM register. "
5336 				    "data=0x%x.\n", *data);
5337 				return QLA_FUNCTION_FAILED;
5342 	ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
/*
 * qla83xx_restart_nic_firmware
 *	Restart the NIC-side firmware on ISP83xx/27xx via
 *	MBC_RESTART_NIC_FIRMWARE.  On failure a firmware dump is
 *	triggered for post-mortem analysis.
 */
5349 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
5353 	mbx_cmd_t *mcp = &mc;
5354 	struct qla_hw_data *ha = vha->hw;
5356 	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5357 		return QLA_FUNCTION_FAILED;
5359 	ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
5361 	mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
5362 	mcp->out_mb = MBX_0;
5363 	mcp->in_mb = MBX_1|MBX_0;
5364 	mcp->tov = MBX_TOV_SECONDS;
5366 	rval = qla2x00_mailbox_command(vha, mcp);
5368 	if (rval != QLA_SUCCESS) {
5369 		ql_dbg(ql_dbg_mbx, vha, 0x1144,
5370 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
5371 		    rval, mcp->mb[0], mcp->mb[1]);
		/* Capture firmware state for debugging the failed restart. */
5372 		ha->isp_ops->fw_dump(vha, 0);
5374 		ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
/*
 * qla83xx_access_control
 *	Issue MBC_SET_ACCESS_CONTROL (ISP8031 only).  The low byte of
 *	@options selects the subcommand:
 *	- BIT_2 set: an address range (@start_addr..@end_addr) is
 *	  programmed into mb[2..5];
 *	- BIT_5 set: firmware returns a sector size in mb[1], stored to
 *	  *sector_size;
 *	- BIT_6/BIT_7: driver-lock operations (lock id logged);
 *	- BIT_3/BIT_4: flash-lock operations (lock id logged).
 *	On failure a firmware dump is triggered.
 */
5381 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
5382 	uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
5386 	mbx_cmd_t *mcp = &mc;
5387 	uint8_t subcode = (uint8_t)options;
5388 	struct qla_hw_data *ha = vha->hw;
5390 	if (!IS_QLA8031(ha))
5391 		return QLA_FUNCTION_FAILED;
5393 	ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
5395 	mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
5396 	mcp->mb[1] = options;
5397 	mcp->out_mb = MBX_1|MBX_0;
5398 	if (subcode & BIT_2) {
5399 		mcp->mb[2] = LSW(start_addr);
5400 		mcp->mb[3] = MSW(start_addr);
5401 		mcp->mb[4] = LSW(end_addr);
5402 		mcp->mb[5] = MSW(end_addr);
5403 		mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
5405 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	/* Lock-style subcommands return a lock id in mb[3]/mb[4]. */
5406 	if (!(subcode & (BIT_2 | BIT_5)))
5407 		mcp->in_mb |= MBX_4|MBX_3;
5408 	mcp->tov = MBX_TOV_SECONDS;
5410 	rval = qla2x00_mailbox_command(vha, mcp);
5412 	if (rval != QLA_SUCCESS) {
5413 		ql_dbg(ql_dbg_mbx, vha, 0x1147,
5414 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
5415 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
5417 		ha->isp_ops->fw_dump(vha, 0);
5419 		if (subcode & BIT_5)
5420 			*sector_size = mcp->mb[1];
5421 		else if (subcode & (BIT_6 | BIT_7)) {
5422 			ql_dbg(ql_dbg_mbx, vha, 0x1148,
5423 			    "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5424 		} else if (subcode & (BIT_3 | BIT_4)) {
5425 			ql_dbg(ql_dbg_mbx, vha, 0x1149,
5426 			    "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5428 		ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
5435 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
5440 mbx_cmd_t *mcp = &mc;
5442 if (!IS_MCTP_CAPABLE(vha->hw))
5443 return QLA_FUNCTION_FAILED;
5445 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
5446 "Entered %s.\n", __func__);
5448 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
5449 mcp->mb[1] = LSW(addr);
5450 mcp->mb[2] = MSW(req_dma);
5451 mcp->mb[3] = LSW(req_dma);
5452 mcp->mb[4] = MSW(size);
5453 mcp->mb[5] = LSW(size);
5454 mcp->mb[6] = MSW(MSD(req_dma));
5455 mcp->mb[7] = LSW(MSD(req_dma));
5456 mcp->mb[8] = MSW(addr);
5457 /* Setting RAM ID to valid */
5458 /* For MCTP RAM ID is 0x40 */
5459 mcp->mb[10] = BIT_7 | 0x40;
5461 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
5465 mcp->tov = MBX_TOV_SECONDS;
5467 rval = qla2x00_mailbox_command(vha, mcp);
5469 if (rval != QLA_SUCCESS) {
5470 ql_dbg(ql_dbg_mbx, vha, 0x114e,
5471 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5473 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
5474 "Done %s.\n", __func__);