2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/cpu.h>
13 #include <linux/t10-pi.h>
14 #include <scsi/scsi_tcq.h>
15 #include <scsi/scsi_bsg_fc.h>
16 #include <scsi/scsi_eh.h>
17 #include <scsi/fc/fc_fs.h>
18 #include <linux/nvme-fc-driver.h>
20 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
21 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
22 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
23 static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
27 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
29 * @dev_id: SCSI driver HA context
31 * Called by system whenever the host adapter generates an interrupt.
33 * Returns handled flag.
36 qla2100_intr_handler(int irq, void *dev_id)
39 struct qla_hw_data *ha;
40 struct device_reg_2xxx __iomem *reg;
48 rsp = (struct rsp_que *) dev_id;
50 ql_log(ql_log_info, NULL, 0x505d,
51 "%s: NULL response queue pointer.\n", __func__);
56 reg = &ha->iobase->isp;
59 spin_lock_irqsave(&ha->hardware_lock, flags);
60 vha = pci_get_drvdata(ha->pdev);
61 for (iter = 50; iter--; ) {
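/* Bound the service loop: handle at most 50 interrupt conditions per call. */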
hccr = RD_REG_WORD(&reg->hccr);
63 if (qla2x00_check_reg16_for_disconnect(vha, hccr))
65 if (hccr & HCCR_RISC_PAUSE) {
66 if (pci_channel_offline(ha->pdev))
70 * Issue a "HARD" reset in order for the RISC interrupt
71 * bit to be cleared. Schedule a big hammer to get
72 * out of the RISC PAUSED state.
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
RD_REG_WORD(&reg->hccr);
77 ha->isp_ops->fw_dump(vha, 1);
78 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
	break;

if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
RD_REG_WORD(&reg->hccr);
87 /* Get mailbox data. */
88 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
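/*
 * mb[0] in the 0x4000-0x7fff range is a mailbox command completion
 * status; 0x8000-0xbfff indicates an asynchronous event.
 */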
89 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
90 qla2x00_mbx_completion(vha, mb[0]);
91 status |= MBX_INTERRUPT;
92 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
93 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
94 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
95 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
96 qla2x00_async_event(vha, rsp, mb);
99 ql_dbg(ql_dbg_async, vha, 0x5025,
100 "Unrecognized interrupt type (%d).\n",
103 /* Release mailbox registers. */
WRT_REG_WORD(&reg->semaphore, 0);
RD_REG_WORD(&reg->semaphore);
107 qla2x00_process_response_queue(rsp);
WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
RD_REG_WORD(&reg->hccr);
113 qla2x00_handle_mbx_completion(ha, status);
114 spin_unlock_irqrestore(&ha->hardware_lock, flags);
116 return (IRQ_HANDLED);
120 qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
122 /* Check for PCI disconnection */
123 if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
124 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
125 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
126 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
128 * Schedule this (only once) on the default system
129 * workqueue so that all the adapter workqueues and the
130 * DPC thread can be shutdown cleanly.
132 schedule_work(&vha->hw->board_disable);
140 qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
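/*
 * A register reads back as all ones once the device has dropped off
 * the PCI bus; widen the 16-bit value so the 32-bit check applies.
 */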
142 return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
146 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
148 * @dev_id: SCSI driver HA context
150 * Called by system whenever the host adapter generates an interrupt.
152 * Returns handled flag.
155 qla2300_intr_handler(int irq, void *dev_id)
157 scsi_qla_host_t *vha;
158 struct device_reg_2xxx __iomem *reg;
165 struct qla_hw_data *ha;
168 rsp = (struct rsp_que *) dev_id;
170 ql_log(ql_log_info, NULL, 0x5058,
171 "%s: NULL response queue pointer.\n", __func__);
176 reg = &ha->iobase->isp;
179 spin_lock_irqsave(&ha->hardware_lock, flags);
180 vha = pci_get_drvdata(ha->pdev);
181 for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
183 if (qla2x00_check_reg32_for_disconnect(vha, stat))
185 if (stat & HSR_RISC_PAUSED) {
186 if (unlikely(pci_channel_offline(ha->pdev)))
hccr = RD_REG_WORD(&reg->hccr);
191 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
192 ql_log(ql_log_warn, vha, 0x5026,
193 "Parity error -- HCCR=%x, Dumping "
194 "firmware.\n", hccr);
196 ql_log(ql_log_warn, vha, 0x5027,
197 "RISC paused -- HCCR=%x, Dumping "
198 "firmware.\n", hccr);
201 * Issue a "HARD" reset in order for the RISC
202 * interrupt bit to be cleared. Schedule a big
203 * hammer to get out of the RISC PAUSED state.
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
RD_REG_WORD(&reg->hccr);
208 ha->isp_ops->fw_dump(vha, 1);
209 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
211 } else if ((stat & HSR_RISC_INT) == 0)
214 switch (stat & 0xff) {
219 qla2x00_mbx_completion(vha, MSW(stat));
220 status |= MBX_INTERRUPT;
222 /* Release mailbox registers. */
WRT_REG_WORD(&reg->semaphore, 0);
227 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
228 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
229 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
230 qla2x00_async_event(vha, rsp, mb);
233 qla2x00_process_response_queue(rsp);
236 mb[0] = MBA_CMPLT_1_16BIT;
238 qla2x00_async_event(vha, rsp, mb);
241 mb[0] = MBA_SCSI_COMPLETION;
243 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
244 qla2x00_async_event(vha, rsp, mb);
247 ql_dbg(ql_dbg_async, vha, 0x5028,
248 "Unrecognized interrupt type (%d).\n", stat & 0xff);
WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
RD_REG_WORD_RELAXED(&reg->hccr);
254 qla2x00_handle_mbx_completion(ha, status);
255 spin_unlock_irqrestore(&ha->hardware_lock, flags);
257 return (IRQ_HANDLED);
261 * qla2x00_mbx_completion() - Process mailbox command completions.
262 * @vha: SCSI driver HA context
263 * @mb0: Mailbox0 register
266 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
270 uint16_t __iomem *wptr;
271 struct qla_hw_data *ha = vha->hw;
272 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
274 /* Read all mbox registers? */
275 WARN_ON_ONCE(ha->mbx_count > 32);
276 mboxes = (1ULL << ha->mbx_count) - 1;
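/* With no mailbox command outstanding, capture every register by default. */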
278 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
280 mboxes = ha->mcp->in_mb;
282 /* Load return mailbox registers. */
283 ha->flags.mbox_int = 1;
284 ha->mailbox_out[0] = mb0;
286 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
288 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
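/*
 * On ISP2200 the mailbox registers beyond 7 are not contiguous with
 * the first bank, so the pointer is re-derived at register 8.
 */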
289 if (IS_QLA2200(ha) && cnt == 8)
290 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
291 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
292 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
293 else if (mboxes & BIT_0)
294 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
302 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
304 static char *event[] =
305 { "Complete", "Request Notification", "Time Extension" };
307 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
308 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
309 uint16_t __iomem *wptr;
310 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
312 /* Seed data -- mailbox1 -> mailbox7. */
313 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
wptr = (uint16_t __iomem *)&reg24->mailbox1;
315 else if (IS_QLA8044(vha->hw))
wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
320 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
321 mb[cnt] = RD_REG_WORD(wptr);
323 ql_dbg(ql_dbg_async, vha, 0x5021,
324 "Inter-Driver Communication %s -- "
325 "%04x %04x %04x %04x %04x %04x %04x.\n",
326 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
327 mb[4], mb[5], mb[6]);
329 /* Handle IDC Error completion case. */
330 case MBA_IDC_COMPLETE:
332 vha->hw->flags.idc_compl_status = 1;
333 if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
334 complete(&vha->hw->dcbx_comp);
339 /* Acknowledgement needed? [Notify && non-zero timeout]. */
340 timeout = (descr >> 8) & 0xf;
341 ql_dbg(ql_dbg_async, vha, 0x5022,
342 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
343 vha->host_no, event[aen & 0xff], timeout);
347 rval = qla2x00_post_idc_ack_work(vha, mb);
348 if (rval != QLA_SUCCESS)
349 ql_log(ql_log_warn, vha, 0x5023,
350 "IDC failed to post ACK.\n");
352 case MBA_IDC_TIME_EXT:
353 vha->hw->idc_extend_tmo = descr;
354 ql_dbg(ql_dbg_async, vha, 0x5087,
355 "%lu Inter-Driver Communication %s -- "
356 "Extend timeout by=%d.\n",
357 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
364 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
366 static const char *const link_speeds[] = {
367 "1", "2", "?", "4", "8", "16", "32", "10"
369 #define QLA_LAST_SPEED 7
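/* Speed code 0x13 maps to the 10 Gbps entry at the end of the table. */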
371 if (IS_QLA2100(ha) || IS_QLA2200(ha))
372 return link_speeds[0];
373 else if (speed == 0x13)
374 return link_speeds[QLA_LAST_SPEED];
375 else if (speed < QLA_LAST_SPEED)
376 return link_speeds[speed];
378 return link_speeds[LS_UNKNOWN];
382 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
384 struct qla_hw_data *ha = vha->hw;
387 * 8200 AEN Interpretation:
389 * mb[1] = AEN Reason code
390 * mb[2] = LSW of Peg-Halt Status-1 Register
391 * mb[6] = MSW of Peg-Halt Status-1 Register
392 * mb[3] = LSW of Peg-Halt Status-2 register
393 * mb[7] = MSW of Peg-Halt Status-2 register
394 * mb[4] = IDC Device-State Register value
395 * mb[5] = IDC Driver-Presence Register value
397 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
398 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
399 mb[0], mb[1], mb[2], mb[6]);
400 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
401 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
402 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
404 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
405 IDC_HEARTBEAT_FAILURE)) {
406 ha->flags.nic_core_hung = 1;
407 ql_log(ql_log_warn, vha, 0x5060,
408 "83XX: F/W Error Reported: Check if reset required.\n");
410 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
411 uint32_t protocol_engine_id, fw_err_code, err_level;
414 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
415 * - PEG-Halt Status-1 Register:
416 * (LSW = mb[2], MSW = mb[6])
417 * Bits 0-7 = protocol-engine ID
418 * Bits 8-28 = f/w error code
419 * Bits 29-31 = Error-level
420 * Error-level 0x1 = Non-Fatal error
421 * Error-level 0x2 = Recoverable Fatal error
422 * Error-level 0x4 = UnRecoverable Fatal error
423 * - PEG-Halt Status-2 Register:
424 * (LSW = mb[3], MSW = mb[7])
426 protocol_engine_id = (mb[2] & 0xff);
427 fw_err_code = (((mb[2] & 0xff00) >> 8) |
428 ((mb[6] & 0x1fff) << 8));
429 err_level = ((mb[6] & 0xe000) >> 13);
430 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
431 "Register: protocol_engine_id=0x%x "
432 "fw_err_code=0x%x err_level=0x%x.\n",
433 protocol_engine_id, fw_err_code, err_level);
434 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
435 "Register: 0x%x%x.\n", mb[7], mb[3]);
436 if (err_level == ERR_LEVEL_NON_FATAL) {
437 ql_log(ql_log_warn, vha, 0x5063,
438 "Not a fatal error, f/w has recovered itself.\n");
439 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
440 ql_log(ql_log_fatal, vha, 0x5064,
441 "Recoverable Fatal error: Chip reset "
443 qla83xx_schedule_work(vha,
444 QLA83XX_NIC_CORE_RESET);
445 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
446 ql_log(ql_log_fatal, vha, 0x5065,
447 "Unrecoverable Fatal error: Set FAILED "
448 "state, reboot required.\n");
449 qla83xx_schedule_work(vha,
450 QLA83XX_NIC_CORE_UNRECOVERABLE);
454 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
455 uint16_t peg_fw_state, nw_interface_link_up;
456 uint16_t nw_interface_signal_detect, sfp_status;
457 uint16_t htbt_counter, htbt_monitor_enable;
458 uint16_t sfp_additional_info, sfp_multirate;
459 uint16_t sfp_tx_fault, link_speed, dcbx_status;
462 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
463 * - PEG-to-FC Status Register:
464 * (LSW = mb[2], MSW = mb[6])
465 * Bits 0-7 = Peg-Firmware state
466 * Bit 8 = N/W Interface Link-up
467 * Bit 9 = N/W Interface signal detected
468 * Bits 10-11 = SFP Status
469 * SFP Status 0x0 = SFP+ transceiver not expected
470 * SFP Status 0x1 = SFP+ transceiver not present
471 * SFP Status 0x2 = SFP+ transceiver invalid
472 * SFP Status 0x3 = SFP+ transceiver present and
474 * Bits 12-14 = Heartbeat Counter
475 * Bit 15 = Heartbeat Monitor Enable
476 * Bits 16-17 = SFP Additional Info
* SFP info 0x0 = Unrecognized transceiver for
479 * SFP info 0x1 = SFP+ brand validation failed
480 * SFP info 0x2 = SFP+ speed validation failed
481 * SFP info 0x3 = SFP+ access error
482 * Bit 18 = SFP Multirate
483 * Bit 19 = SFP Tx Fault
484 * Bits 20-22 = Link Speed
485 * Bits 23-27 = Reserved
486 * Bits 28-30 = DCBX Status
487 * DCBX Status 0x0 = DCBX Disabled
488 * DCBX Status 0x1 = DCBX Enabled
489 * DCBX Status 0x2 = DCBX Exchange error
492 peg_fw_state = (mb[2] & 0x00ff);
493 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
494 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
495 sfp_status = ((mb[2] & 0x0c00) >> 10);
496 htbt_counter = ((mb[2] & 0x7000) >> 12);
497 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
498 sfp_additional_info = (mb[6] & 0x0003);
499 sfp_multirate = ((mb[6] & 0x0004) >> 2);
500 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
501 link_speed = ((mb[6] & 0x0070) >> 4);
502 dcbx_status = ((mb[6] & 0x7000) >> 12);
504 ql_log(ql_log_warn, vha, 0x5066,
505 "Peg-to-Fc Status Register:\n"
506 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
507 "nw_interface_signal_detect=0x%x"
508 "\nsfp_statis=0x%x.\n ", peg_fw_state,
509 nw_interface_link_up, nw_interface_signal_detect,
511 ql_log(ql_log_warn, vha, 0x5067,
512 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
513 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
514 htbt_counter, htbt_monitor_enable,
515 sfp_additional_info, sfp_multirate);
516 ql_log(ql_log_warn, vha, 0x5068,
517 "sfp_tx_fault=0x%x, link_state=0x%x, "
518 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
521 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
524 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
525 ql_log(ql_log_warn, vha, 0x5069,
526 "Heartbeat Failure encountered, chip reset "
529 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
533 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
534 ql_log(ql_log_info, vha, 0x506a,
535 "IDC Device-State changed = 0x%x.\n", mb[4]);
536 if (ha->flags.nic_core_reset_owner)
538 qla83xx_schedule_work(vha, MBA_IDC_AEN);
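/* Check whether an RSCN'd port ID belongs to one of this host's vports. */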
543 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
545 struct qla_hw_data *ha = vha->hw;
554 spin_lock_irqsave(&ha->vport_slock, flags);
555 list_for_each_entry(vp, &ha->vp_list, list) {
556 vp_did = vp->d_id.b24;
557 if (vp_did == rscn_entry) {
562 spin_unlock_irqrestore(&ha->vport_slock, flags);
568 qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
573 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
574 if (f->loop_id == loop_id)
580 qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
585 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
586 if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
589 else if (f->deleted == 0)
597 qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
603 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
604 if (f->d_id.b24 == id->b24) {
607 else if (f->deleted == 0)
* qla2x00_async_event() - Process asynchronous events.
616 * @vha: SCSI driver HA context
617 * @rsp: response queue
618 * @mb: Mailbox registers (0 - 3)
621 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
626 struct qla_hw_data *ha = vha->hw;
627 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
628 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
629 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
630 uint32_t rscn_entry, host_pid;
632 fc_port_t *fcport = NULL;
634 if (!vha->hw->flags.fw_started)
637 /* Setup to process RIO completion. */
639 if (IS_CNA_CAPABLE(ha))
642 case MBA_SCSI_COMPLETION:
643 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
646 case MBA_CMPLT_1_16BIT:
649 mb[0] = MBA_SCSI_COMPLETION;
651 case MBA_CMPLT_2_16BIT:
655 mb[0] = MBA_SCSI_COMPLETION;
657 case MBA_CMPLT_3_16BIT:
662 mb[0] = MBA_SCSI_COMPLETION;
664 case MBA_CMPLT_4_16BIT:
668 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
670 mb[0] = MBA_SCSI_COMPLETION;
672 case MBA_CMPLT_5_16BIT:
676 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
677 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
679 mb[0] = MBA_SCSI_COMPLETION;
681 case MBA_CMPLT_2_32BIT:
682 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
683 handles[1] = le32_to_cpu(
684 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
685 RD_MAILBOX_REG(ha, reg, 6));
687 mb[0] = MBA_SCSI_COMPLETION;
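/*
 * The RIO cases above repackage the posted handles and are then
 * dispatched as MBA_SCSI_COMPLETION by the main event switch below.
 */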
694 case MBA_SCSI_COMPLETION: /* Fast Post */
695 if (!vha->flags.online)
698 for (cnt = 0; cnt < handle_cnt; cnt++)
699 qla2x00_process_completed_request(vha, rsp->req,
703 case MBA_RESET: /* Reset */
704 ql_dbg(ql_dbg_async, vha, 0x5002,
705 "Asynchronous RESET.\n");
707 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
710 case MBA_SYSTEM_ERR: /* System Error */
711 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
RD_REG_WORD(&reg24->mailbox7) : 0;
713 ql_log(ql_log_warn, vha, 0x5003,
714 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
715 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
717 ha->isp_ops->fw_dump(vha, 1);
718 ha->flags.fw_init_done = 0;
721 if (IS_FWI2_CAPABLE(ha)) {
722 if (mb[1] == 0 && mb[2] == 0) {
723 ql_log(ql_log_fatal, vha, 0x5004,
724 "Unrecoverable Hardware Error: adapter "
725 "marked OFFLINE!\n");
726 vha->flags.online = 0;
727 vha->device_flags |= DFLG_DEV_FAILED;
729 /* Check to see if MPI timeout occurred */
730 if ((mbx & MBX_3) && (ha->port_no == 0))
731 set_bit(MPI_RESET_NEEDED,
734 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
736 } else if (mb[1] == 0) {
737 ql_log(ql_log_fatal, vha, 0x5005,
738 "Unrecoverable Hardware Error: adapter marked "
740 vha->flags.online = 0;
741 vha->device_flags |= DFLG_DEV_FAILED;
743 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
746 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
747 ql_log(ql_log_warn, vha, 0x5006,
748 "ISP Request Transfer Error (%x).\n", mb[1]);
750 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
753 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
754 ql_log(ql_log_warn, vha, 0x5007,
755 "ISP Response Transfer Error (%x).\n", mb[1]);
757 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
760 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
761 ql_dbg(ql_dbg_async, vha, 0x5008,
762 "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
765 case MBA_LOOP_INIT_ERR:
766 ql_log(ql_log_warn, vha, 0x5090,
767 "LOOP INIT ERROR (%x).\n", mb[1]);
768 ha->isp_ops->fw_dump(vha, 1);
769 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
772 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
773 ha->flags.lip_ae = 1;
775 ql_dbg(ql_dbg_async, vha, 0x5009,
776 "LIP occurred (%x).\n", mb[1]);
778 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
779 atomic_set(&vha->loop_state, LOOP_DOWN);
780 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
781 qla2x00_mark_all_devices_lost(vha, 1);
785 atomic_set(&vha->vp_state, VP_FAILED);
786 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
789 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
790 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
792 vha->flags.management_server_logged_in = 0;
793 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
796 case MBA_LOOP_UP: /* Loop Up Event */
797 if (IS_QLA2100(ha) || IS_QLA2200(ha))
798 ha->link_data_rate = PORT_SPEED_1GB;
800 ha->link_data_rate = mb[1];
802 ql_log(ql_log_info, vha, 0x500a,
803 "LOOP UP detected (%s Gbps).\n",
804 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
806 vha->flags.management_server_logged_in = 0;
807 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
809 if (AUTO_DETECT_SFP_SUPPORT(vha)) {
810 set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
811 qla2xxx_wake_dpc(vha);
815 case MBA_LOOP_DOWN: /* Loop Down Event */
817 ha->flags.lip_ae = 0;
818 ha->current_topology = 0;
820 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
? RD_REG_WORD(&reg24->mailbox4) : 0;
mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
824 ql_log(ql_log_info, vha, 0x500b,
825 "LOOP DOWN detected (%x %x %x %x).\n",
826 mb[1], mb[2], mb[3], mbx);
828 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
829 atomic_set(&vha->loop_state, LOOP_DOWN);
830 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
* In case of loop down, restore the WWPN from
* NVRAM for FA-WWPN capable ISPs.
* Restore for the physical port only.
837 if (ha->flags.fawwpn_enabled) {
838 void *wwpn = ha->init_cb->port_name;
839 memcpy(vha->port_name, wwpn, WWN_SIZE);
840 fc_host_port_name(vha->host) =
841 wwn_to_u64(vha->port_name);
842 ql_dbg(ql_dbg_init + ql_dbg_verbose,
843 vha, 0x00d8, "LOOP DOWN detected,"
844 "restore WWPN %016llx\n",
845 wwn_to_u64(vha->port_name));
848 clear_bit(VP_CONFIG_OK, &vha->vp_flags);
851 vha->device_flags |= DFLG_NO_CABLE;
852 qla2x00_mark_all_devices_lost(vha, 1);
856 atomic_set(&vha->vp_state, VP_FAILED);
857 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
860 vha->flags.management_server_logged_in = 0;
861 ha->link_data_rate = PORT_SPEED_UNKNOWN;
862 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
865 case MBA_LIP_RESET: /* LIP reset occurred */
866 ql_dbg(ql_dbg_async, vha, 0x500c,
867 "LIP reset occurred (%x).\n", mb[1]);
869 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
870 atomic_set(&vha->loop_state, LOOP_DOWN);
871 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
872 qla2x00_mark_all_devices_lost(vha, 1);
876 atomic_set(&vha->vp_state, VP_FAILED);
877 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
880 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
882 ha->operating_mode = LOOP;
883 vha->flags.management_server_logged_in = 0;
884 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
887 /* case MBA_DCBX_COMPLETE: */
888 case MBA_POINT_TO_POINT: /* Point-to-Point */
889 ha->flags.lip_ae = 0;
894 if (IS_CNA_CAPABLE(ha)) {
895 ql_dbg(ql_dbg_async, vha, 0x500d,
896 "DCBX Completed -- %04x %04x %04x.\n",
897 mb[1], mb[2], mb[3]);
898 if (ha->notify_dcbx_comp && !vha->vp_idx)
899 complete(&ha->dcbx_comp);
902 ql_dbg(ql_dbg_async, vha, 0x500e,
903 "Asynchronous P2P MODE received.\n");
906 * Until there's a transition from loop down to loop up, treat
907 * this as loop down only.
909 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
910 atomic_set(&vha->loop_state, LOOP_DOWN);
911 if (!atomic_read(&vha->loop_down_timer))
912 atomic_set(&vha->loop_down_timer,
915 qla2x00_mark_all_devices_lost(vha, 1);
919 atomic_set(&vha->vp_state, VP_FAILED);
920 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
923 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
924 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
926 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
927 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
929 vha->flags.management_server_logged_in = 0;
932 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
936 ql_dbg(ql_dbg_async, vha, 0x500f,
937 "Configuration change detected: value=%x.\n", mb[1]);
939 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
940 atomic_set(&vha->loop_state, LOOP_DOWN);
941 if (!atomic_read(&vha->loop_down_timer))
942 atomic_set(&vha->loop_down_timer,
944 qla2x00_mark_all_devices_lost(vha, 1);
948 atomic_set(&vha->vp_state, VP_FAILED);
949 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
952 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
953 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
956 case MBA_PORT_UPDATE: /* Port database update */
958 * Handle only global and vn-port update events
961 * mb[1] = N_Port handle of changed port
962 * OR 0xffff for global event
963 * mb[2] = New login state
964 * 7 = Port logged out
965 * mb[3] = LSB is vp_idx, 0xff = all vps
967 * Skip processing if:
968 * Event is global, vp_idx is NOT all vps,
969 * vp_idx does not match
970 * Event is not global, vp_idx does not match
972 if (IS_QLA2XXX_MIDTYPE(ha) &&
973 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
974 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
978 ql_dbg(ql_dbg_async, vha, 0x5010,
979 "Port %s %04x %04x %04x.\n",
980 mb[1] == 0xffff ? "unavailable" : "logout",
981 mb[1], mb[2], mb[3]);
984 goto global_port_update;
986 if (mb[1] == NPH_SNS_LID(ha)) {
987 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
988 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
992 /* use handle_cnt for loop id/nport handle */
993 if (IS_FWI2_CAPABLE(ha))
994 handle_cnt = NPH_SNS;
996 handle_cnt = SIMPLE_NAME_SERVER;
997 if (mb[1] == handle_cnt) {
998 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
999 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1004 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
1007 if (atomic_read(&fcport->state) != FCS_ONLINE)
1009 ql_dbg(ql_dbg_async, vha, 0x508a,
1010 "Marking port lost loopid=%04x portid=%06x.\n",
1011 fcport->loop_id, fcport->d_id.b24);
1012 if (qla_ini_mode_enabled(vha)) {
1013 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1014 fcport->logout_on_delete = 0;
1015 qlt_schedule_sess_for_deletion(fcport);
1020 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1021 atomic_set(&vha->loop_state, LOOP_DOWN);
1022 atomic_set(&vha->loop_down_timer,
1024 vha->device_flags |= DFLG_NO_CABLE;
1025 qla2x00_mark_all_devices_lost(vha, 1);
1029 atomic_set(&vha->vp_state, VP_FAILED);
1030 fc_vport_set_state(vha->fc_vport,
1032 qla2x00_mark_all_devices_lost(vha, 1);
1035 vha->flags.management_server_logged_in = 0;
1036 ha->link_data_rate = PORT_SPEED_UNKNOWN;
* If the PORT UPDATE is global (a LIP_OCCURRED/LIP_RESET event
* was received earlier, indicating the loop is down), then process
* it. Otherwise ignore it and wait for an RSCN to come in.
1045 atomic_set(&vha->loop_down_timer, 0);
1046 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
1047 !ha->flags.n2n_ae &&
1048 atomic_read(&vha->loop_state) != LOOP_DEAD) {
1049 ql_dbg(ql_dbg_async, vha, 0x5011,
1050 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
1051 mb[1], mb[2], mb[3]);
1055 ql_dbg(ql_dbg_async, vha, 0x5012,
1056 "Port database changed %04x %04x %04x.\n",
1057 mb[1], mb[2], mb[3]);
* Mark all devices as missing so we will log in again.
1062 atomic_set(&vha->loop_state, LOOP_UP);
1063 vha->scan.scan_retry = 0;
1065 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1066 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1067 set_bit(VP_CONFIG_OK, &vha->vp_flags);
1070 case MBA_RSCN_UPDATE: /* State Change Registration */
1071 /* Check if the Vport has issued a SCR */
1072 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
1074 /* Only handle SCNs for our Vport index. */
1075 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
1078 ql_dbg(ql_dbg_async, vha, 0x5013,
1079 "RSCN database changed -- %04x %04x %04x.\n",
1080 mb[1], mb[2], mb[3]);
1082 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
1083 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
1084 | vha->d_id.b.al_pa;
1085 if (rscn_entry == host_pid) {
1086 ql_dbg(ql_dbg_async, vha, 0x5014,
1087 "Ignoring RSCN update to local host "
1088 "port ID (%06x).\n", host_pid);
1092 /* Ignore reserved bits from RSCN-payload. */
1093 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
1095 /* Skip RSCNs for virtual ports on the same physical port */
1096 if (qla2x00_is_a_vp_did(vha, rscn_entry))
1099 atomic_set(&vha->loop_down_timer, 0);
1100 vha->flags.management_server_logged_in = 0;
1102 struct event_arg ea;
1104 memset(&ea, 0, sizeof(ea));
1105 ea.event = FCME_RSCN;
1106 ea.id.b24 = rscn_entry;
1107 ea.id.b.rsvd_1 = rscn_entry >> 24;
1108 qla2x00_fcport_event_handler(vha, &ea);
1109 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1112 /* case MBA_RIO_RESPONSE: */
1113 case MBA_ZIO_RESPONSE:
1114 ql_dbg(ql_dbg_async, vha, 0x5015,
1115 "[R|Z]IO update completion.\n");
1117 if (IS_FWI2_CAPABLE(ha))
1118 qla24xx_process_response_queue(vha, rsp);
1120 qla2x00_process_response_queue(rsp);
1123 case MBA_DISCARD_RND_FRAME:
1124 ql_dbg(ql_dbg_async, vha, 0x5016,
1125 "Discard RND Frame -- %04x %04x %04x.\n",
1126 mb[1], mb[2], mb[3]);
1129 case MBA_TRACE_NOTIFICATION:
1130 ql_dbg(ql_dbg_async, vha, 0x5017,
1131 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
1134 case MBA_ISP84XX_ALERT:
1135 ql_dbg(ql_dbg_async, vha, 0x5018,
1136 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
1137 mb[1], mb[2], mb[3]);
1139 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
1141 case A84_PANIC_RECOVERY:
1142 ql_log(ql_log_info, vha, 0x5019,
1143 "Alert 84XX: panic recovery %04x %04x.\n",
1146 case A84_OP_LOGIN_COMPLETE:
1147 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
1148 ql_log(ql_log_info, vha, 0x501a,
1149 "Alert 84XX: firmware version %x.\n",
1150 ha->cs84xx->op_fw_version);
1152 case A84_DIAG_LOGIN_COMPLETE:
1153 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1154 ql_log(ql_log_info, vha, 0x501b,
1155 "Alert 84XX: diagnostic firmware version %x.\n",
1156 ha->cs84xx->diag_fw_version);
1158 case A84_GOLD_LOGIN_COMPLETE:
1159 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1160 ha->cs84xx->fw_update = 1;
1161 ql_log(ql_log_info, vha, 0x501c,
1162 "Alert 84XX: gold firmware version %x.\n",
1163 ha->cs84xx->gold_fw_version);
1166 ql_log(ql_log_warn, vha, 0x501d,
1167 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
1168 mb[1], mb[2], mb[3]);
1170 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
1172 case MBA_DCBX_START:
1173 ql_dbg(ql_dbg_async, vha, 0x501e,
1174 "DCBX Started -- %04x %04x %04x.\n",
1175 mb[1], mb[2], mb[3]);
1177 case MBA_DCBX_PARAM_UPDATE:
1178 ql_dbg(ql_dbg_async, vha, 0x501f,
1179 "DCBX Parameters Updated -- %04x %04x %04x.\n",
1180 mb[1], mb[2], mb[3]);
1182 case MBA_FCF_CONF_ERR:
1183 ql_dbg(ql_dbg_async, vha, 0x5020,
1184 "FCF Configuration Error -- %04x %04x %04x.\n",
1185 mb[1], mb[2], mb[3]);
1187 case MBA_IDC_NOTIFY:
1188 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
mb[4] = RD_REG_WORD(&reg24->mailbox4);
1190 if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1191 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1192 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1193 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1195 * Extend loop down timer since port is active.
1197 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1198 atomic_set(&vha->loop_down_timer,
1200 qla2xxx_wake_dpc(vha);
1204 case MBA_IDC_COMPLETE:
1205 if (ha->notify_lb_portup_comp && !vha->vp_idx)
1206 complete(&ha->lb_portup_comp);
1208 case MBA_IDC_TIME_EXT:
1209 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
1211 qla81xx_idc_event(vha, mb[0], mb[1]);
mb[4] = RD_REG_WORD(&reg24->mailbox4);
mb[5] = RD_REG_WORD(&reg24->mailbox5);
mb[6] = RD_REG_WORD(&reg24->mailbox6);
mb[7] = RD_REG_WORD(&reg24->mailbox7);
1219 qla83xx_handle_8200_aen(vha, mb);
1222 case MBA_DPORT_DIAGNOSTICS:
1223 ql_dbg(ql_dbg_async, vha, 0x5052,
1224 "D-Port Diagnostics: %04x result=%s\n",
1226 mb[1] == 0 ? "start" :
1227 mb[1] == 1 ? "done (pass)" :
1228 mb[1] == 2 ? "done (error)" : "other");
1231 case MBA_TEMPERATURE_ALERT:
1232 ql_dbg(ql_dbg_async, vha, 0x505e,
1233 "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
1235 schedule_work(&ha->board_disable);
1238 case MBA_TRANS_INSERT:
1239 ql_dbg(ql_dbg_async, vha, 0x5091,
1240 "Transceiver Insertion: %04x\n", mb[1]);
1244 ql_dbg(ql_dbg_async, vha, 0x5057,
1245 "Unknown AEN:%04x %04x %04x %04x\n",
1246 mb[0], mb[1], mb[2], mb[3]);
1249 qlt_async_event(mb[0], vha, mb);
1251 if (!vha->vp_idx && ha->num_vhosts)
1252 qla2x00_alert_all_vps(rsp, mb);
1256 * qla2x00_process_completed_request() - Process a Fast Post response.
1257 * @vha: SCSI driver HA context
1258 * @req: request queue
1262 qla2x00_process_completed_request(struct scsi_qla_host *vha,
1263 struct req_que *req, uint32_t index)
1266 struct qla_hw_data *ha = vha->hw;
1268 /* Validate handle. */
1269 if (index >= req->num_outstanding_cmds) {
1270 ql_log(ql_log_warn, vha, 0x3014,
1271 "Invalid SCSI command index (%x).\n", index);
1273 if (IS_P3P_TYPE(ha))
1274 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1276 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1280 sp = req->outstanding_cmds[index];
1282 /* Free outstanding command slot. */
1283 req->outstanding_cmds[index] = NULL;
1285 /* Save ISP completion status */
1286 sp->done(sp, DID_OK << 16);
1288 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1290 if (IS_P3P_TYPE(ha))
1291 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1293 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1298 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1299 struct req_que *req, void *iocb)
1301 struct qla_hw_data *ha = vha->hw;
1302 sts_entry_t *pkt = iocb;
1306 index = LSW(pkt->handle);
1307 if (index >= req->num_outstanding_cmds) {
1308 ql_log(ql_log_warn, vha, 0x5031,
1309 "Invalid command index (%x) type %8ph.\n",
1311 if (IS_P3P_TYPE(ha))
1312 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1314 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1317 sp = req->outstanding_cmds[index];
1319 ql_log(ql_log_warn, vha, 0x5032,
1320 "Invalid completion handle (%x) -- timed-out.\n", index);
1323 if (sp->handle != index) {
1324 ql_log(ql_log_warn, vha, 0x5033,
1325 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
1329 req->outstanding_cmds[index] = NULL;
1336 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1337 struct mbx_entry *mbx)
1339 const char func[] = "MBX-IOCB";
1343 struct srb_iocb *lio;
1347 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
1351 lio = &sp->u.iocb_cmd;
1353 fcport = sp->fcport;
1354 data = lio->u.logio.data;
1356 data[0] = MBS_COMMAND_ERROR;
1357 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1358 QLA_LOGIO_LOGIN_RETRIED : 0;
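/* Preset an error result; it is overwritten below on successful completion. */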
1359 if (mbx->entry_status) {
1360 ql_dbg(ql_dbg_async, vha, 0x5043,
1361 "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
1362 "entry-status=%x status=%x state-flag=%x "
1363 "status-flags=%x.\n", type, sp->handle,
1364 fcport->d_id.b.domain, fcport->d_id.b.area,
1365 fcport->d_id.b.al_pa, mbx->entry_status,
1366 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
1367 le16_to_cpu(mbx->status_flags));
1369 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
1370 (uint8_t *)mbx, sizeof(*mbx));
1375 status = le16_to_cpu(mbx->status);
1376 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
1377 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
1379 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
1380 ql_dbg(ql_dbg_async, vha, 0x5045,
1381 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
1382 type, sp->handle, fcport->d_id.b.domain,
1383 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1384 le16_to_cpu(mbx->mb1));
1386 data[0] = MBS_COMMAND_COMPLETE;
1387 if (sp->type == SRB_LOGIN_CMD) {
1388 fcport->port_type = FCT_TARGET;
1389 if (le16_to_cpu(mbx->mb1) & BIT_0)
1390 fcport->port_type = FCT_INITIATOR;
1391 else if (le16_to_cpu(mbx->mb1) & BIT_1)
1392 fcport->flags |= FCF_FCP2_DEVICE;
1397 data[0] = le16_to_cpu(mbx->mb0);
1399 case MBS_PORT_ID_USED:
1400 data[1] = le16_to_cpu(mbx->mb1);
1402 case MBS_LOOP_ID_USED:
1405 data[0] = MBS_COMMAND_ERROR;
1409 ql_log(ql_log_warn, vha, 0x5046,
1410 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
1411 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
1412 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
1413 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
1414 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
1415 le16_to_cpu(mbx->mb7));
1422 qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1423 struct mbx_24xx_entry *pkt)
1425 const char func[] = "MBX-IOCB2";
1427 struct srb_iocb *si;
1431 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1435 si = &sp->u.iocb_cmd;
1436 sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
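/* Copy back only as many mailbox words as both buffers can hold. */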
1438 for (i = 0; i < sz; i++)
1439 si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);
1441 res = (si->u.mbx.in_mb[0] & MBS_MASK);
1447 qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1448 struct nack_to_isp *pkt)
1450 const char func[] = "nack";
1454 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1458 if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
1459 res = QLA_FUNCTION_FAILED;
1465 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1466 sts_entry_t *pkt, int iocb_type)
1468 const char func[] = "CT_IOCB";
1471 struct bsg_job *bsg_job;
1472 struct fc_bsg_reply *bsg_reply;
1473 uint16_t comp_status;
1476 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1482 bsg_job = sp->u.bsg_job;
1483 bsg_reply = bsg_job->reply;
1485 type = "ct pass-through";
1487 comp_status = le16_to_cpu(pkt->comp_status);
1490 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1491 * fc payload to the caller
1493 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1494 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1496 if (comp_status != CS_COMPLETE) {
1497 if (comp_status == CS_DATA_UNDERRUN) {
1499 bsg_reply->reply_payload_rcv_len =
1500 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1502 ql_log(ql_log_warn, vha, 0x5048,
1503 "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
1505 bsg_reply->reply_payload_rcv_len);
1507 ql_log(ql_log_warn, vha, 0x5049,
1508 "CT pass-through-%s error comp_status=0x%x.\n",
1510 res = DID_ERROR << 16;
1511 bsg_reply->reply_payload_rcv_len = 0;
1513 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
1514 (uint8_t *)pkt, sizeof(*pkt));
1517 bsg_reply->reply_payload_rcv_len =
1518 bsg_job->reply_payload.payload_len;
1519 bsg_job->reply_len = 0;
1522 case SRB_CT_PTHRU_CMD:
1524 * borrowing sts_entry_24xx.comp_status.
1525 * same location as ct_entry_24xx.comp_status
1527 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
1528 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
1537 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1538 struct sts_entry_24xx *pkt, int iocb_type)
1540 const char func[] = "ELS_CT_IOCB";
1543 struct bsg_job *bsg_job;
1544 struct fc_bsg_reply *bsg_reply;
1545 uint16_t comp_status;
1546 uint32_t fw_status[3];
1548 struct srb_iocb *els;
1550 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1556 case SRB_ELS_CMD_RPT:
1557 case SRB_ELS_CMD_HST:
1561 type = "ct pass-through";
1564 type = "Driver ELS logo";
1565 if (iocb_type != ELS_IOCB_TYPE) {
1566 ql_dbg(ql_dbg_user, vha, 0x5047,
1567 "Completing %s: (%p) type=%d.\n",
1568 type, sp, sp->type);
1573 case SRB_CT_PTHRU_CMD:
/*
 * Borrowing sts_entry_24xx.comp_status:
 * same location as ct_entry_24xx.comp_status.
 */
1577 res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
1578 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
1583 ql_dbg(ql_dbg_user, vha, 0x503e,
1584 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
1588 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1589 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1590 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1592 if (iocb_type == ELS_IOCB_TYPE) {
1593 els = &sp->u.iocb_cmd;
1594 els->u.els_plogi.fw_status[0] = fw_status[0];
1595 els->u.els_plogi.fw_status[1] = fw_status[1];
1596 els->u.els_plogi.fw_status[2] = fw_status[2];
1597 els->u.els_plogi.comp_status = fw_status[0];
1598 if (comp_status == CS_COMPLETE) {
1601 if (comp_status == CS_DATA_UNDERRUN) {
1603 els->u.els_plogi.len =
1604 le16_to_cpu(((struct els_sts_entry_24xx *)
1605 pkt)->total_byte_count);
1607 els->u.els_plogi.len = 0;
1608 res = DID_ERROR << 16;
1611 ql_log(ql_log_info, vha, 0x503f,
1612 "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
1613 type, sp->handle, comp_status, fw_status[1], fw_status[2],
1614 le16_to_cpu(((struct els_sts_entry_24xx *)
1615 pkt)->total_byte_count));
1619 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1620 * fc payload to the caller
1622 bsg_job = sp->u.bsg_job;
1623 bsg_reply = bsg_job->reply;
1624 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1625 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1627 if (comp_status != CS_COMPLETE) {
1628 if (comp_status == CS_DATA_UNDERRUN) {
1630 bsg_reply->reply_payload_rcv_len =
1631 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
1633 ql_dbg(ql_dbg_user, vha, 0x503f,
1634 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1635 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1636 type, sp->handle, comp_status, fw_status[1], fw_status[2],
1637 le16_to_cpu(((struct els_sts_entry_24xx *)
1638 pkt)->total_byte_count));
1640 ql_dbg(ql_dbg_user, vha, 0x5040,
1641 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1642 "error subcode 1=0x%x error subcode 2=0x%x.\n",
1643 type, sp->handle, comp_status,
1644 le16_to_cpu(((struct els_sts_entry_24xx *)
1645 pkt)->error_subcode_1),
1646 le16_to_cpu(((struct els_sts_entry_24xx *)
1647 pkt)->error_subcode_2));
1648 res = DID_ERROR << 16;
1649 bsg_reply->reply_payload_rcv_len = 0;
1651 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
1652 fw_status, sizeof(fw_status));
1653 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
1654 (uint8_t *)pkt, sizeof(*pkt));
1658 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1659 bsg_job->reply_len = 0;
1667 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1668 struct logio_entry_24xx *logio)
1670 const char func[] = "LOGIO-IOCB";
1674 struct srb_iocb *lio;
1678 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1682 lio = &sp->u.iocb_cmd;
1684 fcport = sp->fcport;
1685 data = lio->u.logio.data;
1687 data[0] = MBS_COMMAND_ERROR;
1688 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1689 QLA_LOGIO_LOGIN_RETRIED : 0;
1690 if (logio->entry_status) {
1691 ql_log(ql_log_warn, fcport->vha, 0x5034,
1692 "Async-%s error entry - %8phC hdl=%x"
1693 "portid=%02x%02x%02x entry-status=%x.\n",
1694 type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
1695 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1696 logio->entry_status);
1697 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
1698 (uint8_t *)logio, sizeof(*logio));
1703 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1704 ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
1705 "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
1706 "iop0=%x.\n", type, fcport->port_name, sp->handle,
1707 fcport->d_id.b.domain,
1708 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1709 le32_to_cpu(logio->io_parameter[0]));
1711 vha->hw->exch_starvation = 0;
1712 data[0] = MBS_COMMAND_COMPLETE;
1713 if (sp->type != SRB_LOGIN_CMD)
1716 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1717 if (iop[0] & BIT_4) {
1718 fcport->port_type = FCT_TARGET;
1720 fcport->flags |= FCF_FCP2_DEVICE;
1721 } else if (iop[0] & BIT_5)
1722 fcport->port_type = FCT_INITIATOR;
1725 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1727 if (logio->io_parameter[7] || logio->io_parameter[8])
1728 fcport->supported_classes |= FC_COS_CLASS2;
1729 if (logio->io_parameter[9] || logio->io_parameter[10])
1730 fcport->supported_classes |= FC_COS_CLASS3;
1735 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1736 iop[1] = le32_to_cpu(logio->io_parameter[1]);
1737 lio->u.logio.iop[0] = iop[0];
1738 lio->u.logio.iop[1] = iop[1];
1740 case LSC_SCODE_PORTID_USED:
1741 data[0] = MBS_PORT_ID_USED;
1742 data[1] = LSW(iop[1]);
1744 case LSC_SCODE_NPORT_USED:
1745 data[0] = MBS_LOOP_ID_USED;
1747 case LSC_SCODE_CMD_FAILED:
1748 if (iop[1] == 0x0606) {
* PLOGI/PRLI completed: we must have received a PLOGI/PRLI
* that the target side acked.
1753 data[0] = MBS_COMMAND_COMPLETE;
1756 data[0] = MBS_COMMAND_ERROR;
1758 case LSC_SCODE_NOXCB:
1759 vha->hw->exch_starvation++;
1760 if (vha->hw->exch_starvation > 5) {
1761 ql_log(ql_log_warn, vha, 0xd046,
1762 "Exchange starvation. Resetting RISC\n");
1764 vha->hw->exch_starvation = 0;
1766 if (IS_P3P_TYPE(vha->hw))
1767 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1769 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1770 qla2xxx_wake_dpc(vha);
1774 data[0] = MBS_COMMAND_ERROR;
1778 ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
1779 "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
1780 "iop0=%x iop1=%x.\n", type, fcport->port_name,
1781 sp->handle, fcport->d_id.b.domain,
1782 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1783 le16_to_cpu(logio->comp_status),
1784 le32_to_cpu(logio->io_parameter[0]),
1785 le32_to_cpu(logio->io_parameter[1]));
1792 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
1794 const char func[] = "TMF-IOCB";
1798 struct srb_iocb *iocb;
1799 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1801 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1805 iocb = &sp->u.iocb_cmd;
1807 fcport = sp->fcport;
1808 iocb->u.tmf.data = QLA_SUCCESS;
1810 if (sts->entry_status) {
1811 ql_log(ql_log_warn, fcport->vha, 0x5038,
1812 "Async-%s error - hdl=%x entry-status(%x).\n",
1813 type, sp->handle, sts->entry_status);
1814 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
1815 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
1816 ql_log(ql_log_warn, fcport->vha, 0x5039,
1817 "Async-%s error - hdl=%x completion status(%x).\n",
1818 type, sp->handle, sts->comp_status);
1819 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
1820 } else if ((le16_to_cpu(sts->scsi_status) &
1821 SS_RESPONSE_INFO_LEN_VALID)) {
1822 host_to_fcp_swap(sts->data, sizeof(sts->data));
1823 if (le32_to_cpu(sts->rsp_data_len) < 4) {
1824 ql_log(ql_log_warn, fcport->vha, 0x503b,
1825 "Async-%s error - hdl=%x not enough response(%d).\n",
1826 type, sp->handle, sts->rsp_data_len);
1827 } else if (sts->data[3]) {
1828 ql_log(ql_log_warn, fcport->vha, 0x503c,
1829 "Async-%s error - hdl=%x response(%x).\n",
1830 type, sp->handle, sts->data[3]);
1831 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
1835 if (iocb->u.tmf.data != QLA_SUCCESS)
1836 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1837 (uint8_t *)sts, sizeof(*sts));
1842 static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1843 void *tsk, srb_t *sp)
1846 struct srb_iocb *iocb;
1847 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1848 uint16_t state_flags;
1849 struct nvmefc_fcp_req *fd;
1852 iocb = &sp->u.iocb_cmd;
1853 fcport = sp->fcport;
1854 iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status);
1855 state_flags = le16_to_cpu(sts->state_flags);
1856 fd = iocb->u.nvme.desc;
1858 if (unlikely(iocb->u.nvme.aen_op))
1859 atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
* State flags: bits 6 and 0.
* If bit 0 is set, we don't care about bit 6: in both cases the
* response was DMA'd to the host buffer.
* If both are 0, that is the good-path case.
* If bit 6 is set and bit 0 is clear, we need to copy the response
* data from the status IOCB to the response buffer.
1869 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
1870 iocb->u.nvme.rsp_pyld_len = 0;
1871 } else if ((state_flags & SF_FCP_RSP_DMA)) {
1872 iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
1873 } else if (state_flags & SF_NVME_ERSP) {
1874 uint32_t *inbuf, *outbuf;
1877 inbuf = (uint32_t *)&sts->nvme_ersp_data;
1878 outbuf = (uint32_t *)fd->rspaddr;
1879 iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
1880 if (unlikely(iocb->u.nvme.rsp_pyld_len >
1881 sizeof(struct nvme_fc_ersp_iu))) {
1882 if (ql_mask_match(ql_dbg_io)) {
1883 WARN_ONCE(1, "Unexpected response payload length %u.\n",
1884 iocb->u.nvme.rsp_pyld_len);
1885 ql_log(ql_log_warn, fcport->vha, 0x5100,
1886 "Unexpected response payload length %u.\n",
1887 iocb->u.nvme.rsp_pyld_len);
1889 iocb->u.nvme.rsp_pyld_len =
1890 sizeof(struct nvme_fc_ersp_iu);
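/* Copy the ERSP payload into the NVMe response buffer, swapping each 32-bit word. */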
1892 iter = iocb->u.nvme.rsp_pyld_len >> 2;
1893 for (; iter; iter--)
1894 *outbuf++ = swab32(*inbuf++);
1895 } else { /* unhandled case */
1896 ql_log(ql_log_warn, fcport->vha, 0x503a,
1897 "NVME-%s error. Unhandled state_flags of %x\n",
1898 sp->name, state_flags);
1901 fd->transferred_length = fd->payload_length -
1902 le32_to_cpu(sts->residual_len);
1904 switch (le16_to_cpu(sts->comp_status)) {
1910 case CS_PORT_UNAVAILABLE:
1911 case CS_PORT_LOGGED_OUT:
1913 ql_log(ql_log_warn, fcport->vha, 0x5060,
1914 "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
1915 sp->name, sp->handle, sts->comp_status,
1916 le32_to_cpu(sts->residual_len), sts->ox_id);
1917 fd->transferred_length = 0;
1918 iocb->u.nvme.rsp_pyld_len = 0;
1922 ql_log(ql_log_warn, fcport->vha, 0x5060,
1923 "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
1924 sp->name, sp->handle, sts->comp_status,
1925 le32_to_cpu(sts->residual_len), sts->ox_id);
1926 ret = QLA_FUNCTION_FAILED;
1932 static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
1933 struct vp_ctrl_entry_24xx *vce)
1935 const char func[] = "CTRLVP-IOCB";
1937 int rval = QLA_SUCCESS;
1939 sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
1943 if (vce->entry_status != 0) {
1944 ql_dbg(ql_dbg_vport, vha, 0x10c4,
1945 "%s: Failed to complete IOCB -- error status (%x)\n",
1946 sp->name, vce->entry_status);
1947 rval = QLA_FUNCTION_FAILED;
1948 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
1949 ql_dbg(ql_dbg_vport, vha, 0x10c5,
1950 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
1951 sp->name, le16_to_cpu(vce->comp_status),
1952 le16_to_cpu(vce->vp_idx_failed));
1953 rval = QLA_FUNCTION_FAILED;
1955 ql_dbg(ql_dbg_vport, vha, 0x10c6,
1956 "Done %s.\n", __func__);
1964 * qla2x00_process_response_queue() - Process response queue entries.
1965 * @rsp: response queue
1968 qla2x00_process_response_queue(struct rsp_que *rsp)
1970 struct scsi_qla_host *vha;
1971 struct qla_hw_data *ha = rsp->hw;
1972 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1974 uint16_t handle_cnt;
1977 vha = pci_get_drvdata(ha->pdev);
1979 if (!vha->flags.online)
1982 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1983 pkt = (sts_entry_t *)rsp->ring_ptr;
1986 if (rsp->ring_index == rsp->length) {
1987 rsp->ring_index = 0;
1988 rsp->ring_ptr = rsp->ring;
1993 if (pkt->entry_status != 0) {
1994 qla2x00_error_entry(vha, rsp, pkt);
1995 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2000 switch (pkt->entry_type) {
2002 qla2x00_status_entry(vha, rsp, pkt);
2004 case STATUS_TYPE_21:
2005 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
2006 for (cnt = 0; cnt < handle_cnt; cnt++) {
2007 qla2x00_process_completed_request(vha, rsp->req,
2008 ((sts21_entry_t *)pkt)->handle[cnt]);
2011 case STATUS_TYPE_22:
2012 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
2013 for (cnt = 0; cnt < handle_cnt; cnt++) {
2014 qla2x00_process_completed_request(vha, rsp->req,
2015 ((sts22_entry_t *)pkt)->handle[cnt]);
2018 case STATUS_CONT_TYPE:
2019 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2022 qla2x00_mbx_iocb_entry(vha, rsp->req,
2023 (struct mbx_entry *)pkt);
2026 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2029 /* Type Not Supported. */
2030 ql_log(ql_log_warn, vha, 0x504a,
2031 "Received unknown response pkt type %x "
2032 "entry status=%x.\n",
2033 pkt->entry_type, pkt->entry_status);
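/* Mark the entry consumed so the ring scan above knows when to stop. */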
2036 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2040 /* Adjust ring index */
2041 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
2045 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
2046 uint32_t sense_len, struct rsp_que *rsp, int res)
2048 struct scsi_qla_host *vha = sp->vha;
2049 struct scsi_cmnd *cp = GET_CMD_SP(sp);
2050 uint32_t track_sense_len;
2052 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
2053 sense_len = SCSI_SENSE_BUFFERSIZE;
2055 SET_CMD_SENSE_LEN(sp, sense_len);
2056 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
2057 track_sense_len = sense_len;
2059 if (sense_len > par_sense_len)
2060 sense_len = par_sense_len;
2062 memcpy(cp->sense_buffer, sense_data, sense_len);
2064 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
2065 track_sense_len -= sense_len;
2066 SET_CMD_SENSE_LEN(sp, track_sense_len);
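/*
 * If more sense data remains than fits in this status IOCB, park the
 * SRB on the response queue so status-continuation entries can
 * deliver the rest.
 */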
2068 if (track_sense_len != 0) {
2069 rsp->status_srb = sp;
2074 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
2075 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
2076 sp->vha->host_no, cp->device->id, cp->device->lun,
2078 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
2079 cp->sense_buffer, sense_len);
2083 struct scsi_dif_tuple {
2084 __be16 guard; /* Checksum */
2085 __be16 app_tag; /* APPL identifier */
2086 __be32 ref_tag; /* Target LBA or indirect LBA */
2090 * Checks the guard or meta-data for the type of error
2091 * detected by the HBA. In case of errors, we set the
2092 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
* to indicate to the kernel that the HBA detected an error.
2096 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
2098 struct scsi_qla_host *vha = sp->vha;
2099 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2100 uint8_t *ap = &sts24->data[12];
2101 uint8_t *ep = &sts24->data[20];
2102 uint32_t e_ref_tag, a_ref_tag;
2103 uint16_t e_app_tag, a_app_tag;
2104 uint16_t e_guard, a_guard;
2107 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
* would make the guard field appear at offset 2.
2110 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
2111 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
2112 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
2113 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
2114 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
2115 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
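/* Values prefixed "a_" are the actual tags reported by the hardware; "e_" are the expected tags. */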
2117 ql_dbg(ql_dbg_io, vha, 0x3023,
2118 "iocb(s) %p Returned STATUS.\n", sts24);
2120 ql_dbg(ql_dbg_io, vha, 0x3024,
2121 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
2122 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
2123 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
2124 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
2125 a_app_tag, e_app_tag, a_guard, e_guard);
2129 * For type 3: ref & app tag is all 'f's
2130 * For type 0,1,2: app tag is all 'f's
2132 if ((a_app_tag == T10_PI_APP_ESCAPE) &&
2133 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
2134 (a_ref_tag == T10_PI_REF_ESCAPE))) {
2135 uint32_t blocks_done, resid;
2136 sector_t lba_s = scsi_get_lba(cmd);
2138 /* 2TB boundary case covered automatically with this */
2139 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
2141 resid = scsi_bufflen(cmd) - (blocks_done *
2142 cmd->device->sector_size);
2144 scsi_set_resid(cmd, resid);
2145 cmd->result = DID_OK << 16;
2147 /* Update protection tag */
2148 if (scsi_prot_sg_count(cmd)) {
2149 uint32_t i, j = 0, k = 0, num_ent;
2150 struct scatterlist *sg;
2151 struct t10_pi_tuple *spt;
2153 /* Patch the corresponding protection tags */
2154 scsi_for_each_prot_sg(cmd, sg,
2155 scsi_prot_sg_count(cmd), i) {
2156 num_ent = sg_dma_len(sg) / 8;
2157 if (k + num_ent < blocks_done) {
2161 j = blocks_done - k - 1;
2166 if (k != blocks_done) {
2167 ql_log(ql_log_warn, vha, 0x302f,
2168 "unexpected tag values tag:lba=%x:%llx)\n",
2169 e_ref_tag, (unsigned long long)lba_s);
2173 spt = page_address(sg_page(sg)) + sg->offset;
2176 spt->app_tag = T10_PI_APP_ESCAPE;
2177 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
2178 spt->ref_tag = T10_PI_REF_ESCAPE;
2185 if (e_guard != a_guard) {
2186 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2188 set_driver_byte(cmd, DRIVER_SENSE);
2189 set_host_byte(cmd, DID_ABORT);
2190 cmd->result |= SAM_STAT_CHECK_CONDITION;
2195 if (e_ref_tag != a_ref_tag) {
2196 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2198 set_driver_byte(cmd, DRIVER_SENSE);
2199 set_host_byte(cmd, DID_ABORT);
2200 cmd->result |= SAM_STAT_CHECK_CONDITION;
2204 /* check appl tag */
2205 if (e_app_tag != a_app_tag) {
2206 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2208 set_driver_byte(cmd, DRIVER_SENSE);
2209 set_host_byte(cmd, DID_ABORT);
2210 cmd->result |= SAM_STAT_CHECK_CONDITION;
2218 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
2219 struct req_que *req, uint32_t index)
2221 struct qla_hw_data *ha = vha->hw;
2223 uint16_t comp_status;
2224 uint16_t scsi_status;
2226 uint32_t rval = EXT_STATUS_OK;
2227 struct bsg_job *bsg_job = NULL;
2228 struct fc_bsg_request *bsg_request;
2229 struct fc_bsg_reply *bsg_reply;
2231 struct sts_entry_24xx *sts24;
2232 sts = (sts_entry_t *) pkt;
2233 sts24 = (struct sts_entry_24xx *) pkt;
2235 /* Validate handle. */
2236 if (index >= req->num_outstanding_cmds) {
2237 ql_log(ql_log_warn, vha, 0x70af,
2238 "Invalid SCSI completion handle 0x%x.\n", index);
2239 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2243 sp = req->outstanding_cmds[index];
2245 ql_log(ql_log_warn, vha, 0x70b0,
2246 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
2249 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2253 /* Free outstanding command slot. */
2254 req->outstanding_cmds[index] = NULL;
2255 bsg_job = sp->u.bsg_job;
2256 bsg_request = bsg_job->request;
2257 bsg_reply = bsg_job->reply;
2259 if (IS_FWI2_CAPABLE(ha)) {
2260 comp_status = le16_to_cpu(sts24->comp_status);
2261 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2263 comp_status = le16_to_cpu(sts->comp_status);
2264 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2267 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
2268 switch (comp_status) {
2270 if (scsi_status == 0) {
2271 bsg_reply->reply_payload_rcv_len =
2272 bsg_job->reply_payload.payload_len;
2273 vha->qla_stats.input_bytes +=
2274 bsg_reply->reply_payload_rcv_len;
2275 vha->qla_stats.input_requests++;
2276 rval = EXT_STATUS_OK;
2280 case CS_DATA_OVERRUN:
2281 ql_dbg(ql_dbg_user, vha, 0x70b1,
2282 "Command completed with data overrun thread_id=%d\n",
2284 rval = EXT_STATUS_DATA_OVERRUN;
2287 case CS_DATA_UNDERRUN:
2288 ql_dbg(ql_dbg_user, vha, 0x70b2,
2289 "Command completed with data underrun thread_id=%d\n",
2291 rval = EXT_STATUS_DATA_UNDERRUN;
2293 case CS_BIDIR_RD_OVERRUN:
2294 ql_dbg(ql_dbg_user, vha, 0x70b3,
2295 "Command completed with read data overrun thread_id=%d\n",
2297 rval = EXT_STATUS_DATA_OVERRUN;
2300 case CS_BIDIR_RD_WR_OVERRUN:
2301 ql_dbg(ql_dbg_user, vha, 0x70b4,
2302 "Command completed with read and write data overrun "
2303 "thread_id=%d\n", thread_id);
2304 rval = EXT_STATUS_DATA_OVERRUN;
2307 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
2308 ql_dbg(ql_dbg_user, vha, 0x70b5,
2309 "Command completed with read data over and write data "
2310 "underrun thread_id=%d\n", thread_id);
2311 rval = EXT_STATUS_DATA_OVERRUN;
2314 case CS_BIDIR_RD_UNDERRUN:
2315 ql_dbg(ql_dbg_user, vha, 0x70b6,
2316 "Command completed with read data underrun "
2317 "thread_id=%d\n", thread_id);
2318 rval = EXT_STATUS_DATA_UNDERRUN;
2321 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
2322 ql_dbg(ql_dbg_user, vha, 0x70b7,
2323 "Command completed with read data under and write data "
2324 "overrun thread_id=%d\n", thread_id);
2325 rval = EXT_STATUS_DATA_UNDERRUN;
2328 case CS_BIDIR_RD_WR_UNDERRUN:
2329 ql_dbg(ql_dbg_user, vha, 0x70b8,
2330 "Command completed with read and write data underrun "
2331 "thread_id=%d\n", thread_id);
2332 rval = EXT_STATUS_DATA_UNDERRUN;
2336 ql_dbg(ql_dbg_user, vha, 0x70b9,
2337 "Command completed with data DMA error thread_id=%d\n",
2339 rval = EXT_STATUS_DMA_ERR;
2343 ql_dbg(ql_dbg_user, vha, 0x70ba,
2344 "Command completed with timeout thread_id=%d\n",
2346 rval = EXT_STATUS_TIMEOUT;
2349 ql_dbg(ql_dbg_user, vha, 0x70bb,
2350 "Command completed with completion status=0x%x "
2351 "thread_id=%d\n", comp_status, thread_id);
2352 rval = EXT_STATUS_ERR;
2355 bsg_reply->reply_payload_rcv_len = 0;
2358 /* Return the vendor specific reply to API */
2359 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
2360 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2361 /* Always return DID_OK, bsg will send the vendor specific response
2362 * in this case only */
2363 sp->done(sp, DID_OK << 16);
2364 }
2367 /**
2368 * qla2x00_status_entry() - Process a Status IOCB entry.
2369 * @vha: SCSI driver HA context
2370 * @rsp: response queue
2371 * @pkt: Entry pointer
2372 */
2373 static void
2374 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2378 struct scsi_cmnd *cp;
2380 struct sts_entry_24xx *sts24;
2381 uint16_t comp_status;
2382 uint16_t scsi_status;
2384 uint8_t lscsi_status;
2386 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
2388 uint8_t *rsp_info, *sense_data;
2389 struct qla_hw_data *ha = vha->hw;
2392 struct req_que *req;
2395 uint16_t state_flags = 0;
2396 uint16_t retry_delay = 0;
2398 sts = (sts_entry_t *) pkt;
2399 sts24 = (struct sts_entry_24xx *) pkt;
2400 if (IS_FWI2_CAPABLE(ha)) {
2401 comp_status = le16_to_cpu(sts24->comp_status);
2402 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2403 state_flags = le16_to_cpu(sts24->state_flags);
2405 comp_status = le16_to_cpu(sts->comp_status);
2406 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2408 handle = (uint32_t) LSW(sts->handle);
2409 que = MSW(sts->handle);
2410 req = ha->req_q_map[que];
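/*
 * The completion handle packs the outstanding-command index in the low
 * word and the originating request-queue number in the high word; both
 * halves are validated below before the srb is looked up.
 */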
2412 /* Check for invalid queue pointer */
2413 if (req == NULL ||
2414 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
2415 ql_dbg(ql_dbg_io, vha, 0x3059,
2416 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
2417 "que=%u.\n", sts->handle, req, que);
2421 /* Validate handle. */
2422 if (handle < req->num_outstanding_cmds) {
2423 sp = req->outstanding_cmds[handle];
2424 if (sp == NULL) {
2425 ql_dbg(ql_dbg_io, vha, 0x3075,
2426 "%s(%ld): Already returned command for status handle (0x%x).\n",
2427 __func__, vha->host_no, sts->handle);
2428 return;
2429 }
2430 } else {
2431 ql_dbg(ql_dbg_io, vha, 0x3017,
2432 "Invalid status handle, out of range (0x%x).\n",
2433 sts->handle);
2435 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
2436 if (IS_P3P_TYPE(ha))
2437 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2438 else
2439 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2440 qla2xxx_wake_dpc(vha);
2441 }
2442 return;
2443 }
2445 if (sp->cmd_type != TYPE_SRB) {
2446 req->outstanding_cmds[handle] = NULL;
2447 ql_dbg(ql_dbg_io, vha, 0x3015,
2448 "Unknown sp->cmd_type %x %p).\n",
2453 /* NVME completion. */
2454 if (sp->type == SRB_NVME_CMD) {
2455 req->outstanding_cmds[handle] = NULL;
2456 qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
2457 return;
2458 }
2460 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
2461 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
2465 /* Task Management completion. */
2466 if (sp->type == SRB_TM_CMD) {
2467 qla24xx_tm_iocb_entry(vha, req, pkt);
2471 /* Fast path completion. */
2472 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2473 qla2x00_process_completed_request(vha, req, handle);
2475 return;
2476 }
2478 req->outstanding_cmds[handle] = NULL;
2479 cp = GET_CMD_SP(sp);
2481 ql_dbg(ql_dbg_io, vha, 0x3018,
2482 "Command already returned (0x%x/%p).\n",
2488 lscsi_status = scsi_status & STATUS_MASK;
2490 fcport = sp->fcport;
2493 sense_len = par_sense_len = rsp_info_len = resid_len =
2495 if (IS_FWI2_CAPABLE(ha)) {
2496 if (scsi_status & SS_SENSE_LEN_VALID)
2497 sense_len = le32_to_cpu(sts24->sense_len);
2498 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2499 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
2500 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
2501 resid_len = le32_to_cpu(sts24->rsp_residual_count);
2502 if (comp_status == CS_DATA_UNDERRUN)
2503 fw_resid_len = le32_to_cpu(sts24->residual_len);
2504 rsp_info = sts24->data;
2505 sense_data = sts24->data;
2506 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
2507 ox_id = le16_to_cpu(sts24->ox_id);
2508 par_sense_len = sizeof(sts24->data);
2509 /* Valid values of the retry delay timer are 0x1-0xffef */
2510 if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) {
2511 retry_delay = sts24->retry_delay & 0x3fff;
2512 ql_dbg(ql_dbg_io, sp->vha, 0x3033,
2513 "%s: scope=%#x retry_delay=%#x\n", __func__,
2514 sts24->retry_delay >> 14, retry_delay);
2517 if (scsi_status & SS_SENSE_LEN_VALID)
2518 sense_len = le16_to_cpu(sts->req_sense_length);
2519 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2520 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
2521 resid_len = le32_to_cpu(sts->residual_length);
2522 rsp_info = sts->rsp_info;
2523 sense_data = sts->req_sense_data;
2524 par_sense_len = sizeof(sts->req_sense_data);
2527 /* Check for any FCP transport errors. */
2528 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
2529 /* Sense data lies beyond any FCP RESPONSE data. */
2530 if (IS_FWI2_CAPABLE(ha)) {
2531 sense_data += rsp_info_len;
2532 par_sense_len -= rsp_info_len;
2534 if (rsp_info_len > 3 && rsp_info[3]) {
2535 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
2536 "FCP I/O protocol failure (0x%x/0x%x).\n",
2537 rsp_info_len, rsp_info[3]);
2539 res = DID_BUS_BUSY << 16;
2544 /* Check for overrun. */
2545 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
2546 scsi_status & SS_RESIDUAL_OVER)
2547 comp_status = CS_DATA_OVERRUN;
2549 /*
2550 * Check retry_delay_timer value if we receive a busy or
2551 * queue full.
2552 */
2553 if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
2554 lscsi_status == SAM_STAT_BUSY)
2555 qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
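/*
 * The retry delay reported with a BUSY or TASK SET FULL status is
 * remembered on the fcport so that subsequent commands to the port can be
 * held off until it expires.
 */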
2557 /*
2558 * Based on Host and scsi status generate status code for Linux
2559 */
2560 switch (comp_status) {
2563 if (scsi_status == 0) {
2567 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
2569 scsi_set_resid(cp, resid);
2571 if (!lscsi_status &&
2572 ((unsigned)(scsi_bufflen(cp) - resid) <
2574 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
2575 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
2576 resid, scsi_bufflen(cp));
2578 res = DID_ERROR << 16;
2582 res = DID_OK << 16 | lscsi_status;
2584 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2585 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
2586 "QUEUE FULL detected.\n");
2590 if (lscsi_status != SS_CHECK_CONDITION)
2593 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2594 if (!(scsi_status & SS_SENSE_LEN_VALID))
2597 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2601 case CS_DATA_UNDERRUN:
2602 /* Use F/W calculated residual length. */
2603 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
2604 scsi_set_resid(cp, resid);
2605 if (scsi_status & SS_RESIDUAL_UNDER) {
2606 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
2607 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
2608 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
2609 resid, scsi_bufflen(cp));
2611 res = DID_ERROR << 16 | lscsi_status;
2612 goto check_scsi_status;
2615 if (!lscsi_status &&
2616 ((unsigned)(scsi_bufflen(cp) - resid) <
2618 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
2619 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
2620 resid, scsi_bufflen(cp));
2622 res = DID_ERROR << 16;
2625 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2626 lscsi_status != SAM_STAT_BUSY) {
2627 /*
2628 * scsi status of task set and busy are considered to be
2629 * task not completed.
2630 */
2632 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
2633 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
2634 resid, scsi_bufflen(cp));
2636 res = DID_ERROR << 16 | lscsi_status;
2637 goto check_scsi_status;
2639 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
2640 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2641 scsi_status, lscsi_status);
2644 res = DID_OK << 16 | lscsi_status;
2645 }
2647 check_scsi_status:
2648 /*
2649 * Check to see if SCSI Status is non zero. If so report SCSI
2650 * Status.
2651 */
2652 if (lscsi_status != 0) {
2653 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2654 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
2655 "QUEUE FULL detected.\n");
2659 if (lscsi_status != SS_CHECK_CONDITION)
2662 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2663 if (!(scsi_status & SS_SENSE_LEN_VALID))
2666 qla2x00_handle_sense(sp, sense_data, par_sense_len,
2667 sense_len, rsp, res);
2671 case CS_PORT_LOGGED_OUT:
2672 case CS_PORT_CONFIG_CHG:
2675 case CS_PORT_UNAVAILABLE:
2680 * We are going to have the fc class block the rport
2681 * while we try to recover so instruct the mid layer
2682 * to requeue until the class decides how to handle this.
2684 res = DID_TRANSPORT_DISRUPTED << 16;
2686 if (comp_status == CS_TIMEOUT) {
2687 if (IS_FWI2_CAPABLE(ha))
2689 else if ((le16_to_cpu(sts->status_flags) &
2690 SF_LOGOUT_SENT) == 0)
2694 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2695 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2696 "Port to be marked lost on fcport=%02x%02x%02x, current "
2697 "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
2698 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2699 port_state_str[atomic_read(&fcport->state)],
2702 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2703 qlt_schedule_sess_for_deletion(fcport);
2709 res = DID_RESET << 16;
2710 break;
2712 case CS_DIF_ERROR:
2713 logit = qla2x00_handle_dif_error(sp, sts24);
2714 res = cp->result;
2715 break;
2717 case CS_TRANSPORT:
2718 res = DID_ERROR << 16;
2720 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2721 break;
2723 if (state_flags & BIT_4)
2724 scmd_printk(KERN_WARNING, cp,
2725 "Unsupported device '%s' found.\n",
2726 cp->device->vendor);
2730 res = DID_ERROR << 16;
2731 break;
2732 }
2734 out:
2735 if (logit)
2736 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
2737 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
2738 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
2739 "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
2740 comp_status, scsi_status, res, vha->host_no,
2741 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
2742 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2743 cp->cmnd, scsi_bufflen(cp), rsp_info_len,
2744 resid_len, fw_resid_len, sp, cp);
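/*
 * If sense data overflowed the status IOCB, rsp->status_srb is left
 * pointing at this srb and completion is deferred until
 * qla2x00_status_cont_entry() has copied the remaining sense bytes.
 */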
2746 if (rsp->status_srb == NULL)
2747 sp->done(sp, res);
2748 }
2750 /**
2751 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
2752 * @rsp: response queue
2753 * @pkt: Entry pointer
2754 *
2755 * Extended sense data.
2756 */
2757 static void
2758 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2760 uint8_t sense_sz = 0;
2761 struct qla_hw_data *ha = rsp->hw;
2762 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2763 srb_t *sp = rsp->status_srb;
2764 struct scsi_cmnd *cp;
2768 if (!sp || !GET_CMD_SENSE_LEN(sp))
2771 sense_len = GET_CMD_SENSE_LEN(sp);
2772 sense_ptr = GET_CMD_SENSE_PTR(sp);
2774 cp = GET_CMD_SP(sp);
2776 ql_log(ql_log_warn, vha, 0x3025,
2777 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2779 rsp->status_srb = NULL;
2783 if (sense_len > sizeof(pkt->data))
2784 sense_sz = sizeof(pkt->data);
2786 sense_sz = sense_len;
2788 /* Move sense data. */
2789 if (IS_FWI2_CAPABLE(ha))
2790 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2791 memcpy(sense_ptr, pkt->data, sense_sz);
2792 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2793 sense_ptr, sense_sz);
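/*
 * Sense data is delivered in sizeof(pkt->data)-sized pieces; advance the
 * saved sense pointer and length so any further continuation entries
 * append at the right offset.
 */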
2795 sense_len -= sense_sz;
2796 sense_ptr += sense_sz;
2798 SET_CMD_SENSE_PTR(sp, sense_ptr);
2799 SET_CMD_SENSE_LEN(sp, sense_len);
2801 /* Place command on done queue. */
2802 if (sense_len == 0) {
2803 rsp->status_srb = NULL;
2804 sp->done(sp, cp->result);
2808 /**
2809 * qla2x00_error_entry() - Process an error entry.
2810 * @vha: SCSI driver HA context
2811 * @rsp: response queue
2812 * @pkt: Entry pointer
2813 * return : 1=allow further error analysis. 0=no additional error analysis.
2814 */
2815 static int
2816 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2819 struct qla_hw_data *ha = vha->hw;
2820 const char func[] = "ERROR-IOCB";
2821 uint16_t que = MSW(pkt->handle);
2822 struct req_que *req = NULL;
2823 int res = DID_ERROR << 16;
2825 ql_dbg(ql_dbg_async, vha, 0x502a,
2826 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
2827 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
2829 if (que >= ha->max_req_queues || !ha->req_q_map[que])
2832 req = ha->req_q_map[que];
2834 if (pkt->entry_status & RF_BUSY)
2835 res = DID_BUS_BUSY << 16;
2837 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
2840 switch (pkt->entry_type) {
2841 case NOTIFY_ACK_TYPE:
2843 case STATUS_CONT_TYPE:
2844 case LOGINOUT_PORT_IOCB_TYPE:
2847 case ABORT_IOCB_TYPE:
2850 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2857 case ABTS_RESP_24XX:
2863 ql_log(ql_log_warn, vha, 0x5030,
2864 "Error entry - invalid handle/queue (%04x).\n", que);
2868 /**
2869 * qla24xx_mbx_completion() - Process mailbox command completions.
2870 * @vha: SCSI driver HA context
2871 * @mb0: Mailbox0 register
2872 */
2873 static void
2874 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2878 uint16_t __iomem *wptr;
2879 struct qla_hw_data *ha = vha->hw;
2880 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2882 /* Read all mbox registers? */
2883 WARN_ON_ONCE(ha->mbx_count > 32);
2884 mboxes = (1ULL << ha->mbx_count) - 1;
2886 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2888 mboxes = ha->mcp->in_mb;
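/*
 * mboxes selects which mailbox registers to copy back: every register when
 * no mailbox command is outstanding, otherwise only those named in the
 * in-flight command's in_mb mask.
 */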
2890 /* Load return mailbox registers. */
2891 ha->flags.mbox_int = 1;
2892 ha->mailbox_out[0] = mb0;
2894 wptr = (uint16_t __iomem *)&reg->mailbox1;
2896 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2897 if (mboxes & BIT_0)
2898 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2900 mboxes >>= 1;
2901 wptr++;
2902 }
2903 }
2906 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2907 struct abort_entry_24xx *pkt)
2909 const char func[] = "ABT_IOCB";
2911 struct srb_iocb *abt;
2913 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2914 if (!sp)
2915 return;
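/*
 * The abort status IOCB reports the abort's completion status in its
 * nport_handle field; it is stashed in the srb so the issuing path can
 * check whether the abort succeeded.
 */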
2917 abt = &sp->u.iocb_cmd;
2918 abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
2919 sp->done(sp, 0);
2920 }
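/*
 * Completion of an NVMe LS pass-through (PT_LS4) request: the originating
 * srb is looked up by handle and completed with the firmware-reported
 * status.
 */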
2922 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
2923 struct pt_ls4_request *pkt, struct req_que *req)
2926 const char func[] = "LS4_IOCB";
2927 uint16_t comp_status;
2929 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2933 comp_status = le16_to_cpu(pkt->status);
2934 sp->done(sp, comp_status);
2935 }
2937 /**
2938 * qla24xx_process_response_queue() - Process response queue entries.
2939 * @vha: SCSI driver HA context
2940 * @rsp: response queue
2941 */
2942 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2943 struct rsp_que *rsp)
2945 struct sts_entry_24xx *pkt;
2946 struct qla_hw_data *ha = vha->hw;
2948 if (!ha->flags.fw_started)
2951 if (rsp->qpair->cpuid != smp_processor_id())
2952 qla_cpu_update(rsp->qpair, smp_processor_id());
2954 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2955 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
2958 if (rsp->ring_index == rsp->length) {
2959 rsp->ring_index = 0;
2960 rsp->ring_ptr = rsp->ring;
2965 if (pkt->entry_status != 0) {
2966 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
2969 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2975 switch (pkt->entry_type) {
2977 qla2x00_status_entry(vha, rsp, pkt);
2979 case STATUS_CONT_TYPE:
2980 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2982 case VP_RPT_ID_IOCB_TYPE:
2983 qla24xx_report_id_acquisition(vha,
2984 (struct vp_rpt_id_entry_24xx *)pkt);
2986 case LOGINOUT_PORT_IOCB_TYPE:
2987 qla24xx_logio_entry(vha, rsp->req,
2988 (struct logio_entry_24xx *)pkt);
2991 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2994 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2996 case ABTS_RECV_24XX:
2997 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
2998 /* ensure that the ATIO queue is empty */
2999 qlt_handle_abts_recv(vha, rsp,
3003 qlt_24xx_process_atio_queue(vha, 1);
3006 case ABTS_RESP_24XX:
3009 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
3011 case PT_LS4_REQUEST:
3012 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
3015 case NOTIFY_ACK_TYPE:
3016 if (pkt->handle == QLA_TGT_SKIP_HANDLE)
3017 qlt_response_pkt_all_vps(vha, rsp,
3020 qla24xxx_nack_iocb_entry(vha, rsp->req,
3021 (struct nack_to_isp *)pkt);
3024 /* Do nothing in this case, this check is to prevent it
3025 * from falling into default case
3028 case ABORT_IOCB_TYPE:
3029 qla24xx_abort_iocb_entry(vha, rsp->req,
3030 (struct abort_entry_24xx *)pkt);
3033 qla24xx_mbx_iocb_entry(vha, rsp->req,
3034 (struct mbx_24xx_entry *)pkt);
3036 case VP_CTRL_IOCB_TYPE:
3037 qla_ctrlvp_completed(vha, rsp->req,
3038 (struct vp_ctrl_entry_24xx *)pkt);
3041 /* Type Not Supported. */
3042 ql_dbg(ql_dbg_async, vha, 0x5042,
3043 "Received unknown response pkt type %x "
3044 "entry status=%x.\n",
3045 pkt->entry_type, pkt->entry_status);
3048 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3052 /* Adjust ring index */
3053 if (IS_P3P_TYPE(ha)) {
3054 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
3055 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
3057 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
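/*
 * Diagnostic helper used on the RISC-paused path: selects register windows
 * through iobase_addr/iobase_window and logs an additional code when the
 * firmware flags one.
 */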
3062 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
3066 struct qla_hw_data *ha = vha->hw;
3067 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3069 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3074 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
3075 RD_REG_DWORD(&reg->iobase_addr);
3076 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
3077 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
3078 rval == QLA_SUCCESS; cnt--) {
3079 if (cnt) {
3080 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
3081 udelay(10);
3082 } else
3083 rval = QLA_FUNCTION_TIMEOUT;
3084 }
3085 if (rval == QLA_SUCCESS)
3086 goto next_test;
3088 rval = QLA_SUCCESS;
3089 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
3090 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
3091 rval == QLA_SUCCESS; cnt--) {
3092 if (cnt) {
3093 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
3094 udelay(10);
3095 } else
3096 rval = QLA_FUNCTION_TIMEOUT;
3097 }
3098 if (rval != QLA_SUCCESS)
3099 goto done;
3101 next_test:
3102 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
3103 ql_log(ql_log_info, vha, 0x504c,
3104 "Additional code -- 0x55AA.\n");
3106 done:
3107 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
3108 RD_REG_DWORD(&reg->iobase_window);
3109 }
3111 /**
3112 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
3114 * @dev_id: SCSI driver HA context
3116 * Called by system whenever the host adapter generates an interrupt.
3118 * Returns handled flag.
3119 */
3120 irqreturn_t
3121 qla24xx_intr_handler(int irq, void *dev_id)
3123 scsi_qla_host_t *vha;
3124 struct qla_hw_data *ha;
3125 struct device_reg_24xx __iomem *reg;
3131 struct rsp_que *rsp;
3132 unsigned long flags;
3133 bool process_atio = false;
3135 rsp = (struct rsp_que *) dev_id;
3137 ql_log(ql_log_info, NULL, 0x5059,
3138 "%s: NULL response queue pointer.\n", __func__);
3143 reg = &ha->iobase->isp24;
3146 if (unlikely(pci_channel_offline(ha->pdev)))
3149 spin_lock_irqsave(&ha->hardware_lock, flags);
3150 vha = pci_get_drvdata(ha->pdev);
3151 for (iter = 50; iter--; ) {
3152 stat = RD_REG_DWORD(&reg->host_status);
3153 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3155 if (stat & HSRX_RISC_PAUSED) {
3156 if (unlikely(pci_channel_offline(ha->pdev)))
3159 hccr = RD_REG_DWORD(&reg->hccr);
3161 ql_log(ql_log_warn, vha, 0x504b,
3162 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3165 qla2xxx_check_risc_status(vha);
3167 ha->isp_ops->fw_dump(vha, 1);
3168 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3170 } else if ((stat & HSRX_RISC_INT) == 0)
3173 switch (stat & 0xff) {
3174 case INTR_ROM_MB_SUCCESS:
3175 case INTR_ROM_MB_FAILED:
3176 case INTR_MB_SUCCESS:
3177 case INTR_MB_FAILED:
3178 qla24xx_mbx_completion(vha, MSW(stat));
3179 status |= MBX_INTERRUPT;
3182 case INTR_ASYNC_EVENT:
3183 mb[0] = MSW(stat);
3184 mb[1] = RD_REG_WORD(&reg->mailbox1);
3185 mb[2] = RD_REG_WORD(&reg->mailbox2);
3186 mb[3] = RD_REG_WORD(&reg->mailbox3);
3187 qla2x00_async_event(vha, rsp, mb);
3189 case INTR_RSP_QUE_UPDATE:
3190 case INTR_RSP_QUE_UPDATE_83XX:
3191 qla24xx_process_response_queue(vha, rsp);
3193 case INTR_ATIO_QUE_UPDATE_27XX:
3194 case INTR_ATIO_QUE_UPDATE:
3195 process_atio = true;
3197 case INTR_ATIO_RSP_QUE_UPDATE:
3198 process_atio = true;
3199 qla24xx_process_response_queue(vha, rsp);
3202 ql_dbg(ql_dbg_async, vha, 0x504f,
3203 "Unrecognized interrupt type (%d).\n", stat * 0xff);
3206 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3207 RD_REG_DWORD_RELAXED(&reg->hccr);
3208 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
3211 qla2x00_handle_mbx_completion(ha, status);
3212 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3215 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3216 qlt_24xx_process_atio_queue(vha, 0);
3217 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
3218 }
3220 return IRQ_HANDLED;
3221 }
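/*
 * MSI-X vector servicing the base response queue: process completions and,
 * unless the MSI-X handshake is disabled, explicitly clear the RISC
 * interrupt.
 */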
3224 qla24xx_msix_rsp_q(int irq, void *dev_id)
3226 struct qla_hw_data *ha;
3227 struct rsp_que *rsp;
3228 struct device_reg_24xx __iomem *reg;
3229 struct scsi_qla_host *vha;
3230 unsigned long flags;
3232 rsp = (struct rsp_que *) dev_id;
3234 ql_log(ql_log_info, NULL, 0x505a,
3235 "%s: NULL response queue pointer.\n", __func__);
3239 reg = &ha->iobase->isp24;
3241 spin_lock_irqsave(&ha->hardware_lock, flags);
3243 vha = pci_get_drvdata(ha->pdev);
3244 qla24xx_process_response_queue(vha, rsp);
3245 if (!ha->flags.disable_msix_handshake) {
3246 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3247 RD_REG_DWORD_RELAXED(&reg->hccr);
3249 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3251 return IRQ_HANDLED;
3252 }
3255 qla24xx_msix_default(int irq, void *dev_id)
3257 scsi_qla_host_t *vha;
3258 struct qla_hw_data *ha;
3259 struct rsp_que *rsp;
3260 struct device_reg_24xx __iomem *reg;
3265 unsigned long flags;
3266 bool process_atio = false;
3268 rsp = (struct rsp_que *) dev_id;
3270 ql_log(ql_log_info, NULL, 0x505c,
3271 "%s: NULL response queue pointer.\n", __func__);
3275 reg = &ha->iobase->isp24;
3278 spin_lock_irqsave(&ha->hardware_lock, flags);
3279 vha = pci_get_drvdata(ha->pdev);
3281 stat = RD_REG_DWORD(&reg->host_status);
3282 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3284 if (stat & HSRX_RISC_PAUSED) {
3285 if (unlikely(pci_channel_offline(ha->pdev)))
3288 hccr = RD_REG_DWORD(&reg->hccr);
3290 ql_log(ql_log_info, vha, 0x5050,
3291 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3294 qla2xxx_check_risc_status(vha);
3296 ha->isp_ops->fw_dump(vha, 1);
3297 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3299 } else if ((stat & HSRX_RISC_INT) == 0)
3302 switch (stat & 0xff) {
3303 case INTR_ROM_MB_SUCCESS:
3304 case INTR_ROM_MB_FAILED:
3305 case INTR_MB_SUCCESS:
3306 case INTR_MB_FAILED:
3307 qla24xx_mbx_completion(vha, MSW(stat));
3308 status |= MBX_INTERRUPT;
3311 case INTR_ASYNC_EVENT:
3312 mb[0] = MSW(stat);
3313 mb[1] = RD_REG_WORD(&reg->mailbox1);
3314 mb[2] = RD_REG_WORD(&reg->mailbox2);
3315 mb[3] = RD_REG_WORD(&reg->mailbox3);
3316 qla2x00_async_event(vha, rsp, mb);
3318 case INTR_RSP_QUE_UPDATE:
3319 case INTR_RSP_QUE_UPDATE_83XX:
3320 qla24xx_process_response_queue(vha, rsp);
3322 case INTR_ATIO_QUE_UPDATE_27XX:
3323 case INTR_ATIO_QUE_UPDATE:
3324 process_atio = true;
3326 case INTR_ATIO_RSP_QUE_UPDATE:
3327 process_atio = true;
3328 qla24xx_process_response_queue(vha, rsp);
3331 ql_dbg(ql_dbg_async, vha, 0x5051,
3332 "Unrecognized interrupt type (%d).\n", stat & 0xff);
3335 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3337 qla2x00_handle_mbx_completion(ha, status);
3338 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3341 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3342 qlt_24xx_process_atio_queue(vha, 0);
3343 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
3344 }
3346 return IRQ_HANDLED;
3347 }
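/*
 * MSI-X vector backing a queue pair: clear the RISC interrupt if the
 * handshake is enabled and defer all response processing to the qpair's
 * q_work item on the driver workqueue.
 */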
3350 qla2xxx_msix_rsp_q(int irq, void *dev_id)
3352 struct qla_hw_data *ha;
3353 struct qla_qpair *qpair;
3354 struct device_reg_24xx __iomem *reg;
3355 unsigned long flags;
3359 ql_log(ql_log_info, NULL, 0x505b,
3360 "%s: NULL response queue pointer.\n", __func__);
3365 /* Clear the interrupt, if enabled, for this response queue */
3366 if (unlikely(!ha->flags.disable_msix_handshake)) {
3367 reg = &ha->iobase->isp24;
3368 spin_lock_irqsave(&ha->hardware_lock, flags);
3369 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3370 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3373 queue_work(ha->wq, &qpair->q_work);
3375 return IRQ_HANDLED;
3376 }
3378 /* Interrupt handling helpers. */
3380 struct qla_init_msix_entry {
3381 const char *name;
3382 irq_handler_t handler;
3383 };
3385 static const struct qla_init_msix_entry msix_entries[] = {
3386 { "default", qla24xx_msix_default },
3387 { "rsp_q", qla24xx_msix_rsp_q },
3388 { "atio_q", qla83xx_msix_atio_q },
3389 { "qpair_multiq", qla2xxx_msix_rsp_q },
3392 static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
3393 { "qla2xxx (default)", qla82xx_msix_default },
3394 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
3398 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3401 struct qla_msix_entry *qentry;
3402 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3403 int min_vecs = QLA_BASE_VECTORS;
3404 struct irq_affinity desc = {
3405 .pre_vectors = QLA_BASE_VECTORS,
3408 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
3409 IS_ATIO_MSIX_CAPABLE(ha)) {
3414 if (USER_CTRL_IRQ(ha)) {
3415 /* user wants to control IRQ setting for target mode */
3416 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
3417 ha->msix_count, PCI_IRQ_MSIX);
3419 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
3420 ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
3424 ql_log(ql_log_fatal, vha, 0x00c7,
3425 "MSI-X: Failed to enable support, "
3426 "giving up -- %d/%d.\n",
3427 ha->msix_count, ret);
3429 } else if (ret < ha->msix_count) {
3430 ql_log(ql_log_info, vha, 0x00c6,
3431 "MSI-X: Using %d vectors\n", ret);
3432 ha->msix_count = ret;
3433 /* Recalculate queue values */
3434 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
3435 ha->max_req_queues = ha->msix_count - 1;
3437 /* ATIOQ needs 1 vector. That's 1 less QPair */
3438 if (QLA_TGT_MODE_ENABLED())
3439 ha->max_req_queues--;
3441 ha->max_rsp_queues = ha->max_req_queues;
3443 ha->max_qpairs = ha->max_req_queues - 1;
3444 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
3445 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
3448 ha->msix_entries = kcalloc(ha->msix_count,
3449 sizeof(struct qla_msix_entry),
3450 GFP_KERNEL);
3451 if (!ha->msix_entries) {
3452 ql_log(ql_log_fatal, vha, 0x00c8,
3453 "Failed to allocate memory for ha->msix_entries.\n");
3457 ha->flags.msix_enabled = 1;
3459 for (i = 0; i < ha->msix_count; i++) {
3460 qentry = &ha->msix_entries[i];
3461 qentry->vector = pci_irq_vector(ha->pdev, i);
3463 qentry->have_irq = 0;
3465 qentry->handle = NULL;
3468 /* Enable MSI-X vectors for the base queue */
3469 for (i = 0; i < QLA_BASE_VECTORS; i++) {
3470 qentry = &ha->msix_entries[i];
3471 qentry->handle = rsp;
3473 scnprintf(qentry->name, sizeof(qentry->name),
3474 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
3475 if (IS_P3P_TYPE(ha))
3476 ret = request_irq(qentry->vector,
3477 qla82xx_msix_entries[i].handler,
3478 0, qla82xx_msix_entries[i].name, rsp);
3480 ret = request_irq(qentry->vector,
3481 msix_entries[i].handler,
3482 0, qentry->name, rsp);
3483 if (ret)
3484 goto msix_register_fail;
3485 qentry->have_irq = 1;
3490 * If target mode is enable, also request the vector for the ATIO
3493 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
3494 IS_ATIO_MSIX_CAPABLE(ha)) {
3495 qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
3497 qentry->handle = rsp;
3498 scnprintf(qentry->name, sizeof(qentry->name),
3499 "qla2xxx%lu_%s", vha->host_no,
3500 msix_entries[QLA_ATIO_VECTOR].name);
3502 ret = request_irq(qentry->vector,
3503 msix_entries[QLA_ATIO_VECTOR].handler,
3504 0, qentry->name, rsp);
3505 qentry->have_irq = 1;
3506 }
3508 msix_register_fail:
3509 if (ret) {
3510 ql_log(ql_log_fatal, vha, 0x00cb,
3511 "MSI-X: unable to register handler -- %x/%d.\n",
3512 qentry->vector, ret);
3513 qla2x00_free_irqs(vha);
3518 /* Enable MSI-X vector for response queue update for queue 0 */
3519 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3520 if (ha->msixbase && ha->mqiobase &&
3521 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3526 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3529 ql_dbg(ql_dbg_multiq, vha, 0xc005,
3530 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3531 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3532 ql_dbg(ql_dbg_init, vha, 0x0055,
3533 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3534 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3540 pci_free_irq_vectors(ha->pdev);
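/*
 * Interrupt setup policy: prefer MSI-X where the ISP and module parameters
 * allow it, fall back to single-vector MSI, and finally to legacy INTx;
 * ISP82xx parts are never run in INTx mode.
 */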
3545 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
3547 int ret = QLA_FUNCTION_FAILED;
3548 device_reg_t *reg = ha->iobase;
3549 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3551 /* If possible, enable MSI-X. */
3552 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
3553 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
3554 !IS_QLAFX00(ha) && !IS_QLA27XX(ha)))
3557 if (ql2xenablemsix == 2)
3560 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
3561 (ha->pdev->subsystem_device == 0x7040 ||
3562 ha->pdev->subsystem_device == 0x7041 ||
3563 ha->pdev->subsystem_device == 0x1705)) {
3564 ql_log(ql_log_warn, vha, 0x0034,
3565 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
3566 ha->pdev->subsystem_vendor,
3567 ha->pdev->subsystem_device);
3571 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
3572 ql_log(ql_log_warn, vha, 0x0035,
3573 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
3574 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
3578 ret = qla24xx_enable_msix(ha, rsp);
3580 ql_dbg(ql_dbg_init, vha, 0x0036,
3581 "MSI-X: Enabled (0x%X, 0x%X).\n",
3582 ha->chip_revision, ha->fw_attributes);
3583 goto clear_risc_ints;
3588 ql_log(ql_log_info, vha, 0x0037,
3589 "Falling back-to MSI mode -- ret=%d.\n", ret);
3591 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
3592 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
3596 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
3598 ql_dbg(ql_dbg_init, vha, 0x0038,
3600 ha->flags.msi_enabled = 1;
3602 ql_log(ql_log_warn, vha, 0x0039,
3603 "Falling back-to INTa mode -- ret=%d.\n", ret);
3606 /* Skip INTx on ISP82xx. */
3607 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
3608 return QLA_FUNCTION_FAILED;
3610 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
3611 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
3612 QLA2XXX_DRIVER_NAME, rsp);
3614 ql_log(ql_log_warn, vha, 0x003a,
3615 "Failed to reserve interrupt %d already in use.\n",
3618 } else if (!ha->flags.msi_enabled) {
3619 ql_dbg(ql_dbg_init, vha, 0x0125,
3620 "INTa mode: Enabled.\n");
3621 ha->flags.mr_intr_valid = 1;
3625 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
3626 goto fail;
3628 spin_lock_irq(&ha->hardware_lock);
3629 WRT_REG_WORD(&reg->isp.semaphore, 0);
3630 spin_unlock_irq(&ha->hardware_lock);
3632 fail:
3633 return ret;
3634 }
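/*
 * Undo qla2x00_request_irqs(): free each registered MSI-X vector, or the
 * single MSI/INTx handler registered against response queue 0, then
 * release the PCI IRQ vectors.
 */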
3637 qla2x00_free_irqs(scsi_qla_host_t *vha)
3639 struct qla_hw_data *ha = vha->hw;
3640 struct rsp_que *rsp;
3641 struct qla_msix_entry *qentry;
3645 * We need to check that ha->rsp_q_map is valid in case we are called
3646 * from a probe failure context.
3648 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
3650 rsp = ha->rsp_q_map[0];
3652 if (ha->flags.msix_enabled) {
3653 for (i = 0; i < ha->msix_count; i++) {
3654 qentry = &ha->msix_entries[i];
3655 if (qentry->have_irq) {
3656 irq_set_affinity_notifier(qentry->vector, NULL);
3657 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
3660 kfree(ha->msix_entries);
3661 ha->msix_entries = NULL;
3662 ha->flags.msix_enabled = 0;
3663 ql_dbg(ql_dbg_init, vha, 0x0042,
3664 "Disabled MSI-X.\n");
3666 free_irq(pci_irq_vector(ha->pdev, 0), rsp);
3670 pci_free_irq_vectors(ha->pdev);
3671 }
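/*
 * Register the MSI-X vector backing a queue pair; vector_type selects the
 * handler from msix_entries[] and the IRQ name encodes the host number and
 * qpair id.
 */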
3673 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
3674 struct qla_msix_entry *msix, int vector_type)
3676 const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
3677 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3680 scnprintf(msix->name, sizeof(msix->name),
3681 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
3682 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
3684 ql_log(ql_log_fatal, vha, 0x00e6,
3685 "MSI-X: Unable to register handler -- %x/%d.\n",
3690 msix->handle = qpair;