/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
static void qla_irq_affinity_notify(struct irq_affinity_notify *,
	const cpumask_t *);
static void qla_irq_affinity_release(struct kref *);
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
			break;
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}
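/*
 * Worked example (illustration only, not driver code): the mb[0] range
 * checks in the handler above classify the first mailbox register.  A
 * value in 0x4000-0x7fff is a mailbox-command completion status (MBS_*),
 * while a value in 0x8000-0xbfff is an asynchronous event code (MBA_*):
 *
 *	mb[0] = 0x4000	-> qla2x00_mbx_completion() (MBS_COMMAND_COMPLETE)
 *	mb[0] = 0x8010	-> qla2x00_async_event()    (MBA_LIP_OCCURRED)
 *	mb[0] = 0xc000	-> "Unrecognized interrupt type" debug message
 */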
bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
	/* Check for PCI disconnection */
	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
			/*
			 * Schedule this (only once) on the default system
			 * workqueue so that all the adapter workqueues and the
			 * DPC thread can be shutdown cleanly.
			 */
			schedule_work(&vha->hw->board_disable);
		}
		return true;
	} else
		return false;
}

bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}
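/*
 * Illustration (not driver code): a surprise-removed PCI device returns
 * all ones on MMIO reads.  The 16-bit variant above widens the value with
 * 0xffff0000 so only a true all-ones read trips the 32-bit check:
 *
 *	reg = 0xffff  ->  0xffff0000 | 0xffff = 0xffffffff  -> disconnected
 *	reg = 0x00ff  ->  0xffff00ff                        -> healthy
 */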
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}
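/*
 * Worked example (illustration only, not driver code): the ISP23xx
 * host_status register packs the interrupt type in the low byte and
 * mailbox0 in the upper 16 bits, which is why the handler above switches
 * on (stat & 0xff) and recovers mb[0] with MSW(stat):
 *
 *	stat = 0x80100012  ->  type 0x12 (asynchronous event),
 *	                       mb[0] = MSW(stat) = 0x8010 (MBA_LIP_OCCURRED)
 */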
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}
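/*
 * Illustration (not driver code): "mboxes" is a bitmask of the mailbox
 * registers the caller asked for (mcp->in_mb).  After storing mailbox0
 * the mask is shifted right once per iteration, so BIT_0 always refers
 * to the current register:
 *
 *	mcp->in_mb = MBX_2|MBX_1|MBX_0 (0x7), mbx_count = 8
 *	cnt = 1: mboxes = 0x3, BIT_0 set -> read mailbox1
 *	cnt = 2: mboxes = 0x1, BIT_0 set -> read mailbox2
 *	cnt = 3: mboxes = 0x0            -> mailbox3..7 skipped
 */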
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = (uint16_t __iomem *)&reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
	else
		return;

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Notify && non-zero timeout]. */
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		    vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			    "IDC failed to post ACK.\n");
		break;
	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		    "%lu Inter-Driver Communication %s -- "
		    "Extend timeout by=%d.\n",
		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}
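/*
 * Illustration (not driver code): for MBA_IDC_NOTIFY the ACK timeout is
 * carried in bits 8-11 of the descriptor, so descr = 0x0500 decodes to
 * timeout = (0x0500 >> 8) & 0xf = 5.  A zero timeout means no ACK is
 * posted back to the firmware.
 */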
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char *const link_speeds[] = {
		"1", "2", "?", "4", "8", "16", "32", "10"
	};
#define	QLA_LAST_SPEED	7

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		return link_speeds[QLA_LAST_SPEED];
	else if (speed < QLA_LAST_SPEED)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}
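/*
 * Illustration (not driver code): link_speeds[] is indexed by the
 * firmware speed code, with 0x13 reserved for the 10 Gbps CNA rate:
 *
 *	speed 0x00 -> "1" (1 Gbps)	speed 0x05 -> "16"
 *	speed 0x03 -> "4"		speed 0x13 -> "10" (10 Gbps)
 */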
static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

	/*
	 * 8200 AEN Interpretation:
	 * mb[0] = AEN code, e.g. 0x8200
	 * mb[1] = AEN Reason code
	 * mb[2] = LSW of Peg-Halt Status-1 Register
	 * mb[6] = MSW of Peg-Halt Status-1 Register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State Register value
	 * mb[5] = IDC Driver-Presence Register value
	 */
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
	    IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

			/*
			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
			 * - PEG-Halt Status-1 Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = protocol-engine ID
			 *	Bits 8-28  = f/w error code
			 *	Bits 29-31 = Error-level
			 *	    Error-level 0x1 = Non-Fatal error
			 *	    Error-level 0x2 = Recoverable Fatal error
			 *	    Error-level 0x4 = UnRecoverable Fatal error
			 * - PEG-Halt Status-2 Register:
			 *	(LSW = mb[3], MSW = mb[7])
			 */
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered "
				    "itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset "
				    "required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				    "Unrecoverable Fatal error: Set FAILED "
				    "state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}

		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
			uint16_t sfp_additional_info, sfp_multirate;
			uint16_t sfp_tx_fault, link_speed, dcbx_status;

			/*
			 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
			 * - PEG-to-FC Status Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = Peg-Firmware state
			 *	Bit 8      = N/W Interface Link-up
			 *	Bit 9      = N/W Interface signal detected
			 *	Bits 10-11 = SFP Status
			 *	  SFP Status 0x0 = SFP+ transceiver not expected
			 *	  SFP Status 0x1 = SFP+ transceiver not present
			 *	  SFP Status 0x2 = SFP+ transceiver invalid
			 *	  SFP Status 0x3 = SFP+ transceiver present and
			 *	  valid
			 *	Bits 12-14 = Heartbeat Counter
			 *	Bit 15     = Heartbeat Monitor Enable
			 *	Bits 16-17 = SFP Additional Info
			 *	  SFP info 0x0 = Unrecognized transceiver for
			 *	  Ethernet
			 *	  SFP info 0x1 = SFP+ brand validation failed
			 *	  SFP info 0x2 = SFP+ speed validation failed
			 *	  SFP info 0x3 = SFP+ access error
			 *	Bit 18     = SFP Multirate
			 *	Bit 19     = SFP Tx Fault
			 *	Bits 20-22 = Link Speed
			 *	Bits 23-27 = Reserved
			 *	Bits 28-30 = DCBX Status
			 *	  DCBX Status 0x0 = DCBX Disabled
			 *	  DCBX Status 0x1 = DCBX Enabled
			 *	  DCBX Status 0x2 = DCBX Exchange error
			 *	Bit 31     = Reserved
			 */
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
			sfp_additional_info = (mb[6] & 0x0003);
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);

			ql_log(ql_log_warn, vha, 0x5066,
			    "Peg-to-FC Status Register:\n"
			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			    "nw_interface_signal_detect=0x%x"
			    "\nsfp_status=0x%x.\n", peg_fw_state,
			    nw_interface_link_up, nw_interface_signal_detect,
			    sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n",
			    htbt_counter, htbt_monitor_enable,
			    sfp_additional_info, sfp_multirate);
			ql_log(ql_log_warn, vha, 0x5068,
			    "sfp_tx_fault=0x%x, link_speed=0x%x, "
			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			    dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}

		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			    "Heartbeat Failure encountered, chip reset "
			    "required.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		    "IDC Device-State changed = 0x%x.\n", mb[4]);
		if (ha->flags.nic_core_reset_owner)
			return;
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}
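/*
 * Worked example (illustration only, not driver code): decoding a
 * Peg-Halt Status-1 pair of mb[2] = 0x1234 (LSW) and mb[6] = 0x5678 (MSW)
 * with the shifts used above:
 *
 *	protocol_engine_id = 0x1234 & 0xff                = 0x34
 *	fw_err_code = ((0x1234 & 0xff00) >> 8) |
 *	              ((0x5678 & 0x1fff) << 8)            = 0x167812
 *	err_level   = (0x5678 & 0xe000) >> 13             = 0x2
 *
 * err_level 0x2 is ERR_LEVEL_RECOVERABLE_FATAL, so this example would
 * schedule QLA83XX_NIC_CORE_RESET.
 */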
int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	uint32_t vp_did;
	unsigned long flags;
	int ret = 0;

	if (!ha->num_vhosts)
		return ret;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp_did = vp->d_id.b24;
		if (vp_did == rscn_entry) {
			ret = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return ret;
}
static inline fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list)
		if (fcport->loop_id == loop_id)
			return fcport;
	return NULL;
}
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t	handle_cnt;
	uint16_t	cnt, mbx;
	uint32_t	handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t	rscn_entry, host_pid;
	unsigned long	flags;
	fc_port_t	*fcport = NULL;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
				handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
			RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->port_no == 0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
		break;

	case MBA_LOOP_INIT_ERR:
		ql_log(ql_log_warn, vha, 0x5090,
		    "LOOP INIT ERROR (%x).\n", mb[1]);
		ha->isp_ops->fw_dump(vha, 1);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_log(ql_log_info, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
			: mbx;
		ql_log(ql_log_info, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			/*
			 * In case of loop down, restore WWPN from
			 * NVRAM for an FA-WWPN capable ISP.
			 * Restore for the physical port only.
			 */
			if (!vha->vp_idx) {
				if (ha->flags.fawwpn_enabled) {
					void *wwpn = ha->init_cb->port_name;
					memcpy(vha->port_name, wwpn, WWN_SIZE);
					fc_host_port_name(vha->host) =
					    wwn_to_u64(vha->port_name);
					ql_dbg(ql_dbg_init + ql_dbg_verbose,
					    vha, 0x0144, "LOOP DOWN detected, "
					    "restore WWPN %016llx\n",
					    wwn_to_u64(vha->port_name));
				}

				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
			}

			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		if (IS_CNA_CAPABLE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp && !vha->vp_idx)
				complete(&ha->dcbx_comp);
		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 *         OR 0xffff for global event
		 * mb[2] = New login state
		 *         7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *   Event is global, vp_idx is NOT all vps,
		 *   vp_idx does not match
		 *   Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		if (mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port %s %04x %04x %04x.\n",
			    mb[1] == 0xffff ? "unavailable" : "logout",
			    mb[1], mb[2], mb[3]);

			if (mb[1] == 0xffff)
				goto global_port_update;

			/* Port logout */
			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
			if (!fcport)
				break;
			if (atomic_read(&fcport->state) != FCS_ONLINE)
				break;
			ql_dbg(ql_dbg_async, vha, 0x508a,
			    "Marking port lost loopid=%04x portid=%06x.\n",
			    fcport->loop_id, fcport->d_id.b24);
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
			break;

global_port_update:
			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for an RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(vha, 1);

		if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
			set_bit(SCR_PENDING, &vha->dpc_flags);

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(VP_CONFIG_OK, &vha->vp_flags);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
				| vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

		/* Skip RSCNs for virtual ports on the same physical port */
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

		/*
		 * Search for the rport related to this RSCN entry and mark it
		 * as lost.
		 */
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (atomic_read(&fcport->state) != FCS_ONLINE)
				continue;
			if (fcport->d_id.b24 == rscn_entry) {
				qla2x00_mark_device_lost(vha, fcport, 0, 0);
				break;
			}
		}

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(RSCN_UPDATE, &vha->dpc_flags);
		qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_NOTIFY:
		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
			mb[4] = RD_REG_WORD(&reg24->mailbox4);
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
				/*
				 * Extend loop down timer since port is active.
				 */
				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
					atomic_set(&vha->loop_down_timer,
					    LOOP_DOWN_TIME);
				qla2xxx_wake_dpc(vha);
			}
		}
		/* fall through */
	case MBA_IDC_COMPLETE:
		if (ha->notify_lb_portup_comp && !vha->vp_idx)
			complete(&ha->lb_portup_comp);
		/* fall through */
	case MBA_IDC_TIME_EXT:
		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
		    IS_QLA8044(ha))
			qla81xx_idc_event(vha, mb[0], mb[1]);
		break;

	case MBA_IDC_AEN:
		mb[4] = RD_REG_WORD(&reg24->mailbox4);
		mb[5] = RD_REG_WORD(&reg24->mailbox5);
		mb[6] = RD_REG_WORD(&reg24->mailbox6);
		mb[7] = RD_REG_WORD(&reg24->mailbox7);
		qla83xx_handle_8200_aen(vha, mb);
		break;

	case MBA_DPORT_DIAGNOSTICS:
		ql_dbg(ql_dbg_async, vha, 0x5052,
		    "D-Port Diagnostics: %04x result=%s\n",
		    mb[0],
		    mb[1] == 0 ? "start" :
		    mb[1] == 1 ? "done (pass)" :
		    mb[1] == 2 ? "done (error)" : "other");
		break;

	case MBA_TEMPERATURE_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x505e,
		    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
		if (mb[1] == 0x12)
			schedule_work(&ha->board_disable);
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}
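/*
 * Worked example (illustration only, not driver code): an RSCN AEN
 * carries the affected port ID split across mb[1] (address format in the
 * high byte, domain in the low byte) and mb[2] (area/al_pa).  With
 * mb[1] = 0x0002 and mb[2] = 0x0304 the handler above computes:
 *
 *	rscn_entry = ((0x0002 & 0x3ff) << 16) | 0x0304 = 0x020304
 *
 * i.e. domain 0x02, area 0x03, al_pa 0x04, which is then compared against
 * the local host_pid and each fcport->d_id.b24.
 */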
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
				struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(ha, sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}
srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x).\n", index);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}
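/*
 * Illustration (not driver code): IOCB handles returned by the firmware
 * pack the request-queue number in the MSW and the outstanding-command
 * index in the LSW.  qla2x00_get_sp_from_handle() only needs the index:
 *
 *	pkt->handle = 0x00020005  ->  LSW = 5 (outstanding_cmds[5]),
 *	                              MSW = 2 (request queue, used by the
 *	                              status-entry path)
 */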
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(vha, sp, 0);
}
static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	bsg_job = sp->u.bsg_job;

	type = "ct pass-through";

	comp_status = le16_to_cpu(pkt->comp_status);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

			ql_log(ql_log_warn, vha, 0x5048,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x total_byte = 0x%x.\n",
			    type, comp_status,
			    bsg_job->reply->reply_payload_rcv_len);
		} else {
			ql_log(ql_log_warn, vha, 0x5049,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x.\n", type, comp_status);
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}
static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t *fw_sts_ptr;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;
	bsg_job = sp->u.bsg_job;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	case SRB_ELS_DCMD:
		type = "Driver ELS logo";
		ql_dbg(ql_dbg_user, vha, 0x5047,
		    "Completing %s: (%p) type=%d.\n", type, sp, sp->type);
		sp->done(vha, sp, 0);
		return;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
			    sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
			    sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		}
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}
static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - hdl=%x "
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(vha, sp, 0);
}
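/*
 * Illustration (not driver code): for a completed SRB_LOGIN_CMD the
 * firmware reports the peer's role in io_parameter[0]:
 *
 *	BIT_4 -> target (BIT_8 additionally marks an FCP-2 device)
 *	BIT_5 -> initiator
 *	BIT_7 -> confirmed completion supported
 *
 * so iop[0] = 0x110 (BIT_4|BIT_8) marks the fcport as an FCP-2 target.
 */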
static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	iocb->u.tmf.data = QLA_SUCCESS;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, le16_to_cpu(sts->comp_status));
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if ((le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			ql_log(ql_log_warn, fcport->vha, 0x503b,
			    "Async-%s error - hdl=%x not enough response(%d).\n",
			    type, sp->handle, le32_to_cpu(sts->rsp_data_len));
		} else if (sts->data[3]) {
			ql_log(ql_log_warn, fcport->vha, 0x503c,
			    "Async-%s error - hdl=%x response(%x).\n",
			    type, sp->handle, sts->data[3]);
			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
		}
	}

	if (iocb->u.tmf.data != QLA_SUCCESS)
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));

	sp->done(vha, sp, 0);
}
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t	handle_cnt;
	uint16_t	cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}
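/*
 * Illustration (not driver code): par_sense_len is how much sense data
 * fits in this status IOCB; anything beyond it arrives later in
 * status-continuation entries.  With sense_len = 40 and
 * par_sense_len = 28, the code above copies 28 bytes now, records
 * track_sense_len = 12, and parks the srb in rsp->status_srb so
 * qla2x00_status_cont_entry() can copy the remaining 12 bytes.
 */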
struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected the error.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == 0xffff) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct t10_pi_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = 0xffff;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	return 1;
}
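/*
 * Worked example (illustration only, not driver code): for the
 * "ignored sector" path above, suppose a 4096-byte read at lba 0x1000
 * (8 sectors of 512 bytes) stops at the sector whose expected ref tag
 * is 0x1003:
 *
 *	blocks_done = 0x1003 - 0x1000 + 1 = 4
 *	resid       = 4096 - 4 * 512     = 2048
 *
 * so the command is completed DID_OK with a 2048-byte residual, and the
 * protection tuple of the block that tripped the check is patched to the
 * "ignore" pattern (app_tag 0xffff, plus ref_tag 0xffffffff for Type 3).
 */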
static void
qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
    struct req_que *req, uint32_t index)
{
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint16_t thread_id;
	uint32_t rval = EXT_STATUS_OK;
	struct fc_bsg_job *bsg_job = NULL;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x70af,
		    "Invalid SCSI completion handle 0x%x.\n", index);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;
		bsg_job = sp->u.bsg_job;
	} else {
		ql_log(ql_log_warn, vha, 0x70b0,
		    "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
		    req->id, index);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	switch (comp_status) {
	case CS_COMPLETE:
		if (scsi_status == 0) {
			bsg_job->reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			vha->qla_stats.input_bytes +=
			    bsg_job->reply->reply_payload_rcv_len;
			vha->qla_stats.input_requests++;
			rval = EXT_STATUS_OK;
		}
		goto done;

	case CS_DATA_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b1,
		    "Command completed with data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_DATA_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b2,
		    "Command completed with data underrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;
	case CS_BIDIR_RD_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b3,
		    "Command completed with read data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b4,
		    "Command completed with read and write data overrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b5,
		    "Command completed with read data over and write data "
		    "underrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b6,
		    "Command completed with read data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b7,
		    "Command completed with read data under and write data "
		    "overrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b8,
		    "Command completed with read and write data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_DMA:
		ql_dbg(ql_dbg_user, vha, 0x70b9,
		    "Command completed with data DMA error thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DMA_ERR;
		break;

	case CS_TIMEOUT:
		ql_dbg(ql_dbg_user, vha, 0x70ba,
		    "Command completed with timeout thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_TIMEOUT;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x70bb,
		    "Command completed with completion status=0x%x "
		    "thread_id=%d\n", comp_status, thread_id);
		rval = EXT_STATUS_ERR;
		break;
	}
	bsg_job->reply->reply_payload_rcv_len = 0;

done:
	/* Return the vendor specific reply to API */
	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	/* Always return DID_OK, bsg will send the vendor specific response
	 * in this case only */
	sp->done(vha, sp, DID_OK << 16);
}
2041 * qla2x00_status_entry() - Process a Status IOCB entry.
2042 * @ha: SCSI driver HA context
2043 * @pkt: Entry pointer
2046 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2050 struct scsi_cmnd *cp;
2052 struct sts_entry_24xx *sts24;
2053 uint16_t comp_status;
2054 uint16_t scsi_status;
2056 uint8_t lscsi_status;
2058 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
2060 uint8_t *rsp_info, *sense_data;
2061 struct qla_hw_data *ha = vha->hw;
2064 struct req_que *req;
2067 uint16_t state_flags = 0;
2068 uint16_t retry_delay = 0;
2070 sts = (sts_entry_t *) pkt;
2071 sts24 = (struct sts_entry_24xx *) pkt;
2072 if (IS_FWI2_CAPABLE(ha)) {
2073 comp_status = le16_to_cpu(sts24->comp_status);
2074 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2075 state_flags = le16_to_cpu(sts24->state_flags);
2077 comp_status = le16_to_cpu(sts->comp_status);
2078 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2080 handle = (uint32_t) LSW(sts->handle);
2081 que = MSW(sts->handle);
2082 req = ha->req_q_map[que];
2084 /* Check for invalid queue pointer */
2086 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
2087 ql_dbg(ql_dbg_io, vha, 0x3059,
2088 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
2089 "que=%u.\n", sts->handle, req, que);
2093 /* Validate handle. */
2094 if (handle < req->num_outstanding_cmds) {
2095 sp = req->outstanding_cmds[handle];
2097 ql_dbg(ql_dbg_io, vha, 0x3075,
2098 "%s(%ld): Already returned command for status handle (0x%x).\n",
2099 __func__, vha->host_no, sts->handle);
2103 ql_dbg(ql_dbg_io, vha, 0x3017,
2104 "Invalid status handle, out of range (0x%x).\n",
2107 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
2108 if (IS_P3P_TYPE(ha))
2109 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2111 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2112 qla2xxx_wake_dpc(vha);
2117 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
2118 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
2122 /* Task Management completion. */
2123 if (sp->type == SRB_TM_CMD) {
2124 qla24xx_tm_iocb_entry(vha, req, pkt);
2128 /* Fast path completion. */
2129 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2130 qla2x00_process_completed_request(vha, req, handle);
2135 req->outstanding_cmds[handle] = NULL;
2136 cp = GET_CMD_SP(sp);
2138 ql_dbg(ql_dbg_io, vha, 0x3018,
2139 "Command already returned (0x%x/%p).\n",
2145 lscsi_status = scsi_status & STATUS_MASK;
2147 fcport = sp->fcport;
2150 sense_len = par_sense_len = rsp_info_len = resid_len =
2152 if (IS_FWI2_CAPABLE(ha)) {
2153 if (scsi_status & SS_SENSE_LEN_VALID)
2154 sense_len = le32_to_cpu(sts24->sense_len);
2155 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2156 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
2157 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
2158 resid_len = le32_to_cpu(sts24->rsp_residual_count);
2159 if (comp_status == CS_DATA_UNDERRUN)
2160 fw_resid_len = le32_to_cpu(sts24->residual_len);
2161 rsp_info = sts24->data;
2162 sense_data = sts24->data;
2163 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
2164 ox_id = le16_to_cpu(sts24->ox_id);
2165 par_sense_len = sizeof(sts24->data);
2166 /* Valid values of the retry delay timer are 0x1-0xffef */
2167 if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1)
2168 retry_delay = sts24->retry_delay;
2170 if (scsi_status & SS_SENSE_LEN_VALID)
2171 sense_len = le16_to_cpu(sts->req_sense_length);
2172 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2173 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
2174 resid_len = le32_to_cpu(sts->residual_length);
2175 rsp_info = sts->rsp_info;
2176 sense_data = sts->req_sense_data;
2177 par_sense_len = sizeof(sts->req_sense_data);
	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			res = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Check retry_delay_timer value if we receive a busy or
	 * queue full.
	 */
	if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
	    lscsi_status == SAM_STAT_BUSY)
		qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
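	/*
	 * A non-zero retry delay is cached on the fcport (see
	 * qla2x00_set_retry_delay_timestamp()) so the command submission
	 * path can hold off new I/O to this port until the delay expires.
	 */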
	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;
	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
			   lscsi_status != SAM_STAT_BUSY) {
			/*
			 * scsi status of task set full and busy are
			 * considered to be task not completed.
			 */
			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;
	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:

		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
		    "Port to be marked lost on fcport=%02x%02x%02x, current "
		    "port state= %s.\n", fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    port_state_str[atomic_read(&fcport->state)]);

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;
	case CS_DIF_ERROR:
		logit = qla2x00_handle_dif_error(sp, sts24);
		res = cp->result;
		break;

	case CS_TRANSPORT:
		res = DID_ERROR << 16;

		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
			break;

		if (state_flags & BIT_4)
			scmd_printk(KERN_WARNING, cp,
			    "Unsupported device '%s' found.\n",
			    cp->device->vendor);
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
		    "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd, scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len, sp, cp);

	if (rsp->status_srb == NULL)
		sp->done(ha, sp, res);
}
/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp || !GET_CMD_SENSE_LEN(sp))
		return;

	sense_len = GET_CMD_SENSE_LEN(sp);
	sense_ptr = GET_CMD_SENSE_PTR(sp);
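	/*
	 * rsp->status_srb carries the command whose sense data overflowed
	 * the status IOCB; each continuation entry supplies up to
	 * sizeof(pkt->data) further bytes until sense_len is exhausted.
	 */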
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x3025,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (sense_len > sizeof(pkt->data))
		sense_sz = sizeof(pkt->data);
	else
		sense_sz = sense_len;

	/* Move sense data. */
	if (IS_FWI2_CAPABLE(ha))
		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
	memcpy(sense_ptr, pkt->data, sense_sz);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
	    sense_ptr, sense_sz);

	sense_len -= sense_sz;
	sense_ptr += sense_sz;

	SET_CMD_SENSE_PTR(sp, sense_ptr);
	SET_CMD_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(ha, sp, cp->result);
	}
}
/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x502a,
	    "type of error status in response: 0x%x\n", pkt->entry_status);

	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];

	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;

	if (pkt->entry_type == NOTIFY_ACK_TYPE &&
	    pkt->handle == QLA_TGT_SKIP_HANDLE)
		return;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		sp->done(ha, sp, res);
		return;
	}
fatal:
	ql_log(ql_log_warn, vha, 0x5030,
	    "Error entry - invalid handle/queue (%04x).\n", que);
}
/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers? */
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
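	/*
	 * 'mboxes' is a bitmap of mailbox registers the caller expects
	 * (mcp->in_mb). Bit 0 was consumed for mailbox 0 above, so shift
	 * once before walking mailbox1..N and copy only the selected ones.
	 */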
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		mboxes >>= 1;
		wptr++;
	}
}
static void
qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct abort_entry_24xx *pkt)
{
	const char func[] = "ABT_IOCB";
	srb_t *sp;
	struct srb_iocb *abt;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;
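	/*
	 * The firmware returns the abort's completion status in the
	 * nport_handle field of the ABT IOCB; completing the srb here
	 * wakes the originator of the abort request.
	 */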
	abt = &sp->u.iocb_cmd;
	abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
	sp->done(vha, sp, 0);
}
/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		return;

	if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
		/* if kernel does not notify qla of IRQ's CPU change,
		 * then set it here.
		 */
		rsp->msix->cpuid = smp_processor_id();
		ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
	}

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);

			if (qlt_24xx_process_response_error(vha, pkt))
				goto process_err;

			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}
process_err:

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case ABTS_RECV_24XX:
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				/* ensure that the ATIO queue is empty */
				qlt_handle_abts_recv(vha, (response_t *)pkt);
				break;
			} else {
				/* drop through */
				qlt_24xx_process_atio_queue(vha, 1);
			}
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
		case NOTIFY_ACK_TYPE:
		case CTIO_CRC2:
			qlt_response_pkt_all_vps(vha, (response_t *)pkt);
			break;
		case MARKER_TYPE:
			/* Do nothing in this case, this check is to prevent it
			 * from falling into default case
			 */
			break;
		case ABORT_IOCB_TYPE:
			qla24xx_abort_iocb_entry(vha, rsp->req,
			    (struct abort_entry_24xx *)pkt);
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}
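	/*
	 * Entries are consumed until one still carrying the
	 * RESPONSE_PROCESSED stamp is found; stamping each handled entry,
	 * ordered by wmb(), keeps a re-run of this loop from processing
	 * stale packets.
	 */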
	/* Adjust ring index */
	if (IS_P3P_TYPE(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}
static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}
/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_24xx __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5059,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_warn, vha, 0x504b,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE: {
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
			break;
		}
		case INTR_ATIO_RSP_QUE_UPDATE: {
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);

			qla24xx_process_response_queue(vha, rsp);
			break;
		}
		default:
			ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
		if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
			ndelay(3500);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
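/*
 * With MSI-X the work above is split across vectors: entry 0 ("default")
 * handles mailbox completions, async events and ATIO updates, entry 1
 * ("rsp_q") services the base response queue, and additional response
 * queues ("multiq") each get their own vector (see msix_entries[] below).
 */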
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	struct scsi_qla_host *vha;
	unsigned long flags;
	uint32_t stat = 0;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505a,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	vha = pci_get_drvdata(ha->pdev);
	/*
	 * Use host_status register to check for PCI disconnection before
	 * we process the response queue.
	 */
	stat = RD_REG_DWORD(&reg->host_status);
	if (qla2x00_check_reg32_for_disconnect(vha, stat))
		goto out;
	qla24xx_process_response_queue(vha, rsp);
	if (!ha->flags.disable_msix_handshake) {
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
out:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t
qla25xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	scsi_qla_host_t *vha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;
	uint32_t hccr = 0;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	/* Clear the interrupt, if enabled, for this response queue */
	if (!ha->flags.disable_msix_handshake) {
		reg = &ha->iobase->isp24;
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}
	if (qla2x00_check_reg32_for_disconnect(vha, hccr))
		goto out;
	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);

out:
	return IRQ_HANDLED;
}
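/*
 * qla25xx_msix_rsp_q() does not drain the ring in interrupt context; it
 * queues rsp->q_work on ha->wq, pinned to the CPU implied by the queue id,
 * so each added response queue is processed CPU-locally.
 */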
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int status;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[8];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE: {
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
			break;
		}
		case INTR_ATIO_RSP_QUE_UPDATE: {
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);

			qla24xx_process_response_queue(vha, rsp);
			break;
		}
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static struct qla_init_msix_entry msix_entries[3] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};

static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (atio_q)", qla83xx_msix_atio_q },
};
static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
	int i;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		if (qentry->have_irq) {
			/* un-register irq cpu affinity notification */
			irq_set_affinity_notifier(qentry->vector, NULL);
			free_irq(qentry->vector, qentry->rsp);
		}
	}
	pci_disable_msix(ha->pdev);
	kfree(ha->msix_entries);
	ha->msix_entries = NULL;
	ha->flags.msix_enabled = 0;
	ql_dbg(ql_dbg_init, vha, 0x0042,
	    "Disabled MSI-X.\n");
}
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT	2
#define ATIO_VECTOR	2
	int i, ret;
	struct msix_entry *entries;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
			GFP_KERNEL);
	if (!entries) {
		ql_log(ql_log_warn, vha, 0x00bc,
		    "Failed to allocate memory for msix_entry.\n");
		return -ENOMEM;
	}

	for (i = 0; i < ha->msix_count; i++)
		entries[i].entry = i;

	ret = pci_enable_msix_range(ha->pdev,
	    entries, MIN_MSIX_COUNT, ha->msix_count);
	if (ret < 0) {
		ql_log(ql_log_fatal, vha, 0x00c7,
		    "MSI-X: Failed to enable support, "
		    "giving up -- %d/%d.\n",
		    ha->msix_count, ret);
		goto msix_out;
	} else if (ret < ha->msix_count) {
		ql_log(ql_log_warn, vha, 0x00c6,
		    "MSI-X: Failed to enable support "
		    "-- %d/%d. Retrying with %d vectors.\n",
		    ha->msix_count, ret, ret);
		ha->msix_count = ret;
		ha->max_rsp_queues = ha->msix_count - 1;
	}
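	/*
	 * One vector is reserved for the default (mailbox/async event)
	 * interrupt; the remainder back response queues, hence
	 * max_rsp_queues becomes msix_count - 1 after a partial grant.
	 */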
	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
	    ha->msix_count, GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto free_irqs;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = entries[i].vector;
		qentry->entry = entries[i].entry;
		qentry->have_irq = 0;
		qentry->rsp = NULL;
		qentry->irq_notify.notify = qla_irq_affinity_notify;
		qentry->irq_notify.release = qla_irq_affinity_release;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < 2; i++) {
		qentry = &ha->msix_entries[i];
		qentry->rsp = rsp;
		rsp->msix = qentry;
		if (IS_P3P_TYPE(ha))
			ret = request_irq(qentry->vector,
				qla82xx_msix_entries[i].handler,
				0, qla82xx_msix_entries[i].name, rsp);
		else
			ret = request_irq(qentry->vector,
				msix_entries[i].handler,
				0, msix_entries[i].name, rsp);
		if (ret)
			goto msix_register_fail;
		qentry->have_irq = 1;

		/* Register for CPU affinity notification. */
		irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);

		/* Schedule work (i.e. trigger a notification) to read cpu
		 * mask for this specific irq.
		 * kref_get is required because
		 * irq_affinity_notify() will do kref_put().
		 */
		kref_get(&qentry->irq_notify.kref);
		schedule_work(&qentry->irq_notify.work);
	}
	/*
	 * If target mode is enabled, also request the vector for the ATIO
	 * queue.
	 */
	if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
		qentry = &ha->msix_entries[ATIO_VECTOR];
		qentry->rsp = rsp;
		rsp->msix = qentry;
		ret = request_irq(qentry->vector,
			qla83xx_msix_entries[ATIO_VECTOR].handler,
			0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
		qentry->have_irq = 1;
	}

msix_register_fail:
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00cb,
		    "MSI-X: unable to register handler -- %x/%d.\n",
		    qentry->vector, ret);
		qla24xx_disable_msix(ha);
		ha->mqenable = 0;
		goto msix_out;
	}
	/* Enable MSI-X vector for response queue update for queue 0 */
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		if (ha->msixbase && ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	} else
		if (ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	kfree(entries);
	return ret;

free_irqs:
	pci_free_irq_vectors(ha->pdev);
	goto msix_out;
}
int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret = QLA_FUNCTION_FAILED;
	device_reg_t *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha))
		goto skip_msi;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	     ha->pdev->subsystem_device == 0x7041 ||
	     ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}

skip_msix:

	ql_log(ql_log_info, vha, 0x0037,
	    "Falling back to MSI mode -- %d.\n", ret);

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha))
		goto skip_msi;

	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "Falling back to INTa mode -- %d.\n", ret);
skip_msi:

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;
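	/*
	 * Last resort: legacy INTx. The line may be shared with other
	 * devices, hence IRQF_SHARED whenever MSI could not be enabled.
	 */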
	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d; already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled) {
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
		ha->flags.mr_intr_valid = 1;
	}

clear_risc_ints:
	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
		goto fail;

	spin_lock_irq(&ha->hardware_lock);
	WRT_REG_WORD(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}
void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;

	/*
	 * We need to check that ha->rsp_q_map is valid in case we are called
	 * from a probe failure context.
	 */
	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
		return;
	rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled)
		qla24xx_disable_msix(ha);
	else if (ha->flags.msi_enabled) {
		free_irq(ha->pdev->irq, rsp);
		pci_disable_msi(ha->pdev);
	} else
		free_irq(ha->pdev->irq, rsp);
}
int qla25xx_request_irq(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;
	struct qla_init_msix_entry *intr = &msix_entries[2];
	struct qla_msix_entry *msix = rsp->msix;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);
		return ret;
	}
	msix->have_irq = 1;
	msix->rsp = rsp;

	return ret;
}
/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
	const cpumask_t *mask)
{
	struct qla_msix_entry *e =
		container_of(notify, struct qla_msix_entry, irq_notify);
	struct qla_hw_data *ha;
	struct scsi_qla_host *base_vha;

	/* user is recommended to set mask to just 1 cpu */
	e->cpuid = cpumask_first(mask);

	ha = e->rsp->hw;
	base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_init, base_vha, 0xffff,
	    "%s: host %ld: vector %d cpu %d\n", __func__,
	    base_vha->host_no, e->vector, e->cpuid);

	if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
	    (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
		ha->tgt.rspq_vector_cpuid = e->cpuid;
		ql_dbg(ql_dbg_init, base_vha, 0xffff,
		    "%s: host%ld: rspq vector %d cpu %d runtime change\n",
		    __func__, base_vha->host_no, e->vector, e->cpuid);
	}
}
static void qla_irq_affinity_release(struct kref *ref)
{
	struct irq_affinity_notify *notify =
		container_of(ref, struct irq_affinity_notify, kref);
	struct qla_msix_entry *e =
		container_of(notify, struct qla_msix_entry, irq_notify);
	struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev);

	ql_dbg(ql_dbg_init, base_vha, 0xffff,
	    "%s: host%ld: vector %d cpu %d\n", __func__,
	    base_vha->host_no, e->vector, e->cpuid);
}