1 // SPDX-License-Identifier: GPL-2.0-only
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/vmalloc.h>
13 #include "qla_devtbl.h"
19 #include "qla_target.h"
22 * QLogic ISP2x00 Hardware Support Function Prototypes.
24 static int qla2x00_isp_firmware(scsi_qla_host_t *);
25 static int qla2x00_setup_chip(scsi_qla_host_t *);
26 static int qla2x00_fw_ready(scsi_qla_host_t *);
27 static int qla2x00_configure_hba(scsi_qla_host_t *);
28 static int qla2x00_configure_loop(scsi_qla_host_t *);
29 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
30 static int qla2x00_configure_fabric(scsi_qla_host_t *);
31 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
32 static int qla2x00_restart_isp(scsi_qla_host_t *);
34 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
35 static int qla84xx_init_chip(scsi_qla_host_t *);
36 static int qla25xx_init_queues(struct qla_hw_data *);
37 static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
38 struct event_arg *ea);
39 static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
41 static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
43 /* SRB Extensions ---------------------------------------------------------- */
/*
 * Timer expiry callback for an SRB: drops the reference the timer held
 * on the srb. (Return type and opening lines elided in this view.)
 */
46 qla2x00_sp_timeout(struct timer_list *t)
48 srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
49 struct srb_iocb *iocb;
/* Sanity check: this callback must not run with interrupts disabled. */
51 WARN_ON(irqs_disabled());
52 iocb = &sp->u.iocb_cmd;
/* Release the srb reference held by the timer. */
56 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/* Free an srb: stop its IOCB timer before the object goes away. */
59 void qla2x00_sp_free(srb_t *sp)
61 struct srb_iocb *iocb = &sp->u.iocb_cmd;
63 del_timer(&iocb->timer);
/* Canary done() handler: fires once if done() is invoked on a freed srb. */
67 void qla2xxx_rel_done_warning(srb_t *sp, int res)
69 WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
/* Canary free() handler: fires once if free() is invoked on a freed srb. */
72 void qla2xxx_rel_free_warning(srb_t *sp)
74 WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
77 /* Asynchronous Login/Logout Routines -------------------------------------- */
/*
 * Compute the timeout (in seconds) used for async login/logout-style
 * commands. Normally 2 x R_A_TOV (r_a_tov is stored in tenths of a
 * second); FX00-type adapters and pre-FWI2 ISPs use alternate sources.
 * (Some branch conditions are elided in this view.)
 */
80 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
83 struct qla_hw_data *ha = vha->hw;
85 /* Firmware should use switch negotiated r_a_tov for timeout. */
86 tmo = ha->r_a_tov / 10 * 2;
88 tmo = FX00_DEF_RATOV * 2;
89 } else if (!IS_FWI2_CAPABLE(ha)) {
91 * Except for earlier ISPs where the timeout is seeded from the
92 * initialization control block.
94 tmo = ha->login_timeout;
/*
 * Timeout handler for an ABTS (abort) IOCB. Removes both the aborted
 * command and the abort srb itself from the queue pair's outstanding
 * command table, then completes both with QLA_OS_TIMER_EXPIRED.
 */
99 static void qla24xx_abort_iocb_timeout(void *data)
102 struct srb_iocb *abt = &sp->u.iocb_cmd;
103 struct qla_qpair *qpair = sp->qpair;
108 ql_dbg(ql_dbg_async, sp->vha, 0x507c,
109 "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n",
110 sp->cmd_sp->handle, sp->cmd_sp->type,
111 sp->handle, sp->type);
113 ql_dbg(ql_dbg_async, sp->vha, 0x507c,
114 "Abort timeout 2 - hdl=%x, type=%x\n",
115 sp->handle, sp->type);
/*
 * Scan the outstanding-command table under the qpair lock and clear
 * the slots for the original command and for this abort srb.
 * Handle 0 is skipped (reserved).
 */
117 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
118 for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
119 if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
121 qpair->req->outstanding_cmds[handle] = NULL;
123 /* removing the abort */
124 if (qpair->req->outstanding_cmds[handle] == sp) {
125 qpair->req->outstanding_cmds[handle] = NULL;
129 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
133 * This done function should take care of
134 * original command ref: INIT
136 sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
/* Complete the abort srb itself as timed out. */
139 abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
140 sp->done(sp, QLA_OS_TIMER_EXPIRED);
/*
 * Completion callback for an abort srb: wait for NVMe to release the
 * original command's kref, wake any synchronous waiter, then drop this
 * srb's reference.
 */
143 static void qla24xx_abort_sp_done(srb_t *sp, int res)
145 struct srb_iocb *abt = &sp->u.iocb_cmd;
146 srb_t *orig_sp = sp->cmd_sp;
149 qla_wait_nvme_release_cmd_kref(orig_sp);
/* Wake a caller blocked in qla24xx_async_abort_cmd(wait=true). */
151 if (sp->flags & SRB_WAKEUP_ON_COMP)
152 complete(&abt->u.abt.comp);
155 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * Issue an asynchronous ABTS for @cmd_sp on the same queue pair.
 * @wait: when true, block until the abort completes and translate the
 * firmware completion status into QLA_SUCCESS / QLA_ERR_FROM_FW.
 * Returns QLA_MEMORY_ALLOC_FAILED if no srb could be allocated.
 */
158 int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
160 scsi_qla_host_t *vha = cmd_sp->vha;
161 struct srb_iocb *abt_iocb;
163 int rval = QLA_FUNCTION_FAILED;
165 /* ref: INIT for ABTS command */
166 sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
169 return QLA_MEMORY_ALLOC_FAILED;
171 abt_iocb = &sp->u.iocb_cmd;
172 sp->type = SRB_ABT_CMD;
174 sp->qpair = cmd_sp->qpair;
177 sp->flags = SRB_WAKEUP_ON_COMP;
179 init_completion(&abt_iocb->u.abt.comp);
180 /* FW can send 2 x ABTS's timeout/20s */
181 qla2x00_init_async_sp(sp, 42, qla24xx_abort_sp_done);
/* Custom timeout handler replaces the default installed above. */
182 sp->u.iocb_cmd.timeout = qla24xx_abort_iocb_timeout;
184 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
185 abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
187 ql_dbg(ql_dbg_async, vha, 0x507c,
188 "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
191 rval = qla2x00_start_sp(sp);
192 if (rval != QLA_SUCCESS) {
/* Start failed: drop the INIT reference taken at allocation. */
194 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/* Synchronous path: wait for the done callback to signal completion. */
199 wait_for_completion(&abt_iocb->u.abt.comp);
200 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
201 QLA_SUCCESS : QLA_ERR_FROM_FW;
203 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * Generic timeout handler for async IOCBs (login/logout/CT passthru...).
 * Attempts to abort the timed-out srb; if the abort cannot be issued,
 * removes the srb from the outstanding-command table and completes it
 * with QLA_FUNCTION_TIMEOUT. (Switch labels/branches partially elided.)
 */
210 qla2x00_async_iocb_timeout(void *data)
213 fc_port_t *fcport = sp->fcport;
214 struct srb_iocb *lio = &sp->u.iocb_cmd;
219 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
220 "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
221 sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
223 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
/* No fcport attached (e.g. CT passthru): log with handle only. */
225 pr_info("Async-%s timeout - hdl=%x.\n",
226 sp->name, sp->handle);
231 rc = qla24xx_async_abort_cmd(sp, false);
233 /* Retry as needed. */
234 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
235 lio->u.logio.data[1] =
236 lio->u.logio.flags & SRB_LOGIN_RETRIED ?
237 QLA_LOGIO_LOGIN_RETRIED : 0;
/* Abort failed: manually clear our slot in the outstanding table. */
238 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
239 for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
241 if (sp->qpair->req->outstanding_cmds[h] ==
243 sp->qpair->req->outstanding_cmds[h] =
248 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
249 sp->done(sp, QLA_FUNCTION_TIMEOUT);
253 case SRB_CT_PTHRU_CMD:
260 rc = qla24xx_async_abort_cmd(sp, false);
/* Same fallback as above for the non-logio command types. */
262 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
263 for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
265 if (sp->qpair->req->outstanding_cmds[h] ==
267 sp->qpair->req->outstanding_cmds[h] =
272 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
273 sp->done(sp, QLA_FUNCTION_TIMEOUT);
/*
 * Completion callback for an async PLOGI srb. Unless the host is
 * unloading, packages the logio mailbox data into an event_arg and
 * forwards it to qla24xx_handle_plogi_done_event().
 */
279 static void qla2x00_async_login_sp_done(srb_t *sp, int res)
281 struct scsi_qla_host *vha = sp->vha;
282 struct srb_iocb *lio = &sp->u.iocb_cmd;
285 ql_dbg(ql_dbg_disc, vha, 0x20dd,
286 "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
288 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
290 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
291 memset(&ea, 0, sizeof(ea));
292 ea.fcport = sp->fcport;
293 ea.data[0] = lio->u.logio.data[0];
294 ea.data[1] = lio->u.logio.data[1];
295 ea.iop[0] = lio->u.logio.iop[0];
296 ea.iop[1] = lio->u.logio.iop[1];
/* Non-success result path: report a generic command error. */
299 ea.data[0] = MBS_COMMAND_ERROR;
300 qla24xx_handle_plogi_done_event(vha, &ea);
/* ref: INIT */
304 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * Issue an asynchronous PLOGI to @fcport. Refuses to send when the host
 * is offline, a prior async command is still outstanding, or no loop ID
 * is assigned. Login flags vary with topology (N2N), EDIF state, and
 * NVMe targets. Returns a QLA_* status.
 */
308 qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
312 struct srb_iocb *lio;
313 int rval = QLA_FUNCTION_FAILED;
315 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
316 fcport->loop_id == FC_NO_LOOP_ID) {
317 ql_log(ql_log_warn, vha, 0xffff,
318 "%s: %8phC - not sending command.\n",
319 __func__, fcport->port_name);
/* ref: INIT */
324 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
328 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
329 fcport->flags |= FCF_ASYNC_SENT;
330 fcport->logout_completed = 0;
332 sp->type = SRB_LOGIN_CMD;
/* Snapshot generation counters to detect RSCN/login races later. */
334 sp->gen1 = fcport->rscn_gen;
335 sp->gen2 = fcport->login_gen;
336 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
337 qla2x00_async_login_sp_done);
339 lio = &sp->u.iocb_cmd;
/* N2N with a bigger remote WWPN: skip PLOGI, go straight to PRLI. */
340 if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
341 lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
343 if (vha->hw->flags.edif_enabled &&
344 vha->e_dbell.db_flags & EDB_ACTIVE) {
/* EDIF active: request a secure (FC-SP) login and defer PRLI. */
345 lio->u.logio.flags |=
346 (SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI);
347 ql_dbg(ql_dbg_disc, vha, 0x2072,
348 "Async-login: w/ FCSP %8phC hdl=%x, loopid=%x portid=%06x\n",
349 fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24);
351 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
/* NVMe targets negotiate PRLI separately. */
355 if (NVME_TARGET(vha->hw, fcport))
356 lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
358 ql_dbg(ql_dbg_disc, vha, 0x2072,
359 "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
360 fcport->port_name, sp->handle, fcport->loop_id,
361 fcport->d_id.b24, fcport->login_retry);
363 rval = qla2x00_start_sp(sp);
364 if (rval != QLA_SUCCESS) {
/* Could not issue: flag for the DPC thread to retry the relogin. */
365 fcport->flags |= FCF_LOGIN_NEEDED;
366 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
/* Error unwind: drop the INIT reference and clear async flags. */
374 kref_put(&sp->cmd_kref, qla2x00_sp_release);
375 fcport->flags &= ~FCF_ASYNC_SENT;
377 fcport->flags &= ~FCF_ASYNC_ACTIVE;
/*
 * Completion callback for an async LOGO srb: clear async flags, bump
 * the login generation, and hand the logio status to the target-mode
 * logout completion handler.
 */
381 static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
383 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
384 sp->fcport->login_gen++;
385 qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]);
/* ref: INIT */
387 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * Issue an asynchronous LOGO (fabric logout) for @fcport.
 * Returns a QLA_* status.
 */
391 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
394 int rval = QLA_FUNCTION_FAILED;
396 fcport->flags |= FCF_ASYNC_SENT;
/* ref: INIT */
398 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
402 sp->type = SRB_LOGOUT_CMD;
404 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
/*
 * NOTE(review): the trailing ',' below is a comma operator where a ';'
 * was almost certainly intended. Harmless as written, but worth fixing.
 */
405 qla2x00_async_logout_sp_done),
407 ql_dbg(ql_dbg_disc, vha, 0x2070,
408 "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n",
409 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
410 fcport->d_id.b.area, fcport->d_id.b.al_pa,
411 fcport->port_name, fcport->explicit_logout);
413 rval = qla2x00_start_sp(sp);
414 if (rval != QLA_SUCCESS)
/* Error unwind: drop the INIT reference and clear async flags. */
420 kref_put(&sp->cmd_kref, qla2x00_sp_release);
422 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
/*
 * Work-queue follow-up after an async PRLO completes: mark the device
 * lost (initiator mode only) and run the logout completion handler.
 */
427 qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
430 fcport->flags &= ~FCF_ASYNC_ACTIVE;
431 /* Don't re-login in target mode */
432 if (!fcport->tgt_session)
433 qla2x00_mark_device_lost(vha, fcport, 1);
434 qlt_logo_completion_handler(fcport, data[0]);
/*
 * Completion callback for an async PRLO srb: queue the follow-up work
 * (unless the host is unloading) and drop the srb reference.
 */
437 static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
439 struct srb_iocb *lio = &sp->u.iocb_cmd;
440 struct scsi_qla_host *vha = sp->vha;
442 sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
443 if (!test_bit(UNLOADING, &vha->dpc_flags))
444 qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
/* ref: INIT */
447 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * Issue an asynchronous PRLO (process logout) for @fcport.
 * Returns a QLA_* status.
 */
451 qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
456 rval = QLA_FUNCTION_FAILED;
/* ref: INIT */
458 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
462 sp->type = SRB_PRLO_CMD;
464 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
465 qla2x00_async_prlo_sp_done);
467 ql_dbg(ql_dbg_disc, vha, 0x2070,
468 "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
469 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
470 fcport->d_id.b.area, fcport->d_id.b.al_pa);
472 rval = qla2x00_start_sp(sp);
473 if (rval != QLA_SUCCESS)
/* Error unwind: drop the INIT reference and clear the active flag. */
480 kref_put(&sp->cmd_kref, qla2x00_sp_release);
482 fcport->flags &= ~FCF_ASYNC_ACTIVE;
/*
 * Handle completion of an ADISC for @ea->fcport. A failed ADISC tears
 * the session down for relogin; a stale login/RSCN generation aborts the
 * flow; otherwise the port-database update path is invoked.
 */
487 void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
489 struct fc_port *fcport = ea->fcport;
491 ql_dbg(ql_dbg_disc, vha, 0x20d2,
492 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
493 __func__, fcport->port_name, fcport->disc_state,
494 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
495 fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
497 WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
500 if (ea->data[0] != MBS_COMMAND_COMPLETE) {
501 ql_dbg(ql_dbg_disc, vha, 0x2066,
502 "%s %8phC: adisc fail: post delete\n",
503 __func__, ea->fcport->port_name);
504 /* deleted = 0 & logout_on_delete = force fw cleanup */
506 fcport->logout_on_delete = 1;
507 qlt_schedule_sess_for_deletion(ea->fcport);
/* Session already being torn down: nothing further to do. */
511 if (ea->fcport->disc_state == DSC_DELETE_PEND)
/* Generation mismatches mean state changed while ADISC was in flight. */
514 if (ea->sp->gen2 != ea->fcport->login_gen) {
515 /* target side must have changed it. */
516 ql_dbg(ql_dbg_disc, vha, 0x20d3,
517 "%s %8phC generation changed\n",
518 __func__, ea->fcport->port_name);
520 } else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
521 qla_rscn_replay(fcport);
522 qlt_schedule_sess_for_deletion(fcport);
526 __qla24xx_handle_gpdb_event(vha, ea);
/*
 * Queue a QLA_EVT_ELS_PLOGI work item for @fcport on the DPC work list.
 * Marks the port async-active and login-pending before posting.
 * Returns QLA_FUNCTION_FAILED if the work item cannot be allocated.
 */
529 static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
531 struct qla_work_evt *e;
533 e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
535 return QLA_FUNCTION_FAILED;
537 e->u.fcport.fcport = fcport;
538 fcport->flags |= FCF_ASYNC_ACTIVE;
539 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
540 return qla2x00_post_work(vha, e);
/*
 * Completion callback for an async ADISC srb: package the logio data
 * into an event_arg and dispatch to qla24xx_handle_adisc_event().
 */
543 static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
545 struct scsi_qla_host *vha = sp->vha;
547 struct srb_iocb *lio = &sp->u.iocb_cmd;
549 ql_dbg(ql_dbg_disc, vha, 0x2066,
550 "Async done-%s res %x %8phC\n",
551 sp->name, res, sp->fcport->port_name);
553 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
555 memset(&ea, 0, sizeof(ea));
557 ea.data[0] = lio->u.logio.data[0];
558 ea.data[1] = lio->u.logio.data[1];
559 ea.iop[0] = lio->u.logio.iop[0];
560 ea.iop[1] = lio->u.logio.iop[1];
561 ea.fcport = sp->fcport;
/* Non-success result path: report a generic command error. */
564 ea.data[0] = MBS_COMMAND_ERROR;
566 qla24xx_handle_adisc_event(vha, &ea);
/* ref: INIT */
568 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * Issue an asynchronous ADISC to @fcport. Bails out if the session is
 * being deleted, the host is offline, or an async command is already
 * outstanding. On failure the operation is re-queued as work.
 */
572 qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
576 struct srb_iocb *lio;
577 int rval = QLA_FUNCTION_FAILED;
579 if (IS_SESSION_DELETED(fcport)) {
580 ql_log(ql_log_warn, vha, 0xffff,
581 "%s: %8phC is being delete - not sending command.\n",
582 __func__, fcport->port_name);
583 fcport->flags &= ~FCF_ASYNC_ACTIVE;
587 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
590 fcport->flags |= FCF_ASYNC_SENT;
/* ref: INIT */
592 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
596 sp->type = SRB_ADISC_CMD;
/* Snapshot generation counters to detect RSCN/login races later. */
598 sp->gen1 = fcport->rscn_gen;
599 sp->gen2 = fcport->login_gen;
600 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
601 qla2x00_async_adisc_sp_done);
/* Propagate the caller's retried-login indication into the IOCB. */
603 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) {
604 lio = &sp->u.iocb_cmd;
605 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
608 ql_dbg(ql_dbg_disc, vha, 0x206f,
609 "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
610 sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
612 rval = qla2x00_start_sp(sp);
613 if (rval != QLA_SUCCESS)
/* Error unwind: drop reference, clear flags, retry via work queue. */
620 kref_put(&sp->cmd_kref, qla2x00_sp_release);
622 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
623 qla2x00_post_async_adisc_work(vha, fcport, data);
/*
 * Return true if @loop_id is reserved and must not be assigned to a
 * port: beyond NPH_LAST_HANDLE on FWI2-capable ISPs; otherwise the gap
 * above max_loop_id below the SNS range, or the management/broadcast IDs.
 */
627 static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
629 struct qla_hw_data *ha = vha->hw;
631 if (IS_FWI2_CAPABLE(ha))
632 return loop_id > NPH_LAST_HANDLE;
634 return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
635 loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
639 * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID
640 * @vha: adapter state pointer.
641 * @dev: port structure pointer.
644 * qla2x00 local function return status code.
/*
 * Allocate a new loop ID for @dev from the adapter-wide loop_id_map,
 * under vport_slock. Sets dev->loop_id to FC_NO_LOOP_ID and returns
 * QLA_FUNCTION_FAILED when the map is exhausted or only reserved IDs
 * remain; otherwise marks the ID used and returns QLA_SUCCESS.
 */
649 static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
652 struct qla_hw_data *ha = vha->hw;
653 unsigned long flags = 0;
657 spin_lock_irqsave(&ha->vport_slock, flags);
659 dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
660 if (dev->loop_id >= LOOPID_MAP_SIZE ||
661 qla2x00_is_reserved_id(vha, dev->loop_id)) {
662 dev->loop_id = FC_NO_LOOP_ID;
663 rval = QLA_FUNCTION_FAILED;
665 set_bit(dev->loop_id, ha->loop_id_map);
667 spin_unlock_irqrestore(&ha->vport_slock, flags);
669 if (rval == QLA_SUCCESS)
670 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
671 "Assigning new loopid=%x, portid=%x.\n",
672 dev->loop_id, dev->d_id.b24);
674 ql_log(ql_log_warn, dev->vha, 0x2087,
675 "No loop_id's available, portid=%x.\n",
/*
 * Return @fcport's loop ID to the free map and reset it to
 * FC_NO_LOOP_ID. Unassigned or reserved IDs are left untouched.
 */
681 void qla2x00_clear_loop_id(fc_port_t *fcport)
683 struct qla_hw_data *ha = fcport->vha->hw;
685 if (fcport->loop_id == FC_NO_LOOP_ID ||
686 qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
689 clear_bit(fcport->loop_id, ha->loop_id_map);
690 fcport->loop_id = FC_NO_LOOP_ID;
/*
 * Process the firmware Get Name List (GNL) result for @ea->fcport.
 * Walks the returned name-list entries, reconciles loop IDs, N-Port IDs
 * and FC4 type with the local fcport, resolves loop-ID/NPort-ID
 * conflicts, and drives the next login step based on the firmware's
 * per-entry login state and the current topology. A trailing pass
 * handles the case where the fcport was not found in the list at all
 * (N2N link-reset/chip-reset escalation, or retry via RELOGIN_NEEDED).
 */
693 static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
694 struct event_arg *ea)
696 fc_port_t *fcport, *conflict_fcport;
697 struct get_name_list_extended *e;
698 u16 i, n, found = 0, loop_id;
702 u8 current_login_state, nvme_cls;
705 ql_dbg(ql_dbg_disc, vha, 0xffff,
706 "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d edif %d\n",
707 __func__, fcport->port_name, fcport->disc_state,
708 fcport->fw_login_state, ea->rc,
709 fcport->login_gen, fcport->last_login_gen,
710 fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable);
712 if (fcport->disc_state == DSC_DELETE_PEND)
/* GNL itself failed: give up once the login retries are exhausted. */
715 if (ea->rc) { /* rval */
716 if (fcport->login_retry == 0) {
717 ql_dbg(ql_dbg_disc, vha, 0x20de,
718 "GNL failed Port login retry %8phN, retry cnt=%d.\n",
719 fcport->port_name, fcport->login_retry);
/* Generation changed while GNL was in flight: restart discovery. */
724 if (fcport->last_rscn_gen != fcport->rscn_gen) {
725 qla_rscn_replay(fcport);
726 qlt_schedule_sess_for_deletion(fcport);
728 } else if (fcport->last_login_gen != fcport->login_gen) {
729 ql_dbg(ql_dbg_disc, vha, 0x20e0,
730 "%s %8phC login gen changed\n",
731 __func__, fcport->port_name);
732 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
/* ea->data[0] carries the byte count transferred by the GNL mailbox. */
736 n = ea->data[0] / sizeof(struct get_name_list_extended);
738 ql_dbg(ql_dbg_disc, vha, 0x20e1,
739 "%s %d %8phC n %d %02x%02x%02x lid %d \n",
740 __func__, __LINE__, fcport->port_name, n,
741 fcport->d_id.b.domain, fcport->d_id.b.area,
742 fcport->d_id.b.al_pa, fcport->loop_id);
/* Pass 1: find this fcport's WWPN among the firmware's entries. */
744 for (i = 0; i < n; i++) {
746 wwn = wwn_to_u64(e->port_name);
747 id.b.domain = e->port_id[2];
748 id.b.area = e->port_id[1];
749 id.b.al_pa = e->port_id[0];
752 if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
755 if (IS_SW_RESV_ADDR(id))
/* Mask the reserve bit off the firmware's N-Port handle. */
760 loop_id = le16_to_cpu(e->nport_handle);
761 loop_id = (loop_id & 0x7fff);
/* Upper nibble of current_login_state carries the NVMe PRLI state. */
762 nvme_cls = e->current_login_state >> 4;
763 current_login_state = e->current_login_state & 0xf;
765 if (PRLI_PHASE(nvme_cls)) {
766 current_login_state = nvme_cls;
767 fcport->fc4_type &= ~FS_FC4TYPE_FCP;
768 fcport->fc4_type |= FS_FC4TYPE_NVME;
769 } else if (PRLI_PHASE(current_login_state)) {
770 fcport->fc4_type |= FS_FC4TYPE_FCP;
771 fcport->fc4_type &= ~FS_FC4TYPE_NVME;
774 ql_dbg(ql_dbg_disc, vha, 0x20e2,
775 "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
776 __func__, fcport->port_name,
777 e->current_login_state, fcport->fw_login_state,
778 fcport->fc4_type, id.b24, fcport->d_id.b24,
779 loop_id, fcport->loop_id);
781 switch (fcport->disc_state) {
782 case DSC_DELETE_PEND:
/* ID or handle moved: tear the stale session down first. */
786 if ((id.b24 != fcport->d_id.b24 &&
788 fcport->loop_id != FC_NO_LOOP_ID) ||
789 (fcport->loop_id != FC_NO_LOOP_ID &&
790 fcport->loop_id != loop_id)) {
791 ql_dbg(ql_dbg_disc, vha, 0x20e3,
792 "%s %d %8phC post del sess\n",
793 __func__, __LINE__, fcport->port_name);
794 if (fcport->n2n_flag)
795 fcport->d_id.b24 = 0;
796 qlt_schedule_sess_for_deletion(fcport);
/* Adopt the firmware's handle (and, for N2N, its N-Port ID). */
802 fcport->loop_id = loop_id;
803 if (fcport->n2n_flag)
804 fcport->d_id.b24 = id.b24;
806 wwn = wwn_to_u64(fcport->port_name);
807 qlt_find_sess_invalidate_other(vha, wwn,
808 id, loop_id, &conflict_fcport);
810 if (conflict_fcport) {
812 * Another share fcport share the same loop_id &
813 * nport id. Conflict fcport needs to finish
814 * cleanup before this fcport can proceed to login.
816 conflict_fcport->conflict = fcport;
817 fcport->login_pause = 1;
/* Topology-specific handling of the firmware login state. */
820 switch (vha->hw->current_topology) {
822 switch (current_login_state) {
823 case DSC_LS_PRLI_COMP:
825 vha, 0x20e4, "%s %d %8phC post gpdb\n",
826 __func__, __LINE__, fcport->port_name);
/* BIT_4 of PRLI service parameter word 3 = target function. */
828 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
829 fcport->port_type = FCT_INITIATOR;
831 fcport->port_type = FCT_TARGET;
832 data[0] = data[1] = 0;
833 qla2x00_post_async_adisc_work(vha, fcport,
836 case DSC_LS_PLOGI_COMP:
837 if (vha->hw->flags.edif_enabled) {
838 /* check to see if App support Secure */
839 qla24xx_post_gpdb_work(vha, fcport, 0);
843 case DSC_LS_PORT_UNAVAIL:
845 if (fcport->loop_id == FC_NO_LOOP_ID) {
846 qla2x00_find_new_loop_id(vha, fcport);
847 fcport->fw_login_state =
850 ql_dbg(ql_dbg_disc, vha, 0x20e5,
851 "%s %d %8phC\n", __func__, __LINE__,
853 qla24xx_fcport_handle_login(vha, fcport);
/* N2N branch: mirror firmware state and arbitrate by WWPN size. */
858 fcport->fw_login_state = current_login_state;
860 switch (current_login_state) {
861 case DSC_LS_PRLI_PEND:
863 * In the middle of PRLI. Let it finish.
864 * Allow relogin code to recheck state again
865 * with GNL. Push disc_state back to DELETED
866 * so GNL can go out again
868 qla2x00_set_fcport_disc_state(fcport,
870 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
872 case DSC_LS_PRLI_COMP:
873 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
874 fcport->port_type = FCT_INITIATOR;
876 fcport->port_type = FCT_TARGET;
878 data[0] = data[1] = 0;
879 qla2x00_post_async_adisc_work(vha, fcport,
882 case DSC_LS_PLOGI_COMP:
883 if (vha->hw->flags.edif_enabled &&
884 vha->e_dbell.db_flags & EDB_ACTIVE) {
885 /* check to see if App support secure or not */
886 qla24xx_post_gpdb_work(vha, fcport, 0);
889 if (fcport_is_bigger(fcport)) {
890 /* local adapter is smaller */
891 if (fcport->loop_id != FC_NO_LOOP_ID)
892 qla2x00_clear_loop_id(fcport);
894 fcport->loop_id = loop_id;
895 qla24xx_fcport_handle_login(vha,
901 if (fcport_is_smaller(fcport)) {
902 /* local adapter is bigger */
903 if (fcport->loop_id != FC_NO_LOOP_ID)
904 qla2x00_clear_loop_id(fcport);
906 fcport->loop_id = loop_id;
907 qla24xx_fcport_handle_login(vha,
913 } /* switch (ha->current_topology) */
/* Pass 2: the fcport's WWPN was NOT in the firmware's list. */
917 switch (vha->hw->current_topology) {
920 for (i = 0; i < n; i++) {
/*
 * NOTE(review): the port_id[] byte order here (domain=port_id[0],
 * al_pa=port_id[2]) is the reverse of the first loop above
 * (domain=port_id[2], al_pa=port_id[0]) — verify which order the
 * name-list entry actually uses; one of the two looks wrong.
 */
922 id.b.domain = e->port_id[0];
923 id.b.area = e->port_id[1];
924 id.b.al_pa = e->port_id[2];
926 loop_id = le16_to_cpu(e->nport_handle);
928 if (fcport->d_id.b24 == id.b24) {
930 qla2x00_find_fcport_by_wwpn(vha,
932 if (conflict_fcport) {
933 ql_dbg(ql_dbg_disc + ql_dbg_verbose,
935 "%s %d %8phC post del sess\n",
937 conflict_fcport->port_name);
938 qlt_schedule_sess_for_deletion
943 * FW already picked this loop id for
946 if (fcport->loop_id == loop_id)
947 fcport->loop_id = FC_NO_LOOP_ID;
949 qla24xx_fcport_handle_login(vha, fcport);
/* N2N: remote never sent PLOGI — escalate link/chip resets. */
952 qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
953 if (time_after_eq(jiffies, fcport->dm_login_expire)) {
954 if (fcport->n2n_link_reset_cnt < 2) {
955 fcport->n2n_link_reset_cnt++;
957 * remote port is not sending PLOGI.
958 * Reset link to kick start his state
961 set_bit(N2N_LINK_RESET,
964 if (fcport->n2n_chip_reset < 1) {
965 ql_log(ql_log_info, vha, 0x705d,
966 "Chip reset to bring laser down");
967 set_bit(ISP_ABORT_NEEDED,
969 fcport->n2n_chip_reset++;
971 ql_log(ql_log_info, vha, 0x705d,
972 "Remote port %8ph is not coming back\n",
974 fcport->scan_state = 0;
977 qla2xxx_wake_dpc(vha);
980 * report port suppose to do PLOGI. Give him
981 * more time. FW will catch it.
983 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
987 qla24xx_fcport_handle_login(vha, fcport);
/*
 * Completion callback for the Get Name List mailbox srb. Marks every
 * returned loop ID as in-use, dispatches the GNL event to each fcport
 * that was queued on vha->gnl.fcports, creates fcports for sessions the
 * firmware knows about but the driver does not, and re-issues GNL for
 * any ports that were queued while this one was in flight.
 */
995 static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
997 struct scsi_qla_host *vha = sp->vha;
999 struct fc_port *fcport = NULL, *tf;
1000 u16 i, n = 0, loop_id;
1001 struct event_arg ea;
1002 struct get_name_list_extended *e;
1007 ql_dbg(ql_dbg_disc, vha, 0x20e7,
1008 "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
1009 sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
1010 sp->u.iocb_cmd.u.mbx.in_mb[2]);
1013 sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
1014 memset(&ea, 0, sizeof(ea));
/* mb[1] holds the number of bytes the firmware transferred. */
1018 if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
1019 sizeof(struct get_name_list_extended)) {
1020 n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
1021 sizeof(struct get_name_list_extended);
1022 ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
/* Reserve every loop ID the firmware reports as present. */
1025 for (i = 0; i < n; i++) {
1027 loop_id = le16_to_cpu(e->nport_handle);
1028 /* mask out reserve bit */
1029 loop_id = (loop_id & 0x7fff);
1030 set_bit(loop_id, vha->hw->loop_id_map);
1031 wwn = wwn_to_u64(e->port_name);
1033 ql_dbg(ql_dbg_disc, vha, 0x20e8,
1034 "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
1035 __func__, &wwn, e->port_id[2], e->port_id[1],
1036 e->port_id[0], e->current_login_state, e->last_login_state,
1037 (loop_id & 0x7fff));
1040 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
/* Detach the waiting fcports onto a private list under sess_lock. */
1044 if (!list_empty(&vha->gnl.fcports))
1045 list_splice_init(&vha->gnl.fcports, &h);
1046 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1048 list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
1049 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1050 list_del_init(&fcport->gnl_entry);
1051 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
1052 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1055 qla24xx_handle_gnl_done_event(vha, &ea);
1058 /* create new fcport if fw has knowledge of new sessions */
1059 for (i = 0; i < n; i++) {
1064 wwn = wwn_to_u64(e->port_name);
1067 list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
1068 if (!memcmp((u8 *)&wwn, fcport->port_name,
1075 id.b.domain = e->port_id[2];
1076 id.b.area = e->port_id[1];
1077 id.b.al_pa = e->port_id[0];
/* Unknown, valid WWPN at a non-reserved address: spawn a new session. */
1080 if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
1081 ql_dbg(ql_dbg_disc, vha, 0x2065,
1082 "%s %d %8phC %06x post new sess\n",
1083 __func__, __LINE__, (u8 *)&wwn, id.b24);
1084 wwnn = wwn_to_u64(e->node_name);
1085 qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
1086 (u8 *)&wwnn, NULL, 0);
1090 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
/* Ports queued while this GNL was outstanding: schedule another GNL. */
1092 if (!list_empty(&vha->gnl.fcports)) {
1094 list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports,
1096 list_del_init(&fcport->gnl_entry);
1097 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
1098 if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS)
1102 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
/* ref: INIT */
1105 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * Issue (or join) an asynchronous Get Name List mailbox command for
 * @fcport. The fcport is queued on vha->gnl.fcports; if a GNL is
 * already in flight (vha->gnl.sent) this call just joins it. The list
 * result lands in the vha->gnl DMA buffer. Returns a QLA_* status.
 */
1108 int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
1111 int rval = QLA_FUNCTION_FAILED;
1112 unsigned long flags;
1115 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
1118 ql_dbg(ql_dbg_disc, vha, 0x20d9,
1119 "Async-gnlist WWPN %8phC \n", fcport->port_name);
1121 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1122 fcport->flags |= FCF_ASYNC_SENT;
1123 qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
/* Record generations so the done path can detect in-flight changes. */
1124 fcport->last_rscn_gen = fcport->rscn_gen;
1125 fcport->last_login_gen = fcport->login_gen;
1127 list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
/* A GNL is already outstanding: this fcport rides along with it. */
1128 if (vha->gnl.sent) {
1129 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1133 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
/* ref: INIT */
1136 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1140 sp->type = SRB_MB_IOCB;
1141 sp->name = "gnlist";
1142 sp->gen1 = fcport->rscn_gen;
1143 sp->gen2 = fcport->login_gen;
1144 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
1145 qla24xx_async_gnl_sp_done);
/* Build the MBC_PORT_NODE_NAME_LIST mailbox frame (64-bit DMA addr). */
1147 mb = sp->u.iocb_cmd.u.mbx.out_mb;
1148 mb[0] = MBC_PORT_NODE_NAME_LIST;
1149 mb[1] = BIT_2 | BIT_3;
1150 mb[2] = MSW(vha->gnl.ldma);
1151 mb[3] = LSW(vha->gnl.ldma);
1152 mb[6] = MSW(MSD(vha->gnl.ldma));
1153 mb[7] = LSW(MSD(vha->gnl.ldma));
1154 mb[8] = vha->gnl.size;
1155 mb[9] = vha->vp_idx;
1157 ql_dbg(ql_dbg_disc, vha, 0x20da,
1158 "Async-%s - OUT WWPN %8phC hndl %x\n",
1159 sp->name, fcport->port_name, sp->handle);
1161 rval = qla2x00_start_sp(sp);
1162 if (rval != QLA_SUCCESS)
/* Error unwind: drop the INIT reference and clear async flags. */
1169 kref_put(&sp->cmd_kref, qla2x00_sp_release);
1171 fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
/*
 * Queue a QLA_EVT_GNL work item for @fcport on the DPC work list.
 * Returns QLA_FUNCTION_FAILED if the work item cannot be allocated.
 */
1175 int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1177 struct qla_work_evt *e;
1179 e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
1181 return QLA_FUNCTION_FAILED;
1183 e->u.fcport.fcport = fcport;
1184 fcport->flags |= FCF_ASYNC_ACTIVE;
1185 return qla2x00_post_work(vha, e);
/*
 * Completion callback for the Get Port Database mailbox srb: dispatch
 * the event handler, then free the DMA-pool port-database buffer that
 * qla24xx_async_gpdb() attached, and drop the srb reference.
 */
1188 static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
1190 struct scsi_qla_host *vha = sp->vha;
1191 struct qla_hw_data *ha = vha->hw;
1192 fc_port_t *fcport = sp->fcport;
1193 u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
1194 struct event_arg ea;
1196 ql_dbg(ql_dbg_disc, vha, 0x20db,
1197 "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
1198 sp->name, res, fcport->port_name, mb[1], mb[2]);
1200 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
/* On timeout skip event dispatch; still release resources below. */
1202 if (res == QLA_FUNCTION_TIMEOUT)
1205 memset(&ea, 0, sizeof(ea));
1209 qla24xx_handle_gpdb_event(vha, &ea);
/* Return the port database buffer to the DMA pool. */
1212 dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
1213 sp->u.iocb_cmd.u.mbx.in_dma);
/* ref: INIT */
1215 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * Queue a QLA_EVT_PRLI work item for @fcport. Refused outright in
 * pure target mode, where the initiator-side PRLI is not sent.
 * Returns QLA_FUNCTION_FAILED if refused or allocation fails.
 */
1218 int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1220 struct qla_work_evt *e;
1222 if (vha->host->active_mode == MODE_TARGET)
1223 return QLA_FUNCTION_FAILED;
1225 e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
1227 return QLA_FUNCTION_FAILED;
1229 e->u.fcport.fcport = fcport;
1231 return qla2x00_post_work(vha, e);
/*
 * Completion callback for an async PRLI srb. Unless the host is
 * unloading, packages the logio data into an event_arg (mapping OS
 * timer expiry to its own code) and forwards it to
 * qla24xx_handle_prli_done_event().
 */
1234 static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
1236 struct scsi_qla_host *vha = sp->vha;
1237 struct srb_iocb *lio = &sp->u.iocb_cmd;
1238 struct event_arg ea;
1240 ql_dbg(ql_dbg_disc, vha, 0x2129,
1241 "%s %8phC res %x\n", __func__,
1242 sp->fcport->port_name, res);
1244 sp->fcport->flags &= ~FCF_ASYNC_SENT;
1246 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
1247 memset(&ea, 0, sizeof(ea));
1248 ea.fcport = sp->fcport;
1249 ea.data[0] = lio->u.logio.data[0];
1250 ea.data[1] = lio->u.logio.data[1];
1251 ea.iop[0] = lio->u.logio.iop[0];
1252 ea.iop[1] = lio->u.logio.iop[1];
/* Distinguish OS-timer expiry from a generic firmware error. */
1254 if (res == QLA_OS_TIMER_EXPIRED)
1255 ea.data[0] = QLA_OS_TIMER_EXPIRED;
1257 ea.data[0] = MBS_COMMAND_ERROR;
1259 qla24xx_handle_prli_done_event(vha, &ea);
/* ref: INIT */
1262 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * Issue an asynchronous PRLI to @fcport. Skipped when the host is
 * offline or (in dual mode) when the firmware already has a PLOGI/PRLI
 * in flight for this port. NVMe targets get an NVMe-flavored PRLI.
 * Returns a QLA_* status.
 */
1266 qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
1269 struct srb_iocb *lio;
1270 int rval = QLA_FUNCTION_FAILED;
1272 if (!vha->flags.online) {
1273 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
1274 __func__, __LINE__, fcport->port_name);
1278 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
1279 fcport->fw_login_state == DSC_LS_PRLI_PEND) &&
1280 qla_dual_mode_enabled(vha)) {
1281 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
1282 __func__, __LINE__, fcport->port_name);
/* ref: INIT */
1286 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1290 fcport->flags |= FCF_ASYNC_SENT;
1291 fcport->logout_completed = 0;
1293 sp->type = SRB_PRLI_CMD;
1295 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
1296 qla2x00_async_prli_sp_done);
1298 lio = &sp->u.iocb_cmd;
1299 lio->u.logio.flags = 0;
/* NVMe target: request the NVMe PRLI variant. */
1301 if (NVME_TARGET(vha->hw, fcport))
1302 lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
1304 ql_dbg(ql_dbg_disc, vha, 0x211b,
1305 "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d fc4type %x priority %x %s.\n",
1306 fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
1307 fcport->login_retry, fcport->fc4_type, vha->hw->fc4_type_priority,
1308 NVME_TARGET(vha->hw, fcport) ? "nvme" : "fcp");
1310 rval = qla2x00_start_sp(sp);
1311 if (rval != QLA_SUCCESS) {
/* Could not issue: flag for the DPC thread to retry the relogin. */
1312 fcport->flags |= FCF_LOGIN_NEEDED;
1313 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
/* Error unwind: drop the INIT reference and clear the sent flag. */
1321 kref_put(&sp->cmd_kref, qla2x00_sp_release);
1322 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * Queue a QLA_EVT_GPDB work item for @fcport with option byte @opt.
 * Returns QLA_FUNCTION_FAILED if the work item cannot be allocated.
 */
1326 int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
1328 struct qla_work_evt *e;
1330 e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
1332 return QLA_FUNCTION_FAILED;
1334 e->u.fcport.fcport = fcport;
1335 e->u.fcport.opt = opt;
1336 fcport->flags |= FCF_ASYNC_ACTIVE;
1337 return qla2x00_post_work(vha, e);
/*
 * Issue an asynchronous Get Port Database (MBC_GET_PORT_DATABASE)
 * mailbox command for @fcport. Allocates the response buffer from the
 * s_dma_pool; the completion callback frees it. On failure the request
 * is retried via the GPDB work queue. Returns a QLA_* status.
 */
1340 int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
1343 struct srb_iocb *mbx;
1344 int rval = QLA_FUNCTION_FAILED;
1347 struct port_database_24xx *pd;
1348 struct qla_hw_data *ha = vha->hw;
1350 if (IS_SESSION_DELETED(fcport)) {
1351 ql_log(ql_log_warn, vha, 0xffff,
1352 "%s: %8phC is being delete - not sending command.\n",
1353 __func__, fcport->port_name);
1354 fcport->flags &= ~FCF_ASYNC_ACTIVE;
1358 if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) {
1359 ql_log(ql_log_warn, vha, 0xffff,
1360 "%s: %8phC online %d flags %x - not sending command.\n",
1361 __func__, fcport->port_name, vha->flags.online, fcport->flags);
/* ref: INIT */
1365 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1369 qla2x00_set_fcport_disc_state(fcport, DSC_GPDB);
1371 fcport->flags |= FCF_ASYNC_SENT;
1372 sp->type = SRB_MB_IOCB;
1374 sp->gen1 = fcport->rscn_gen;
1375 sp->gen2 = fcport->login_gen;
1376 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
1377 qla24xx_async_gpdb_sp_done);
/* Zeroed DMA buffer for the returned port database. */
1379 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1381 ql_log(ql_log_warn, vha, 0xd043,
1382 "Failed to allocate port database structure.\n");
/* Build the GET_PORT_DATABASE mailbox frame (64-bit DMA address). */
1386 mb = sp->u.iocb_cmd.u.mbx.out_mb;
1387 mb[0] = MBC_GET_PORT_DATABASE;
1388 mb[1] = fcport->loop_id;
1389 mb[2] = MSW(pd_dma);
1390 mb[3] = LSW(pd_dma);
1391 mb[6] = MSW(MSD(pd_dma));
1392 mb[7] = LSW(MSD(pd_dma));
1393 mb[9] = vha->vp_idx;
/* Hand the buffer to the srb so the done callback can free it. */
1396 mbx = &sp->u.iocb_cmd;
1397 mbx->u.mbx.in = (void *)pd;
1398 mbx->u.mbx.in_dma = pd_dma;
1400 ql_dbg(ql_dbg_disc, vha, 0x20dc,
1401 "Async-%s %8phC hndl %x opt %x\n",
1402 sp->name, fcport->port_name, sp->handle, opt);
1404 rval = qla2x00_start_sp(sp);
1405 if (rval != QLA_SUCCESS)
/* Error unwind: free buffer, drop reference, requeue as work. */
1411 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1413 kref_put(&sp->cmd_kref, qla2x00_sp_release);
1414 fcport->flags &= ~FCF_ASYNC_SENT;
1416 fcport->flags &= ~FCF_ASYNC_ACTIVE;
1417 qla24xx_post_gpdb_work(vha, fcport, opt);
/*
 * __qla24xx_handle_gpdb_event - finalize session state after a successful
 * Get Port Database completion.
 *
 * Under tgt.sess_lock: bump the login generation, clear the deleted flag,
 * and require an explicit LOGO on delete.  On first successful login of a
 * non-reserved address the host's fcport_count is incremented and an
 * rport registration is scheduled (lock dropped around the scheduling
 * call).  If the session was already logged in, this is treated as a
 * revalidation and the discovery state moves to DSC_LOGIN_COMPLETE.
 */
1422 void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1424 unsigned long flags;
1426 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1427 ea->fcport->login_gen++;
1428 ea->fcport->deleted = 0;
1429 ea->fcport->logout_on_delete = 1;
1431 if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
1432 vha->fcport_count++;
1433 ea->fcport->login_succ = 1;
/* Drop the lock while scheduling the rport update, then retake it. */
1435 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1436 qla24xx_sched_upd_fcport(ea->fcport);
1437 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1438 } else if (ea->fcport->login_succ) {
1440 * We have an existing session. A late RSCN delivery
1441 * must have triggered the session to be re-validate.
1442 * Session is still valid.
1444 ql_dbg(ql_dbg_disc, vha, 0x20d6,
1445 "%s %d %8phC session revalidate success\n",
1446 __func__, __LINE__, ea->fcport->port_name);
1447 qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE);
1449 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
/*
 * qla_chk_secure_login - inspect the port database for a secure (FC-SP /
 * EDIF) login and kick off the appropriate follow-up.
 * @vha: host adapter
 * @fcport: remote port the port database belongs to
 * @pd: port database returned by the firmware
 *
 * Sets or clears FCF_FCSP_DEVICE based on pd->secure_login.  When EDIF is
 * enabled and the device is secure, discovery moves to
 * DSC_LOGIN_AUTH_PEND, per-session SA state is reset, and the
 * authentication-needed event is delivered to the EDIF application via
 * the event doorbell.  Otherwise, in initiator/dual mode, a PRLI is
 * posted.  Return value semantics not fully visible here — the elided
 * paths appear to tell the caller whether it should stop further
 * processing; confirm against the full source.
 */
1452 static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport,
1453 struct port_database_24xx *pd)
1457 if (pd->secure_login) {
1458 ql_dbg(ql_dbg_disc, vha, 0x104d,
1459 "Secure Login established on %8phC\n",
1461 fcport->flags |= FCF_FCSP_DEVICE;
1463 ql_dbg(ql_dbg_disc, vha, 0x104d,
1464 "non-Secure Login %8phC",
1466 fcport->flags &= ~FCF_FCSP_DEVICE;
1468 if (vha->hw->flags.edif_enabled) {
1469 if (fcport->flags & FCF_FCSP_DEVICE) {
1470 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_AUTH_PEND);
1471 /* Start edif prli timer & ring doorbell for app */
1472 fcport->edif.rx_sa_set = 0;
1473 fcport->edif.tx_sa_set = 0;
1474 fcport->edif.rx_sa_pending = 0;
1475 fcport->edif.tx_sa_pending = 0;
1477 qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
1480 if (vha->e_dbell.db_flags == EDB_ACTIVE) {
1481 ql_dbg(ql_dbg_disc, vha, 0x20ef,
1482 "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
1483 __func__, __LINE__, fcport->port_name);
1484 fcport->edif.app_started = 1;
1485 fcport->edif.app_sess_online = 1;
1487 qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
1488 fcport->d_id.b24, 0, fcport);
1492 } else if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
1493 ql_dbg(ql_dbg_disc, vha, 0x2117,
1494 "%s %d %8phC post prli\n",
1495 __func__, __LINE__, fcport->port_name);
1496 qla24xx_post_prli_work(vha, fcport);
/*
 * qla24xx_handle_gpdb_event - process a completed Get Port Database and
 * advance the fcport's discovery state machine.
 *
 * Extracts the firmware login state (ls) from the port database — upper
 * nibble for NVMe targets, lower nibble for FCP — and dispatches on it:
 *   PDS_PRLI_COMPLETE: parse the port database into the fcport;
 *   PDS_PLOGI_COMPLETE: check for secure login (may defer to EDIF auth);
 *   PDS_*_PENDING: set discovery back to DSC_GNL and request relogin;
 *   PDS_LOGO_PENDING / PDS_PORT_UNAVAILABLE: schedule session deletion.
 * Stale completions are discarded first: a delete in progress, a login
 * generation change, or an RSCN generation change (the latter replays the
 * RSCN and schedules deletion).  Finally __qla24xx_handle_gpdb_event()
 * commits the session state.
 */
1504 void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1506 fc_port_t *fcport = ea->fcport;
1507 struct port_database_24xx *pd;
1508 struct srb *sp = ea->sp;
1511 pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
1513 fcport->flags &= ~FCF_ASYNC_SENT;
1515 ql_dbg(ql_dbg_disc, vha, 0x20d2,
1516 "%s %8phC DS %d LS %x fc4_type %x rc %x\n", __func__,
1517 fcport->port_name, fcport->disc_state, pd->current_login_state,
1518 fcport->fc4_type, ea->rc);
1520 if (fcport->disc_state == DSC_DELETE_PEND) {
1521 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC\n",
1522 __func__, __LINE__, fcport->port_name);
/* NVMe login state lives in the upper nibble, FCP in the lower. */
1526 if (NVME_TARGET(vha->hw, fcport))
1527 ls = pd->current_login_state >> 4;
1529 ls = pd->current_login_state & 0xf;
1531 if (ea->sp->gen2 != fcport->login_gen) {
1532 /* target side must have changed it. */
1534 ql_dbg(ql_dbg_disc, vha, 0x20d3,
1535 "%s %8phC generation changed\n",
1536 __func__, fcport->port_name);
1538 } else if (ea->sp->gen1 != fcport->rscn_gen) {
1539 qla_rscn_replay(fcport);
1540 qlt_schedule_sess_for_deletion(fcport);
1541 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
1542 __func__, __LINE__, fcport->port_name, ls);
1547 case PDS_PRLI_COMPLETE:
1548 __qla24xx_parse_gpdb(vha, fcport, pd);
1550 case PDS_PLOGI_COMPLETE:
1551 if (qla_chk_secure_login(vha, fcport, pd)) {
1552 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
1553 __func__, __LINE__, fcport->port_name, ls);
1557 case PDS_PLOGI_PENDING:
1558 case PDS_PRLI_PENDING:
1559 case PDS_PRLI2_PENDING:
1560 /* Set discovery state back to GNL to Relogin attempt */
1561 if (qla_dual_mode_enabled(vha) ||
1562 qla_ini_mode_enabled(vha)) {
1563 qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
1564 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1566 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
1567 __func__, __LINE__, fcport->port_name, ls);
1569 case PDS_LOGO_PENDING:
1570 case PDS_PORT_UNAVAILABLE:
1572 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
1573 __func__, __LINE__, fcport->port_name);
1574 qlt_schedule_sess_for_deletion(fcport);
1577 __qla24xx_handle_gpdb_event(vha, ea);
/*
 * qla_chk_n2n_b4_login - decide whether this side should initiate login,
 * accounting for N2N (point-to-point) topology rules.
 *
 * Pure target mode never initiates.  In dual mode on N2N, the WWPNs are
 * compared (elided here) so only one side drives the PLOGI; a completed
 * firmware PLOGI whose NACK deadline has passed also permits login.
 * When login is allowed and retries remain: consume a retry, allocate a
 * loop ID if the port has none (deleting the session if the ID space is
 * exhausted), then post the async login work.
 */
1580 static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1585 ql_dbg(ql_dbg_disc, vha, 0x307b,
1586 "%s %8phC DS %d LS %d lid %d retries=%d\n",
1587 __func__, fcport->port_name, fcport->disc_state,
1588 fcport->fw_login_state, fcport->loop_id, fcport->login_retry);
1590 if (qla_tgt_mode_enabled(vha))
1593 if (qla_dual_mode_enabled(vha)) {
1594 if (N2N_TOPO(vha->hw)) {
/* NOTE(review): WWPN comparison deciding the login initiator is elided. */
1597 mywwn = wwn_to_u64(vha->port_name);
1598 wwn = wwn_to_u64(fcport->port_name);
1601 else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
1602 && time_after_eq(jiffies,
1603 fcport->plogi_nack_done_deadline))
1609 /* initiator mode */
1613 if (login && fcport->login_retry) {
1614 fcport->login_retry--;
1615 if (fcport->loop_id == FC_NO_LOOP_ID) {
1616 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
1617 rc = qla2x00_find_new_loop_id(vha, fcport);
1619 ql_dbg(ql_dbg_disc, vha, 0x20e6,
1620 "%s %d %8phC post del sess - out of loopid\n",
1621 __func__, __LINE__, fcport->port_name);
1622 fcport->scan_state = 0;
1623 qlt_schedule_sess_for_deletion(fcport);
1627 ql_dbg(ql_dbg_disc, vha, 0x20bf,
1628 "%s %d %8phC post login\n",
1629 __func__, __LINE__, fcport->port_name);
1630 qla2x00_post_async_login_work(vha, fcport, NULL);
/*
 * qla24xx_fcport_handle_login - central discovery/login state machine for
 * a single fcport.
 *
 * Early exits: port not FOUND or delete pending; remote side mid-login in
 * dual mode; recent PLOGI NACK (re-arm RELOGIN_NEEDED); pure target mode
 * on fabric (target never initiates there); async op already in flight.
 * Then dispatches on fcport->disc_state.  For the initial state, behavior
 * further depends on topology: on N2N the smaller-WWPN side waits while
 * the bigger side drives ELS PLOGI; on fabric, GNNID/GNL queries resolve
 * the node name and loop ID before login.  Later states post GPDB, PRLI,
 * or ADISC work, or schedule session deletion on login failure.
 * DSC_UPD_FCPORT logs slow rport registration and re-arms relogin.
 */
1634 int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1640 ql_dbg(ql_dbg_disc, vha, 0x20d8,
1641 "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d fc4type %x\n",
1642 __func__, fcport->port_name, fcport->disc_state,
1643 fcport->fw_login_state, fcport->login_pause, fcport->flags,
1644 fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
1645 fcport->login_gen, fcport->loop_id, fcport->scan_state,
1648 if (fcport->scan_state != QLA_FCPORT_FOUND ||
1649 fcport->disc_state == DSC_DELETE_PEND)
/* Dual mode: if the remote end is mid PLOGI/PRLI, let it finish. */
1652 if ((fcport->loop_id != FC_NO_LOOP_ID) &&
1653 qla_dual_mode_enabled(vha) &&
1654 ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
1655 (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
1658 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
1659 !N2N_TOPO(vha->hw)) {
1660 if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
1661 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1666 /* Target won't initiate port login if fabric is present */
1667 if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
1670 if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) {
1671 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1675 switch (fcport->disc_state) {
1677 wwn = wwn_to_u64(fcport->node_name);
1678 switch (vha->hw->current_topology) {
1680 if (fcport_is_smaller(fcport)) {
1681 /* this adapter is bigger */
1682 if (fcport->login_retry) {
1683 if (fcport->loop_id == FC_NO_LOOP_ID) {
1684 qla2x00_find_new_loop_id(vha,
1686 fcport->fw_login_state =
1687 DSC_LS_PORT_UNAVAIL;
1689 fcport->login_retry--;
1690 qla_post_els_plogi_work(vha, fcport);
1692 ql_log(ql_log_info, vha, 0x705d,
1693 "Unable to reach remote port %8phC",
1697 qla24xx_post_gnl_work(vha, fcport);
/* Fabric: node name unknown -> GNNID; no loop id -> GNL; else N2N check. */
1702 ql_dbg(ql_dbg_disc, vha, 0xffff,
1703 "%s %d %8phC post GNNID\n",
1704 __func__, __LINE__, fcport->port_name);
1705 qla24xx_post_gnnid_work(vha, fcport);
1706 } else if (fcport->loop_id == FC_NO_LOOP_ID) {
1707 ql_dbg(ql_dbg_disc, vha, 0x20bd,
1708 "%s %d %8phC post gnl\n",
1709 __func__, __LINE__, fcport->port_name);
1710 qla24xx_post_gnl_work(vha, fcport);
1712 qla_chk_n2n_b4_login(vha, fcport);
1719 switch (vha->hw->current_topology) {
/* 0x6 here matches the firmware's PRLI-complete login state nibble. */
1721 if ((fcport->current_login_state & 0xf) == 0x6) {
1722 ql_dbg(ql_dbg_disc, vha, 0x2118,
1723 "%s %d %8phC post GPDB work\n",
1724 __func__, __LINE__, fcport->port_name);
1725 fcport->chip_reset =
1726 vha->hw->base_qpair->chip_reset;
1727 qla24xx_post_gpdb_work(vha, fcport, 0);
1729 ql_dbg(ql_dbg_disc, vha, 0x2118,
1730 "%s %d %8phC post %s PRLI\n",
1731 __func__, __LINE__, fcport->port_name,
1732 NVME_TARGET(vha->hw, fcport) ? "NVME" :
1734 qla24xx_post_prli_work(vha, fcport);
1738 if (fcport->login_pause) {
1739 ql_dbg(ql_dbg_disc, vha, 0x20d8,
1740 "%s %d %8phC exit\n",
1743 fcport->last_rscn_gen = fcport->rscn_gen;
1744 fcport->last_login_gen = fcport->login_gen;
1745 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1748 qla_chk_n2n_b4_login(vha, fcport);
1753 case DSC_LOGIN_FAILED:
1754 if (N2N_TOPO(vha->hw))
1755 qla_chk_n2n_b4_login(vha, fcport);
1757 qlt_schedule_sess_for_deletion(fcport);
1760 case DSC_LOGIN_COMPLETE:
1761 /* recheck login state */
1762 data[0] = data[1] = 0;
1763 qla2x00_post_async_adisc_work(vha, fcport, data);
1766 case DSC_LOGIN_PEND:
1767 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
1768 qla24xx_post_prli_work(vha, fcport);
1771 case DSC_UPD_FCPORT:
1772 sec = jiffies_to_msecs(jiffies -
1773 fcport->jiffies_at_registration)/1000;
1774 if (fcport->sec_since_registration < sec && sec &&
1776 fcport->sec_since_registration = sec;
1777 ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
1778 "%s %8phC - Slow Rport registration(%d Sec)\n",
1779 __func__, fcport->port_name, sec);
1782 if (fcport->next_disc_state != DSC_DELETE_PEND)
1783 fcport->next_disc_state = DSC_ADISC;
1784 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
/*
 * qla24xx_post_newsess_work - queue creation of a new session
 * (QLA_EVT_NEW_SESS) for the work thread.
 * @vha: host adapter
 * @id: remote N_Port ID
 * @port_name / @node_name: WWPN / WWNN (node_name copy appears to be
 *  conditional on a non-NULL pointer — guard elided in this view)
 * @pla: pending-login association handle, stored opaquely in the event
 * @fc4_type: FC-4 protocol type of the session
 *
 * Returns QLA_FUNCTION_FAILED if the work element cannot be allocated,
 * otherwise the qla2x00_post_work() result.
 */
1794 int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
1795 u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
1797 struct qla_work_evt *e;
1799 e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
1801 return QLA_FUNCTION_FAILED;
1803 e->u.new_sess.id = *id;
1804 e->u.new_sess.pla = pla;
1805 e->u.new_sess.fc4_type = fc4_type;
1806 memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
1808 memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);
1810 return qla2x00_post_work(vha, e);
/*
 * qla2x00_handle_rscn - mark fcports affected by an RSCN for rescan and
 * kick the fabric scan work.
 *
 * The RSCN address format (ea->id.b.rsvd_1) selects the scope:
 *   RSCN_PORT_ADDR - exact N_Port ID match;
 *   RSCN_AREA_ADDR - match on domain+area (top 16 bits of the port id);
 *   (elided case)  - match on domain only (top 8 bits);
 *   default        - every fcport.
 * FCP2 (tape) devices are skipped in the wildcard scopes, and an exact
 * match on an FCP2 device only logs a "delaying delete" message.  Each
 * match sets fcport->scan_needed.  Finally, under work_lock, a delayed
 * fabric scan is scheduled unless one is already queued.
 */
1813 void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
1816 unsigned long flags;
1818 switch (ea->id.b.rsvd_1) {
1819 case RSCN_PORT_ADDR:
1820 fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
1822 if (fcport->flags & FCF_FCP2_DEVICE) {
1823 ql_dbg(ql_dbg_disc, vha, 0x2115,
1824 "Delaying session delete for FCP2 portid=%06x %8phC ",
1825 fcport->d_id.b24, fcport->port_name);
1828 fcport->scan_needed = 1;
1832 case RSCN_AREA_ADDR:
1833 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1834 if (fcport->flags & FCF_FCP2_DEVICE)
1837 if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) {
1838 fcport->scan_needed = 1;
/* Domain-scope RSCN: compare only the top byte of the port id. */
1844 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1845 if (fcport->flags & FCF_FCP2_DEVICE)
1848 if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) {
1849 fcport->scan_needed = 1;
/* Fabric-wide RSCN: flag every port. */
1856 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1857 if (fcport->flags & FCF_FCP2_DEVICE)
1860 fcport->scan_needed = 1;
1866 spin_lock_irqsave(&vha->work_lock, flags);
1867 if (vha->scan.scan_flags == 0) {
1868 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
1869 vha->scan.scan_flags |= SF_QUEUED;
1870 schedule_delayed_work(&vha->scan.scan_work, 5);
1872 spin_unlock_irqrestore(&vha->work_lock, flags);
/*
 * qla24xx_handle_relogin_event - re-drive login for an fcport after a
 * relogin request.
 *
 * No-op while the driver is unloading.  If an RSCN arrived since the
 * last pass (rscn_gen moved), a Get Node List (GNL) is posted to refresh
 * fabric state first; otherwise control drops into the main login state
 * machine, qla24xx_fcport_handle_login().
 */
1875 void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
1876 struct event_arg *ea)
1878 fc_port_t *fcport = ea->fcport;
1880 if (test_bit(UNLOADING, &vha->dpc_flags))
1883 ql_dbg(ql_dbg_disc, vha, 0x2102,
1884 "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
1885 __func__, fcport->port_name, fcport->disc_state,
1886 fcport->fw_login_state, fcport->login_pause,
1887 fcport->deleted, fcport->conflict,
1888 fcport->last_rscn_gen, fcport->rscn_gen,
1889 fcport->last_login_gen, fcport->login_gen,
1892 if (fcport->last_rscn_gen != fcport->rscn_gen) {
1893 ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n",
1894 __func__, __LINE__, fcport->port_name);
1895 qla24xx_post_gnl_work(vha, fcport);
1899 qla24xx_fcport_handle_login(vha, fcport);
/*
 * qla_handle_els_plogi_done - follow-up after an ELS PLOGI completes.
 *
 * On an N2N link with EDIF enabled, the smaller-WWPN side fetches the
 * port database first so the secure-login state can be examined before
 * any PRLI.  Pure target mode never initiates PRLI.  Otherwise a PRLI is
 * posted for the fcport.
 */
1902 void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
1903 struct event_arg *ea)
1905 if (N2N_TOPO(vha->hw) && fcport_is_smaller(ea->fcport) &&
1906 vha->hw->flags.edif_enabled) {
1907 /* check to see if App support Secure */
1908 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
1912 /* for pure Target Mode, PRLI will not be initiated */
1913 if (vha->host->active_mode == MODE_TARGET)
1916 ql_dbg(ql_dbg_disc, vha, 0x2118,
1917 "%s %d %8phC post PRLI\n",
1918 __func__, __LINE__, ea->fcport->port_name);
1919 qla24xx_post_prli_work(vha, ea->fcport);
1923 * RSCN(s) came in for this fcport, but the RSCN(s) was not able
1924 * to be consumed by the fcport
1926 void qla_rscn_replay(fc_port_t *fcport)
1928 struct event_arg ea;
/* A pending delete owns the fcport; don't re-inject an RSCN under it. */
1930 switch (fcport->disc_state) {
1931 case DSC_DELETE_PEND:
1937 if (fcport->scan_needed) {
/* Re-synthesize a port-addressed RSCN and feed it back through the
 * normal RSCN handler so the scan is re-attempted. */
1938 memset(&ea, 0, sizeof(ea));
1939 ea.id = fcport->d_id;
1940 ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
1941 qla2x00_handle_rscn(fcport->vha, &ea);
/*
 * qla2x00_tmf_iocb_timeout - timeout handler for a Task Management IOCB.
 *
 * Attempts a (non-waiting) firmware abort of the srb; then, under the
 * queue-pair lock, removes the srb from the outstanding-commands table
 * so the ISR cannot complete it twice.  Finally stamps CS_TIMEOUT /
 * QLA_FUNCTION_FAILED into the TMF result and wakes the sleeping
 * qla2x00_async_tm_cmd() via the completion.
 */
1946 qla2x00_tmf_iocb_timeout(void *data)
1949 struct srb_iocb *tmf = &sp->u.iocb_cmd;
1951 unsigned long flags;
1953 rc = qla24xx_async_abort_cmd(sp, false);
1955 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
1956 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
1957 if (sp->qpair->req->outstanding_cmds[h] == sp) {
1958 sp->qpair->req->outstanding_cmds[h] = NULL;
1962 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
1963 tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT);
1964 tmf->u.tmf.data = QLA_FUNCTION_FAILED;
1965 complete(&tmf->u.tmf.comp);
/*
 * qla2x00_tmf_sp_done - TMF srb completion callback; just wakes the
 * waiter in qla2x00_async_tm_cmd().  The result is read from
 * tmf->u.tmf.data by the waiter, not passed through @res here.
 */
1969 static void qla2x00_tmf_sp_done(srb_t *sp, int res)
1971 struct srb_iocb *tmf = &sp->u.iocb_cmd;
1973 complete(&tmf->u.tmf.comp);
/*
 * qla2x00_async_tm_cmd - issue a Task Management function (e.g. LUN or
 * target reset) as an async srb and wait synchronously for it.
 * @fcport: target remote port
 * @flags: TMF flags (e.g. TCF_LUN_RESET) carried in the IOCB
 * @lun: LUN the TMF addresses
 *
 * Builds an SRB_TM_CMD srb with qla2x00_tmf_iocb_timeout as the timeout
 * handler, starts it, and blocks on the TMF completion.  After a
 * successful TMF (and when not unloading / not an ISPFx00) a Marker IOCB
 * is issued to resynchronize the firmware command stream — MK_SYNC_ID_LUN
 * for a LUN reset, MK_SYNC_ID otherwise.  Cleanup drops the srb reference
 * and clears FCF_ASYNC_SENT.
 */
1977 qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
1980 struct scsi_qla_host *vha = fcport->vha;
1981 struct srb_iocb *tm_iocb;
1983 int rval = QLA_FUNCTION_FAILED;
1986 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1990 sp->type = SRB_TM_CMD;
1992 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha),
1993 qla2x00_tmf_sp_done);
1994 sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;
1996 tm_iocb = &sp->u.iocb_cmd;
1997 init_completion(&tm_iocb->u.tmf.comp);
1998 tm_iocb->u.tmf.flags = flags;
1999 tm_iocb->u.tmf.lun = lun;
2001 ql_dbg(ql_dbg_taskm, vha, 0x802f,
2002 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
2003 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2004 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2006 rval = qla2x00_start_sp(sp);
2007 if (rval != QLA_SUCCESS)
/* Block until the sp_done or timeout handler signals completion. */
2009 wait_for_completion(&tm_iocb->u.tmf.comp);
2011 rval = tm_iocb->u.tmf.data;
2013 if (rval != QLA_SUCCESS) {
2014 ql_log(ql_log_warn, vha, 0x8030,
2015 "TM IOCB failed (%x).\n", rval);
2018 if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
2019 flags = tm_iocb->u.tmf.flags;
2020 lun = (uint16_t)tm_iocb->u.tmf.lun;
2022 /* Issue Marker IOCB */
2023 qla2x00_marker(vha, vha->hw->base_qpair,
2024 fcport->loop_id, lun,
2025 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
2030 kref_put(&sp->cmd_kref, qla2x00_sp_release);
2031 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * qla24xx_async_abort_command - abort an outstanding srb.
 *
 * Scans the queue pair's outstanding-commands table under its lock to
 * confirm the srb is still in flight; returns QLA_ERR_NOT_FOUND if not.
 * ISPFx00 discovery commands (SRB_FXIOCB_DCMD) are aborted through the
 * qlafx00_fx_disc(FXDISC_ABORT_IOCTL) path; everything else goes through
 * qla24xx_async_abort_cmd() with wait=true.
 */
2037 qla24xx_async_abort_command(srb_t *sp)
2039 unsigned long flags = 0;
2042 fc_port_t *fcport = sp->fcport;
2043 struct qla_qpair *qpair = sp->qpair;
2044 struct scsi_qla_host *vha = fcport->vha;
2045 struct req_que *req = qpair->req;
2047 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
2048 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
2049 if (req->outstanding_cmds[handle] == sp)
2052 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
2054 if (handle == req->num_outstanding_cmds) {
2055 /* Command not found. */
2056 return QLA_ERR_NOT_FOUND;
2058 if (sp->type == SRB_FXIOCB_DCMD)
2059 return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
2060 FXDISC_ABORT_IOCTL);
2062 return qla24xx_async_abort_cmd(sp, true);
/*
 * qla24xx_handle_prli_done_event - process PRLI completion status.
 *
 * MBS_COMMAND_COMPLETE: record the NVMe PRLI service parameters (first
 * burst size = iop[1] low 16 bits * 512 when advertised) and post a GPDB
 * to finish the login.  On failure of an NVMe/FCP-capable target, the
 * do_prli_nvme flag is flipped so the next attempt tries the other
 * protocol.  On an N2N topology a failed PRLI triggers a link reset
 * (N2N_LINK_RESET + wake DPC) up to login_retry_count times; on fabric,
 * the session is torn down so a normal relogin can retrigger.
 */
2066 qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
2069 WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
2072 switch (ea->data[0]) {
2073 case MBS_COMMAND_COMPLETE:
2074 ql_dbg(ql_dbg_disc, vha, 0x2118,
2075 "%s %d %8phC post gpdb\n",
2076 __func__, __LINE__, ea->fcport->port_name);
2078 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2079 ea->fcport->logout_on_delete = 1;
2080 ea->fcport->nvme_prli_service_param = ea->iop[0];
/* First-burst size is advertised in 512-byte units in iop[1]. */
2081 if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST)
2082 ea->fcport->nvme_first_burst_size =
2083 (ea->iop[1] & 0xffff) * 512;
2085 ea->fcport->nvme_first_burst_size = 0;
2086 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
2090 ql_dbg(ql_dbg_disc, vha, 0x2118,
2091 "%s %d %8phC priority %s, fc4type %x prev try %s\n",
2092 __func__, __LINE__, ea->fcport->port_name,
2093 vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ?
2094 "FCP" : "NVMe", ea->fcport->fc4_type,
2095 (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI) ?
/* PRLI failed on a dual NVMe/FCP target: try the other protocol next. */
2098 if (NVME_FCP_TARGET(ea->fcport)) {
2099 if (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI)
2100 ea->fcport->do_prli_nvme = 0;
2102 ea->fcport->do_prli_nvme = 1;
2104 ea->fcport->do_prli_nvme = 0;
2107 if (N2N_TOPO(vha->hw)) {
2108 if (ea->fcport->n2n_link_reset_cnt <
2109 vha->hw->login_retry_count) {
2110 ea->fcport->n2n_link_reset_cnt++;
2111 vha->relogin_jif = jiffies + 2 * HZ;
2113 * PRLI failed. Reset link to kick start
2116 set_bit(N2N_LINK_RESET, &vha->dpc_flags);
2117 qla2xxx_wake_dpc(vha);
2119 ql_log(ql_log_warn, vha, 0x2119,
2120 "%s %d %8phC Unable to reconnect\n",
2122 ea->fcport->port_name);
2126 * switch connect. login failed. Take connection down
2127 * and allow relogin to retrigger
2129 ea->fcport->flags &= ~FCF_ASYNC_SENT;
2130 ea->fcport->keep_nport_handle = 0;
2131 ea->fcport->logout_on_delete = 1;
2132 qlt_schedule_sess_for_deletion(ea->fcport);
/*
 * qla24xx_handle_plogi_done_event - process PLOGI completion status and
 * resolve loop-ID / N_Port-ID conflicts.
 *
 * Stale-completion filtering first: remote mid-login, delete pending or
 * deleted, login-generation change, or RSCN-generation change (replayed,
 * session deleted).  Then, on ea->data[0]:
 *   MBS_COMMAND_COMPLETE - mark firmware login state, reserve the loop
 *     ID in loop_id_map, and either post GPDB (EDIF path), post PRLI
 *     (NVMe target), or post GPDB (FCP path);
 *   MBS_COMMAND_ERROR - schedule session deletion;
 *   MBS_LOOP_ID_USED - our loop ID collides; reserve it, clear ours and
 *     post GNL to get a fresh one (conflict port id decoded from iop[1]);
 *   MBS_PORT_ID_USED - another session owns our N_Port ID; if a
 *     conflicting fcport is found it must clean up first (login_pause),
 *     otherwise adopt the in-use loop id (lid) and delete this session
 *     so login can restart cleanly.
 */
2139 qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
2141 port_id_t cid; /* conflict Nport id */
2143 struct fc_port *conflict_fcport;
2144 unsigned long flags;
2145 struct fc_port *fcport = ea->fcport;
2147 ql_dbg(ql_dbg_disc, vha, 0xffff,
2148 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
2149 __func__, fcport->port_name, fcport->disc_state,
2150 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
2151 ea->sp->gen1, fcport->rscn_gen,
2152 ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
2154 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
2155 (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
2156 ql_dbg(ql_dbg_disc, vha, 0x20ea,
2157 "%s %d %8phC Remote is trying to login\n",
2158 __func__, __LINE__, fcport->port_name);
2162 if ((fcport->disc_state == DSC_DELETE_PEND) ||
2163 (fcport->disc_state == DSC_DELETED)) {
2164 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2168 if (ea->sp->gen2 != fcport->login_gen) {
2169 /* target side must have changed it. */
2170 ql_dbg(ql_dbg_disc, vha, 0x20d3,
2171 "%s %8phC generation changed\n",
2172 __func__, fcport->port_name);
2173 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2175 } else if (ea->sp->gen1 != fcport->rscn_gen) {
2176 ql_dbg(ql_dbg_disc, vha, 0x20d3,
2177 "%s %8phC RSCN generation changed\n",
2178 __func__, fcport->port_name);
2179 qla_rscn_replay(fcport);
2180 qlt_schedule_sess_for_deletion(fcport);
2184 WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
2187 switch (ea->data[0]) {
2188 case MBS_COMMAND_COMPLETE:
2190 * Driver must validate login state - If PRLI not complete,
2191 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
2194 if (vha->hw->flags.edif_enabled) {
2195 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
2196 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
2197 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2198 ea->fcport->logout_on_delete = 1;
2199 ea->fcport->send_els_logo = 0;
2200 ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
2201 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
2203 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
2205 if (NVME_TARGET(vha->hw, fcport)) {
2206 ql_dbg(ql_dbg_disc, vha, 0x2117,
2207 "%s %d %8phC post prli\n",
2208 __func__, __LINE__, fcport->port_name);
2209 qla24xx_post_prli_work(vha, fcport);
2211 ql_dbg(ql_dbg_disc, vha, 0x20ea,
2212 "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
2213 __func__, __LINE__, fcport->port_name,
2214 fcport->loop_id, fcport->d_id.b24);
2216 set_bit(fcport->loop_id, vha->hw->loop_id_map);
2217 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
2218 fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2219 fcport->logout_on_delete = 1;
2220 fcport->send_els_logo = 0;
2221 fcport->fw_login_state = DSC_LS_PRLI_COMP;
2222 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
2224 qla24xx_post_gpdb_work(vha, fcport, 0);
2228 case MBS_COMMAND_ERROR:
2229 ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
2230 __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
2232 qlt_schedule_sess_for_deletion(ea->fcport);
2234 case MBS_LOOP_ID_USED:
2235 /* data[1] = IO PARAM 1 = nport ID */
2236 cid.b.domain = (ea->iop[1] >> 16) & 0xff;
2237 cid.b.area = (ea->iop[1] >> 8) & 0xff;
2238 cid.b.al_pa = ea->iop[1] & 0xff;
2241 ql_dbg(ql_dbg_disc, vha, 0x20ec,
2242 "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2243 __func__, __LINE__, ea->fcport->port_name,
2244 ea->fcport->loop_id, cid.b24);
/* Reserve the conflicting loop id, then fetch a new one via GNL. */
2246 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
2247 ea->fcport->loop_id = FC_NO_LOOP_ID;
2248 qla24xx_post_gnl_work(vha, ea->fcport);
2250 case MBS_PORT_ID_USED:
2251 lid = ea->iop[1] & 0xffff;
2252 qlt_find_sess_invalidate_other(vha,
2253 wwn_to_u64(ea->fcport->port_name),
2254 ea->fcport->d_id, lid, &conflict_fcport);
2256 if (conflict_fcport) {
2258 * Another fcport share the same loop_id/nport id.
2259 * Conflict fcport needs to finish cleanup before this
2260 * fcport can proceed to login.
2262 conflict_fcport->conflict = ea->fcport;
2263 ea->fcport->login_pause = 1;
2265 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2266 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
2267 __func__, __LINE__, ea->fcport->port_name,
2268 ea->fcport->d_id.b24, lid);
2270 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2271 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
2272 __func__, __LINE__, ea->fcport->port_name,
2273 ea->fcport->d_id.b24, lid);
2275 qla2x00_clear_loop_id(ea->fcport);
2276 set_bit(lid, vha->hw->loop_id_map);
2277 ea->fcport->loop_id = lid;
2278 ea->fcport->keep_nport_handle = 0;
2279 ea->fcport->logout_on_delete = 1;
2280 qlt_schedule_sess_for_deletion(ea->fcport);
2287 /****************************************************************************/
2288 /* QLogic ISP2x00 Hardware Support Functions. */
2289 /****************************************************************************/
/*
 * qla83xx_nic_core_fw_load - perform the ISP83xx Inter-Driver
 * Communication (IDC) handshake and, if this function is the reset
 * owner, drive NIC core firmware load.
 *
 * Entire sequence runs under the IDC lock: announce our FCoE driver
 * presence, decide reset ownership, then publish IDC versions — the
 * reset owner writes the Major version and clears lock recovery, while
 * other functions refuse to load if the published Major version is
 * incompatible (clearing their presence bit).  Each function ORs its
 * supported Minor version into a 2-bit-per-port field.  The owner also
 * fetches the current port config before entering the IDC state handler.
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
2292 qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
2294 int rval = QLA_SUCCESS;
2295 struct qla_hw_data *ha = vha->hw;
2296 uint32_t idc_major_ver, idc_minor_ver;
2299 qla83xx_idc_lock(vha, 0);
2301 /* SV: TODO: Assign initialization timeout from
2302 * flash-info / other param
2304 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
2305 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
2307 /* Set our fcoe function presence */
2308 if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
2309 ql_dbg(ql_dbg_p3p, vha, 0xb077,
2310 "Error while setting DRV-Presence.\n");
2311 rval = QLA_FUNCTION_FAILED;
2315 /* Decide the reset ownership */
2316 qla83xx_reset_ownership(vha);
2319 * On first protocol driver load:
2320 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
2322 * Others: Check compatibility with current IDC Major version.
2324 qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
2325 if (ha->flags.nic_core_reset_owner) {
2326 /* Set IDC Major version */
2327 idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
2328 qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
2330 /* Clearing IDC-Lock-Recovery register */
2331 qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
2332 } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
2334 * Clear further IDC participation if we are not compatible with
2335 * the current IDC Major Version.
2337 ql_log(ql_log_warn, vha, 0xb07d,
2338 "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
2339 idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
2340 __qla83xx_clear_drv_presence(vha);
2341 rval = QLA_FUNCTION_FAILED;
2344 /* Each function sets its supported Minor version. */
2345 qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
2346 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
2347 qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
2349 if (ha->flags.nic_core_reset_owner) {
2350 memset(config, 0, sizeof(config));
2351 if (!qla81xx_get_port_config(vha, config))
2352 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
2356 rval = qla83xx_idc_state_handler(vha);
2359 qla83xx_idc_unlock(vha, 0);
2365 * qla2x00_initialize_adapter
/*
 * Cold-start initialization of the HBA: clears all adapter/host flags
 * and statistics, configures PCI space, resets the chip, validates
 * flash, configures NVRAM (including FC4 priority), verifies or loads
 * RISC firmware, initializes rings, and performs chip-family-specific
 * extras (ISP84xx chip handle + verify-FW IOCB, ISP8031 NIC core
 * firmware, FCP priority config, driver-version registration).
 */
2369 * ha = adapter block pointer.
2375 qla2x00_initialize_adapter(scsi_qla_host_t *vha)
2378 struct qla_hw_data *ha = vha->hw;
2379 struct req_que *req = ha->req_q_map[0];
2380 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2382 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2383 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
2385 /* Clear adapter flags. */
2386 vha->flags.online = 0;
2387 ha->flags.chip_reset_done = 0;
2388 vha->flags.reset_active = 0;
2389 ha->flags.pci_channel_io_perm_failure = 0;
2390 ha->flags.eeh_busy = 0;
2391 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
2392 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
2393 atomic_set(&vha->loop_state, LOOP_DOWN);
2394 vha->device_flags = DFLG_NO_CABLE;
2396 vha->flags.management_server_logged_in = 0;
2397 vha->marker_needed = 0;
2398 ha->isp_abort_cnt = 0;
2399 ha->beacon_blink_led = 0;
2401 set_bit(0, ha->req_qid_map);
2402 set_bit(0, ha->rsp_qid_map);
2404 ql_dbg(ql_dbg_init, vha, 0x0040,
2405 "Configuring PCI space...\n");
2406 rval = ha->isp_ops->pci_config(vha);
2408 ql_log(ql_log_warn, vha, 0x0044,
2409 "Unable to configure PCI space.\n");
2413 ha->isp_ops->reset_chip(vha);
2415 /* Check for secure flash support */
2416 if (IS_QLA28XX(ha)) {
2417 if (rd_reg_word(®->mailbox12) & BIT_0)
2418 ha->flags.secure_adapter = 1;
2419 ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n",
2420 (ha->flags.secure_adapter) ? "Yes" : "No");
2424 rval = qla2xxx_get_flash_info(vha);
2426 ql_log(ql_log_fatal, vha, 0x004f,
2427 "Unable to validate FLASH data.\n");
2431 if (IS_QLA8044(ha)) {
2432 qla8044_read_reset_template(vha);
2434 /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
2435 * If DONRESET_BIT0 is set, drivers should not set dev_state
2436 * to NEED_RESET. But if NEED_RESET is set, drivers should
2437 * should honor the reset. */
2438 if (ql2xdontresethba == 1)
2439 qla8044_set_idc_dontreset(vha);
2442 ha->isp_ops->get_flash_version(vha, req->ring);
2443 ql_dbg(ql_dbg_init, vha, 0x0061,
2444 "Configure NVRAM parameters...\n");
2446 /* Let priority default to FCP, can be overridden by nvram_config */
2447 ha->fc4_type_priority = FC4_PRIORITY_FCP;
2449 ha->isp_ops->nvram_config(vha);
/* Guard against a bogus NVRAM priority value — fall back to FCP. */
2451 if (ha->fc4_type_priority != FC4_PRIORITY_FCP &&
2452 ha->fc4_type_priority != FC4_PRIORITY_NVME)
2453 ha->fc4_type_priority = FC4_PRIORITY_FCP;
2455 ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n",
2456 ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe");
2458 if (ha->flags.disable_serdes) {
2459 /* Mask HBA via NVRAM settings? */
2460 ql_log(ql_log_info, vha, 0x0077,
2461 "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
2462 return QLA_FUNCTION_FAILED;
2465 ql_dbg(ql_dbg_init, vha, 0x0078,
2466 "Verifying loaded RISC code...\n");
2468 /* If smartsan enabled then require fdmi and rdp enabled */
/* No usable firmware resident: run chip diagnostics then load/setup. */
2474 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
2475 rval = ha->isp_ops->chip_diag(vha);
2478 rval = qla2x00_setup_chip(vha);
2483 if (IS_QLA84XX(ha)) {
2484 ha->cs84xx = qla84xx_get_chip(vha);
2486 ql_log(ql_log_warn, vha, 0x00d0,
2487 "Unable to configure ISP84XX.\n");
2488 return QLA_FUNCTION_FAILED;
2492 if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
2493 rval = qla2x00_init_rings(vha);
2495 /* No point in continuing if firmware initialization failed. */
2496 if (rval != QLA_SUCCESS)
2499 ha->flags.chip_reset_done = 1;
2501 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
2502 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
2503 rval = qla84xx_init_chip(vha);
2504 if (rval != QLA_SUCCESS) {
2505 ql_log(ql_log_warn, vha, 0x00d4,
2506 "Unable to initialize ISP84XX.\n");
2507 qla84xx_put_chip(vha);
2511 /* Load the NIC Core f/w if we are the first protocol driver. */
2512 if (IS_QLA8031(ha)) {
2513 rval = qla83xx_nic_core_fw_load(vha);
2515 ql_log(ql_log_warn, vha, 0x0124,
2516 "Error in initializing NIC Core f/w.\n");
2519 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
2520 qla24xx_read_fcp_prio_cfg(vha);
2522 if (IS_P3P_TYPE(ha))
2523 qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
2525 qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
2531 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
/*
 * Enables bus mastering and (best-effort) memory-write-invalidate,
 * turns on PCI parity/SERR reporting, disables the expansion ROM, and
 * caches the controller's ctrl_status word in ha->pci_attr under the
 * hardware lock.
 */
2534 * Returns 0 on success.
2537 qla2100_pci_config(scsi_qla_host_t *vha)
2540 unsigned long flags;
2541 struct qla_hw_data *ha = vha->hw;
2542 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2544 pci_set_master(ha->pdev);
2545 pci_try_set_mwi(ha->pdev);
2547 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2548 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2549 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2551 pci_disable_rom(ha->pdev);
2553 /* Get PCI bus information. */
2554 spin_lock_irqsave(&ha->hardware_lock, flags);
2555 ha->pci_attr = rd_reg_word(®->ctrl_status);
2556 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2562 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
/*
 * Like qla2100_pci_config(), plus: keeps INTx enabled on 2322/6322,
 * and on genuine ISP2300 silicon pauses the RISC to read the FPM frame
 * buffer revision — FPM_2300 parts have an MWI erratum, so MWI is
 * cleared for them before releasing the RISC.  Also bumps the PCI
 * latency timer to 0x80.
 */
2565 * Returns 0 on success.
2568 qla2300_pci_config(scsi_qla_host_t *vha)
2571 unsigned long flags = 0;
2573 struct qla_hw_data *ha = vha->hw;
2574 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2576 pci_set_master(ha->pdev);
2577 pci_try_set_mwi(ha->pdev);
2579 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2580 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2582 if (IS_QLA2322(ha) || IS_QLA6322(ha))
2583 w &= ~PCI_COMMAND_INTX_DISABLE;
2584 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2587 * If this is a 2300 card and not 2312, reset the
2588 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
2589 * the 2310 also reports itself as a 2300 so we need to get the
2590 * fb revision level -- a 6 indicates it really is a 2300 and
2593 if (IS_QLA2300(ha)) {
2594 spin_lock_irqsave(&ha->hardware_lock, flags);
/* Pause the RISC and poll (bounded) until it acknowledges the pause. */
2597 wrt_reg_word(®->hccr, HCCR_PAUSE_RISC);
2598 for (cnt = 0; cnt < 30000; cnt++) {
2599 if ((rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) != 0)
2605 /* Select FPM registers. */
2606 wrt_reg_word(®->ctrl_status, 0x20);
2607 rd_reg_word(®->ctrl_status);
2609 /* Get the fb rev level */
2610 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
2612 if (ha->fb_rev == FPM_2300)
2613 pci_clear_mwi(ha->pdev);
2615 /* Deselect FPM registers. */
2616 wrt_reg_word(®->ctrl_status, 0x0);
2617 rd_reg_word(®->ctrl_status);
2619 /* Release RISC module. */
2620 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC);
2621 for (cnt = 0; cnt < 30000; cnt++) {
2622 if ((rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) == 0)
2628 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2631 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2633 pci_disable_rom(ha->pdev);
2635 /* Get PCI bus information. */
2636 spin_lock_irqsave(&ha->hardware_lock, flags);
2637 ha->pci_attr = rd_reg_word(®->ctrl_status);
2638 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2644 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
2647 * Returns 0 on success.
2650 qla24xx_pci_config(scsi_qla_host_t *vha)
2653 unsigned long flags = 0;
2654 struct qla_hw_data *ha = vha->hw;
2655 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2657 pci_set_master(ha->pdev);
2658 pci_try_set_mwi(ha->pdev);
2660 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2661 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2662 w &= ~PCI_COMMAND_INTX_DISABLE;
2663 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2665 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2667 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
2668 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
2669 pcix_set_mmrbc(ha->pdev, 2048);
2671 /* PCIe -- adjust Maximum Read Request Size (2048). */
2672 if (pci_is_pcie(ha->pdev))
2673 pcie_set_readrq(ha->pdev, 4096);
2675 pci_disable_rom(ha->pdev);
2677 ha->chip_revision = ha->pdev->revision;
2679 /* Get PCI bus information. */
2680 spin_lock_irqsave(&ha->hardware_lock, flags);
2681 ha->pci_attr = rd_reg_dword(®->ctrl_status);
2682 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2688 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
2691 * Returns 0 on success.
2694 qla25xx_pci_config(scsi_qla_host_t *vha)
2697 struct qla_hw_data *ha = vha->hw;
2699 pci_set_master(ha->pdev);
2700 pci_try_set_mwi(ha->pdev);
2702 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2703 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2704 w &= ~PCI_COMMAND_INTX_DISABLE;
2705 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2707 /* PCIe -- adjust Maximum Read Request Size (2048). */
2708 if (pci_is_pcie(ha->pdev))
2709 pcie_set_readrq(ha->pdev, 4096);
2711 pci_disable_rom(ha->pdev);
2713 ha->chip_revision = ha->pdev->revision;
2719 * qla2x00_isp_firmware() - Choose firmware image.
2722 * Returns 0 on success.
2725 qla2x00_isp_firmware(scsi_qla_host_t *vha)
2728 uint16_t loop_id, topo, sw_cap;
2729 uint8_t domain, area, al_pa;
2730 struct qla_hw_data *ha = vha->hw;
2732 /* Assume loading risc code */
2733 rval = QLA_FUNCTION_FAILED;
2735 if (ha->flags.disable_risc_code_load) {
2736 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
2738 /* Verify checksum of loaded RISC code. */
2739 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
2740 if (rval == QLA_SUCCESS) {
2741 /* And, verify we are not in ROM code. */
2742 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2743 &area, &domain, &topo, &sw_cap);
2748 ql_dbg(ql_dbg_init, vha, 0x007a,
2749 "**** Load RISC code ****.\n");
2755 * qla2x00_reset_chip() - Reset ISP chip.
2758 * Returns 0 on success.
2761 qla2x00_reset_chip(scsi_qla_host_t *vha)
2763 unsigned long flags = 0;
2764 struct qla_hw_data *ha = vha->hw;
2765 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2768 int rval = QLA_FUNCTION_FAILED;
2770 if (unlikely(pci_channel_offline(ha->pdev)))
2773 ha->isp_ops->disable_intrs(ha);
2775 spin_lock_irqsave(&ha->hardware_lock, flags);
2777 /* Turn off master enable */
2779 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
2780 cmd &= ~PCI_COMMAND_MASTER;
2781 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
2783 if (!IS_QLA2100(ha)) {
2785 wrt_reg_word(®->hccr, HCCR_PAUSE_RISC);
2786 if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
2787 for (cnt = 0; cnt < 30000; cnt++) {
2788 if ((rd_reg_word(®->hccr) &
2789 HCCR_RISC_PAUSE) != 0)
2794 rd_reg_word(®->hccr); /* PCI Posting. */
2798 /* Select FPM registers. */
2799 wrt_reg_word(®->ctrl_status, 0x20);
2800 rd_reg_word(®->ctrl_status); /* PCI Posting. */
2802 /* FPM Soft Reset. */
2803 wrt_reg_word(®->fpm_diag_config, 0x100);
2804 rd_reg_word(®->fpm_diag_config); /* PCI Posting. */
2806 /* Toggle Fpm Reset. */
2807 if (!IS_QLA2200(ha)) {
2808 wrt_reg_word(®->fpm_diag_config, 0x0);
2809 rd_reg_word(®->fpm_diag_config); /* PCI Posting. */
2812 /* Select frame buffer registers. */
2813 wrt_reg_word(®->ctrl_status, 0x10);
2814 rd_reg_word(®->ctrl_status); /* PCI Posting. */
2816 /* Reset frame buffer FIFOs. */
2817 if (IS_QLA2200(ha)) {
2818 WRT_FB_CMD_REG(ha, reg, 0xa000);
2819 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */
2821 WRT_FB_CMD_REG(ha, reg, 0x00fc);
2823 /* Read back fb_cmd until zero or 3 seconds max */
2824 for (cnt = 0; cnt < 3000; cnt++) {
2825 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
2831 /* Select RISC module registers. */
2832 wrt_reg_word(®->ctrl_status, 0);
2833 rd_reg_word(®->ctrl_status); /* PCI Posting. */
2835 /* Reset RISC processor. */
2836 wrt_reg_word(®->hccr, HCCR_RESET_RISC);
2837 rd_reg_word(®->hccr); /* PCI Posting. */
2839 /* Release RISC processor. */
2840 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC);
2841 rd_reg_word(®->hccr); /* PCI Posting. */
2844 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT);
2845 wrt_reg_word(®->hccr, HCCR_CLR_HOST_INT);
2847 /* Reset ISP chip. */
2848 wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET);
2850 /* Wait for RISC to recover from reset. */
2851 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2853 * It is necessary to for a delay here since the card doesn't
2854 * respond to PCI reads during a reset. On some architectures
2855 * this will result in an MCA.
2858 for (cnt = 30000; cnt; cnt--) {
2859 if ((rd_reg_word(®->ctrl_status) &
2860 CSR_ISP_SOFT_RESET) == 0)
2867 /* Reset RISC processor. */
2868 wrt_reg_word(®->hccr, HCCR_RESET_RISC);
2870 wrt_reg_word(®->semaphore, 0);
2872 /* Release RISC processor. */
2873 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC);
2874 rd_reg_word(®->hccr); /* PCI Posting. */
2876 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2877 for (cnt = 0; cnt < 30000; cnt++) {
2878 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
2886 /* Turn on master enable */
2887 cmd |= PCI_COMMAND_MASTER;
2888 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
2890 /* Disable RISC pause on FPM parity error. */
2891 if (!IS_QLA2100(ha)) {
2892 wrt_reg_word(®->hccr, HCCR_DISABLE_PARITY_PAUSE);
2893 rd_reg_word(®->hccr); /* PCI Posting. */
2896 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2902 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
2905 * Returns 0 on success.
2908 qla81xx_reset_mpi(scsi_qla_host_t *vha)
2910 uint16_t mb[4] = {0x1010, 0, 1, 0};
2912 if (!IS_QLA81XX(vha->hw))
2915 return qla81xx_write_mpi_register(vha, mb);
2919 qla_chk_risc_recovery(scsi_qla_host_t *vha)
2921 struct qla_hw_data *ha = vha->hw;
2922 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2923 __le16 __iomem *mbptr = ®->mailbox0;
2926 int rc = QLA_SUCCESS;
2928 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2931 /* this check is only valid after RISC reset */
2932 mb[0] = rd_reg_word(mbptr);
2935 rc = QLA_FUNCTION_FAILED;
2937 for (i = 1; i < 32; i++) {
2938 mb[i] = rd_reg_word(mbptr);
2942 ql_log(ql_log_warn, vha, 0x1015,
2943 "RISC reset failed. mb[0-7] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
2944 mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6], mb[7]);
2945 ql_log(ql_log_warn, vha, 0x1015,
2946 "RISC reset failed. mb[8-15] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
2947 mb[8], mb[9], mb[10], mb[11], mb[12], mb[13], mb[14],
2949 ql_log(ql_log_warn, vha, 0x1015,
2950 "RISC reset failed. mb[16-23] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
2951 mb[16], mb[17], mb[18], mb[19], mb[20], mb[21], mb[22],
2953 ql_log(ql_log_warn, vha, 0x1015,
2954 "RISC reset failed. mb[24-31] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
2955 mb[24], mb[25], mb[26], mb[27], mb[28], mb[29], mb[30],
2962 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
2965 * Returns 0 on success.
2968 qla24xx_reset_risc(scsi_qla_host_t *vha)
2970 unsigned long flags = 0;
2971 struct qla_hw_data *ha = vha->hw;
2972 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2975 static int abts_cnt; /* ISP abort retry counts */
2976 int rval = QLA_SUCCESS;
2979 spin_lock_irqsave(&ha->hardware_lock, flags);
2982 wrt_reg_dword(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
2983 for (cnt = 0; cnt < 30000; cnt++) {
2984 if ((rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
2990 if (!(rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE))
2991 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
2993 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
2994 "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
2995 rd_reg_dword(®->hccr),
2996 rd_reg_dword(®->ctrl_status),
2997 (rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE));
2999 wrt_reg_dword(®->ctrl_status,
3000 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
3001 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
3005 /* Wait for firmware to complete NVRAM accesses. */
3006 rd_reg_word(®->mailbox0);
3007 for (cnt = 10000; rd_reg_word(®->mailbox0) != 0 &&
3008 rval == QLA_SUCCESS; cnt--) {
3013 rval = QLA_FUNCTION_TIMEOUT;
3016 if (rval == QLA_SUCCESS)
3017 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
3019 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
3020 "HCCR: 0x%x, MailBox0 Status 0x%x\n",
3021 rd_reg_dword(®->hccr),
3022 rd_reg_word(®->mailbox0));
3024 /* Wait for soft-reset to complete. */
3025 rd_reg_dword(®->ctrl_status);
3026 for (cnt = 0; cnt < 60; cnt++) {
3028 if ((rd_reg_dword(®->ctrl_status) &
3029 CSRX_ISP_SOFT_RESET) == 0)
3034 if (!(rd_reg_dword(®->ctrl_status) & CSRX_ISP_SOFT_RESET))
3035 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
3037 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
3038 "HCCR: 0x%x, Soft Reset status: 0x%x\n",
3039 rd_reg_dword(®->hccr),
3040 rd_reg_dword(®->ctrl_status));
3042 /* If required, do an MPI FW reset now */
3043 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
3044 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
3045 if (++abts_cnt < 5) {
3046 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3047 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
3050 * We exhausted the ISP abort retries. We have to
3051 * set the board offline.
3054 vha->flags.online = 0;
3059 wrt_reg_dword(®->hccr, HCCRX_SET_RISC_RESET);
3060 rd_reg_dword(®->hccr);
3062 wrt_reg_dword(®->hccr, HCCRX_REL_RISC_PAUSE);
3063 rd_reg_dword(®->hccr);
3065 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_RESET);
3067 rd_reg_dword(®->hccr);
3069 wd = rd_reg_word(®->mailbox0);
3070 for (cnt = 300; wd != 0 && rval == QLA_SUCCESS; cnt--) {
3074 if (print && qla_chk_risc_recovery(vha))
3077 wd = rd_reg_word(®->mailbox0);
3079 rval = QLA_FUNCTION_TIMEOUT;
3081 ql_log(ql_log_warn, vha, 0x015e,
3082 "RISC reset timeout\n");
3086 if (rval == QLA_SUCCESS)
3087 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
3089 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
3090 "Host Risc 0x%x, mailbox0 0x%x\n",
3091 rd_reg_dword(®->hccr),
3092 rd_reg_word(®->mailbox0));
3094 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3096 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
3097 "Driver in %s mode\n",
3098 IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
3100 if (IS_NOPOLLING_TYPE(ha))
3101 ha->isp_ops->enable_intrs(ha);
3107 qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
3109 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
3111 wrt_reg_dword(®->iobase_addr, RISC_REGISTER_BASE_OFFSET);
3112 *data = rd_reg_dword(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET);
3116 qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
3118 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
3120 wrt_reg_dword(®->iobase_addr, RISC_REGISTER_BASE_OFFSET);
3121 wrt_reg_dword(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data);
3125 qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
3128 uint delta_msec = 100;
3129 uint elapsed_msec = 0;
3133 if (vha->hw->pdev->subsystem_device != 0x0175 &&
3134 vha->hw->pdev->subsystem_device != 0x0240)
3137 wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
3141 timeout_msec = TIMEOUT_SEMAPHORE;
3142 n = timeout_msec / delta_msec;
3144 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
3145 qla25xx_read_risc_sema_reg(vha, &wd32);
3146 if (wd32 & RISC_SEMAPHORE)
3149 elapsed_msec += delta_msec;
3150 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
3154 if (!(wd32 & RISC_SEMAPHORE))
3157 if (!(wd32 & RISC_SEMAPHORE_FORCE))
3160 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
3161 timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
3162 n = timeout_msec / delta_msec;
3164 qla25xx_read_risc_sema_reg(vha, &wd32);
3165 if (!(wd32 & RISC_SEMAPHORE_FORCE))
3168 elapsed_msec += delta_msec;
3169 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
3173 if (wd32 & RISC_SEMAPHORE_FORCE)
3174 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
3179 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
3186 * qla24xx_reset_chip() - Reset ISP24xx chip.
3189 * Returns 0 on success.
3192 qla24xx_reset_chip(scsi_qla_host_t *vha)
3194 struct qla_hw_data *ha = vha->hw;
3195 int rval = QLA_FUNCTION_FAILED;
3197 if (pci_channel_offline(ha->pdev) &&
3198 ha->flags.pci_channel_io_perm_failure) {
3202 ha->isp_ops->disable_intrs(ha);
3204 qla25xx_manipulate_risc_semaphore(vha);
3206 /* Perform RISC reset. */
3207 rval = qla24xx_reset_risc(vha);
3213 * qla2x00_chip_diag() - Test chip for proper operation.
3216 * Returns 0 on success.
3219 qla2x00_chip_diag(scsi_qla_host_t *vha)
3222 struct qla_hw_data *ha = vha->hw;
3223 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3224 unsigned long flags = 0;
3228 struct req_que *req = ha->req_q_map[0];
3230 /* Assume a failed state */
3231 rval = QLA_FUNCTION_FAILED;
3233 ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
3234 ®->flash_address);
3236 spin_lock_irqsave(&ha->hardware_lock, flags);
3238 /* Reset ISP chip. */
3239 wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET);
3242 * We need to have a delay here since the card will not respond while
3243 * in reset causing an MCA on some architectures.
3246 data = qla2x00_debounce_register(®->ctrl_status);
3247 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
3249 data = rd_reg_word(®->ctrl_status);
3254 goto chip_diag_failed;
3256 ql_dbg(ql_dbg_init, vha, 0x007c,
3257 "Reset register cleared by chip reset.\n");
3259 /* Reset RISC processor. */
3260 wrt_reg_word(®->hccr, HCCR_RESET_RISC);
3261 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC);
3263 /* Workaround for QLA2312 PCI parity error */
3264 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
3265 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
3266 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
3268 data = RD_MAILBOX_REG(ha, reg, 0);
3275 goto chip_diag_failed;
3277 /* Check product ID of chip */
3278 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");
3280 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
3281 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
3282 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
3283 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
3284 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
3285 mb[3] != PROD_ID_3) {
3286 ql_log(ql_log_warn, vha, 0x0062,
3287 "Wrong product ID = 0x%x,0x%x,0x%x.\n",
3288 mb[1], mb[2], mb[3]);
3290 goto chip_diag_failed;
3292 ha->product_id[0] = mb[1];
3293 ha->product_id[1] = mb[2];
3294 ha->product_id[2] = mb[3];
3295 ha->product_id[3] = mb[4];
3297 /* Adjust fw RISC transfer size */
3298 if (req->length > 1024)
3299 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
3301 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
3304 if (IS_QLA2200(ha) &&
3305 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
3306 /* Limit firmware transfer size with a 2200A */
3307 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
3309 ha->device_type |= DT_ISP2200A;
3310 ha->fw_transfer_size = 128;
3313 /* Wrap Incoming Mailboxes Test. */
3314 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3316 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
3317 rval = qla2x00_mbx_reg_test(vha);
3319 ql_log(ql_log_warn, vha, 0x0080,
3320 "Failed mailbox send register test.\n");
3322 /* Flag a successful rval */
3324 spin_lock_irqsave(&ha->hardware_lock, flags);
3328 ql_log(ql_log_info, vha, 0x0081,
3329 "Chip diagnostics **** FAILED ****.\n");
3331 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3337 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
3340 * Returns 0 on success.
3343 qla24xx_chip_diag(scsi_qla_host_t *vha)
3346 struct qla_hw_data *ha = vha->hw;
3347 struct req_que *req = ha->req_q_map[0];
3349 if (IS_P3P_TYPE(ha))
3352 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
3354 rval = qla2x00_mbx_reg_test(vha);
3356 ql_log(ql_log_warn, vha, 0x0082,
3357 "Failed mailbox send register test.\n");
3359 /* Flag a successful rval */
3367 qla2x00_init_fce_trace(scsi_qla_host_t *vha)
3372 struct qla_hw_data *ha = vha->hw;
3374 if (!IS_FWI2_CAPABLE(ha))
3377 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3378 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3382 ql_dbg(ql_dbg_init, vha, 0x00bd,
3383 "%s: FCE Mem is already allocated.\n",
3388 /* Allocate memory for Fibre Channel Event Buffer. */
3389 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
3392 ql_log(ql_log_warn, vha, 0x00be,
3393 "Unable to allocate (%d KB) for FCE.\n",
3398 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
3399 ha->fce_mb, &ha->fce_bufs);
3401 ql_log(ql_log_warn, vha, 0x00bf,
3402 "Unable to initialize FCE (%d).\n", rval);
3403 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
3407 ql_dbg(ql_dbg_init, vha, 0x00c0,
3408 "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
3410 ha->flags.fce_enabled = 1;
3411 ha->fce_dma = tc_dma;
3416 qla2x00_init_eft_trace(scsi_qla_host_t *vha)
3421 struct qla_hw_data *ha = vha->hw;
3423 if (!IS_FWI2_CAPABLE(ha))
3427 ql_dbg(ql_dbg_init, vha, 0x00bd,
3428 "%s: EFT Mem is already allocated.\n",
3433 /* Allocate memory for Extended Trace Buffer. */
3434 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
3437 ql_log(ql_log_warn, vha, 0x00c1,
3438 "Unable to allocate (%d KB) for EFT.\n",
3443 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
3445 ql_log(ql_log_warn, vha, 0x00c2,
3446 "Unable to initialize EFT (%d).\n", rval);
3447 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
3451 ql_dbg(ql_dbg_init, vha, 0x00c3,
3452 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
3454 ha->eft_dma = tc_dma;
3459 qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
3461 qla2x00_init_fce_trace(vha);
3462 qla2x00_init_eft_trace(vha);
3466 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
3468 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
3469 eft_size, fce_size, mq_size;
3470 struct qla_hw_data *ha = vha->hw;
3471 struct req_que *req = ha->req_q_map[0];
3472 struct rsp_que *rsp = ha->rsp_q_map[0];
3473 struct qla2xxx_fw_dump *fw_dump;
3476 ql_dbg(ql_dbg_init, vha, 0x00bd,
3477 "Firmware dump already allocated.\n");
3482 ha->fw_dump_cap_flags = 0;
3483 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
3484 req_q_size = rsp_q_size = 0;
3486 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3487 fixed_size = sizeof(struct qla2100_fw_dump);
3488 } else if (IS_QLA23XX(ha)) {
3489 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
3490 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
3492 } else if (IS_FWI2_CAPABLE(ha)) {
3494 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
3495 else if (IS_QLA81XX(ha))
3496 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
3497 else if (IS_QLA25XX(ha))
3498 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
3500 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
3502 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
3505 if (!IS_QLA83XX(ha))
3506 mq_size = sizeof(struct qla2xxx_mq_chain);
3508 * Allocate maximum buffer size for all queues - Q0.
3509 * Resizing must be done at end-of-dump processing.
3511 mq_size += (ha->max_req_queues - 1) *
3512 (req->length * sizeof(request_t));
3513 mq_size += (ha->max_rsp_queues - 1) *
3514 (rsp->length * sizeof(response_t));
3516 if (ha->tgt.atio_ring)
3517 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
3519 qla2x00_init_fce_trace(vha);
3521 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
3522 qla2x00_init_eft_trace(vha);
3524 eft_size = EFT_SIZE;
3527 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3528 struct fwdt *fwdt = ha->fwdt;
3531 for (j = 0; j < 2; j++, fwdt++) {
3532 if (!fwdt->template) {
3533 ql_dbg(ql_dbg_init, vha, 0x00ba,
3534 "-> fwdt%u no template\n", j);
3537 ql_dbg(ql_dbg_init, vha, 0x00fa,
3538 "-> fwdt%u calculating fwdump size...\n", j);
3539 fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
3540 vha, fwdt->template);
3541 ql_dbg(ql_dbg_init, vha, 0x00fa,
3542 "-> fwdt%u calculated fwdump size = %#lx bytes\n",
3543 j, fwdt->dump_size);
3544 dump_size += fwdt->dump_size;
3546 /* Add space for spare MPI fw dump. */
3547 dump_size += ha->fwdt[1].dump_size;
3549 req_q_size = req->length * sizeof(request_t);
3550 rsp_q_size = rsp->length * sizeof(response_t);
3551 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
3552 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size
3554 ha->chain_offset = dump_size;
3555 dump_size += mq_size + fce_size;
3556 if (ha->exchoffld_buf)
3557 dump_size += sizeof(struct qla2xxx_offld_chain) +
3559 if (ha->exlogin_buf)
3560 dump_size += sizeof(struct qla2xxx_offld_chain) +
3564 if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {
3566 ql_dbg(ql_dbg_init, vha, 0x00c5,
3567 "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n",
3568 __func__, dump_size, ha->fw_dump_len,
3569 ha->fw_dump_alloc_len);
3571 fw_dump = vmalloc(dump_size);
3573 ql_log(ql_log_warn, vha, 0x00c4,
3574 "Unable to allocate (%d KB) for firmware dump.\n",
3577 mutex_lock(&ha->optrom_mutex);
3578 if (ha->fw_dumped) {
3579 memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
3581 ha->fw_dump = fw_dump;
3582 ha->fw_dump_alloc_len = dump_size;
3583 ql_dbg(ql_dbg_init, vha, 0x00c5,
3584 "Re-Allocated (%d KB) and save firmware dump.\n",
3588 ha->fw_dump = fw_dump;
3590 ha->fw_dump_len = ha->fw_dump_alloc_len =
3592 ql_dbg(ql_dbg_init, vha, 0x00c5,
3593 "Allocated (%d KB) for firmware dump.\n",
3596 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3597 ha->mpi_fw_dump = (char *)fw_dump +
3598 ha->fwdt[1].dump_size;
3599 mutex_unlock(&ha->optrom_mutex);
3603 ha->fw_dump->signature[0] = 'Q';
3604 ha->fw_dump->signature[1] = 'L';
3605 ha->fw_dump->signature[2] = 'G';
3606 ha->fw_dump->signature[3] = 'C';
3607 ha->fw_dump->version = htonl(1);
3609 ha->fw_dump->fixed_size = htonl(fixed_size);
3610 ha->fw_dump->mem_size = htonl(mem_size);
3611 ha->fw_dump->req_q_size = htonl(req_q_size);
3612 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
3614 ha->fw_dump->eft_size = htonl(eft_size);
3615 ha->fw_dump->eft_addr_l =
3616 htonl(LSD(ha->eft_dma));
3617 ha->fw_dump->eft_addr_h =
3618 htonl(MSD(ha->eft_dma));
3620 ha->fw_dump->header_size =
3622 (struct qla2xxx_fw_dump, isp));
3624 mutex_unlock(&ha->optrom_mutex);
3630 qla81xx_mpi_sync(scsi_qla_host_t *vha)
3632 #define MPS_MASK 0xe0
3637 if (!IS_QLA81XX(vha->hw))
3640 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
3641 if (rval != QLA_SUCCESS) {
3642 ql_log(ql_log_warn, vha, 0x0105,
3643 "Unable to acquire semaphore.\n");
3647 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
3648 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
3649 if (rval != QLA_SUCCESS) {
3650 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
3655 if (dc == (dw & MPS_MASK))
3660 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
3661 if (rval != QLA_SUCCESS) {
3662 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
3666 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
3667 if (rval != QLA_SUCCESS) {
3668 ql_log(ql_log_warn, vha, 0x006d,
3669 "Unable to release semaphore.\n");
3677 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
3679 /* Don't try to reallocate the array */
3680 if (req->outstanding_cmds)
3683 if (!IS_FWI2_CAPABLE(ha))
3684 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
3686 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
3687 req->num_outstanding_cmds = ha->cur_fw_xcb_count;
3689 req->num_outstanding_cmds = ha->cur_fw_iocb_count;
3692 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3696 if (!req->outstanding_cmds) {
3698 * Try to allocate a minimal size just so we can get through
3701 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
3702 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3706 if (!req->outstanding_cmds) {
3707 ql_log(ql_log_fatal, NULL, 0x0126,
3708 "Failed to allocate memory for "
3709 "outstanding_cmds for req_que %p.\n", req);
3710 req->num_outstanding_cmds = 0;
3711 return QLA_FUNCTION_FAILED;
/*
 * Append _str to the string being built in `ptr` when bit _flag is set
 * in a0->_field, inserting a '|' separator after the first item.
 * Relies on locals a0/ptr/p/leftover/len in the enclosing function.
 */
#define PRINT_FIELD(_field, _flag, _str) {		\
	if (a0->_field & _flag) {\
		if (p) {\
			strcat(ptr, "|");\
			ptr++;\
			leftover--;\
		} \
		len = snprintf(ptr, leftover, "%s", _str);	\
		p = 1;\
		leftover -= len;\
		ptr += len; \
	} \
}

#define STR_LEN	64
3732 static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
3735 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
3736 u8 str[STR_LEN], *ptr, p;
3739 memset(str, 0, STR_LEN);
3740 snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name);
3741 ql_dbg(ql_dbg_init, vha, 0x015a,
3742 "SFP MFG Name: %s\n", str);
3744 memset(str, 0, STR_LEN);
3745 snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn);
3746 ql_dbg(ql_dbg_init, vha, 0x015c,
3747 "SFP Part Name: %s\n", str);
3750 memset(str, 0, STR_LEN);
3754 PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
3755 PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
3756 PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
3757 PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
3758 PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
3759 PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
3760 PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
3761 ql_dbg(ql_dbg_init, vha, 0x0160,
3762 "SFP Media: %s\n", str);
3765 memset(str, 0, STR_LEN);
3769 PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
3770 PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
3771 PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
3772 PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
3773 PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
3774 ql_dbg(ql_dbg_init, vha, 0x0196,
3775 "SFP Link Length: %s\n", str);
3777 memset(str, 0, STR_LEN);
3781 PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
3782 PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
3783 PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
3784 PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
3785 PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
3786 ql_dbg(ql_dbg_init, vha, 0x016e,
3787 "SFP FC Link Tech: %s\n", str);
3790 ql_dbg(ql_dbg_init, vha, 0x016f,
3791 "SFP Distant: %d km\n", a0->length_km);
3792 if (a0->length_100m)
3793 ql_dbg(ql_dbg_init, vha, 0x0170,
3794 "SFP Distant: %d m\n", a0->length_100m*100);
3795 if (a0->length_50um_10m)
3796 ql_dbg(ql_dbg_init, vha, 0x0189,
3797 "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10);
3798 if (a0->length_62um_10m)
3799 ql_dbg(ql_dbg_init, vha, 0x018a,
3800 "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
3801 if (a0->length_om4_10m)
3802 ql_dbg(ql_dbg_init, vha, 0x0194,
3803 "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10);
3804 if (a0->length_om3_10m)
3805 ql_dbg(ql_dbg_init, vha, 0x0195,
3806 "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10);
3811 * qla24xx_detect_sfp()
3813 * @vha: adapter state pointer.
3816 * 0 -- Configure firmware to use short-range settings -- normal
3817 * buffer-to-buffer credits.
3819 * 1 -- Configure firmware to use long-range settings -- extra
3820 * buffer-to-buffer credits should be allocated with
3821 * ha->lr_distance containing distance settings from NVRAM or SFP
3825 qla24xx_detect_sfp(scsi_qla_host_t *vha)
3828 struct sff_8247_a0 *a;
3829 struct qla_hw_data *ha = vha->hw;
3830 struct nvram_81xx *nv = ha->nvram;
3831 #define LR_DISTANCE_UNKNOWN 2
3832 static const char * const types[] = { "Short", "Long" };
3833 static const char * const lengths[] = { "(10km)", "(5km)", "" };
3836 /* Seed with NVRAM settings. */
3838 ha->flags.lr_detected = 0;
3839 if (IS_BPM_RANGE_CAPABLE(ha) &&
3840 (nv->enhanced_features & NEF_LR_DIST_ENABLE)) {
3842 ha->flags.lr_detected = 1;
3844 (nv->enhanced_features >> LR_DIST_NV_POS)
3848 if (!IS_BPM_ENABLED(vha))
3850 /* Determine SR/LR capabilities of SFP/Transceiver. */
3851 rc = qla2x00_read_sfp_dev(vha, NULL, 0);
3856 a = (struct sff_8247_a0 *)vha->hw->sfp_data;
3857 qla2xxx_print_sfp_info(vha);
3859 ha->flags.lr_detected = 0;
3861 if (ll & FC_LL_VL || ll & FC_LL_L) {
3862 /* Long range, track length. */
3863 ha->flags.lr_detected = 1;
3865 if (a->length_km > 5 || a->length_100m > 50)
3866 ha->lr_distance = LR_DISTANCE_10K;
3868 ha->lr_distance = LR_DISTANCE_5K;
3872 ql_dbg(ql_dbg_async, vha, 0x507b,
3873 "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n",
3874 types[ha->flags.lr_detected],
3875 ha->flags.lr_detected ? lengths[ha->lr_distance] :
3876 lengths[LR_DISTANCE_UNKNOWN],
3877 used_nvram, ll, ha->flags.lr_detected, ha->lr_distance);
3878 return ha->flags.lr_detected;
3881 void qla_init_iocb_limit(scsi_qla_host_t *vha)
3885 struct qla_hw_data *ha = vha->hw;
3887 num_qps = ha->num_qpairs + 1;
3888 limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
3890 ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
3891 ha->base_qpair->fwres.iocbs_limit = limit;
3892 ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
3893 ha->base_qpair->fwres.iocbs_used = 0;
3894 for (i = 0; i < ha->max_qpairs; i++) {
3895 if (ha->queue_pair_map[i]) {
3896 ha->queue_pair_map[i]->fwres.iocbs_total =
3897 ha->orig_fw_iocb_count;
3898 ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
3899 ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
3901 ha->queue_pair_map[i]->fwres.iocbs_used = 0;
3907 * qla2x00_setup_chip() - Load and start RISC firmware.
3910 * Returns 0 on success.
3913 qla2x00_setup_chip(scsi_qla_host_t *vha)
3916 uint32_t srisc_address = 0;
3917 struct qla_hw_data *ha = vha->hw;
3918 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3919 unsigned long flags;
3920 uint16_t fw_major_version;
/* P3P (ISP82xx) parts: load, stop firmware, then skip to NPIV enable. */
3923 if (IS_P3P_TYPE(ha)) {
3924 rval = ha->isp_ops->load_risc(vha, &srisc_address);
3925 if (rval == QLA_SUCCESS) {
3926 qla2x00_stop_firmware(vha);
3927 goto enable_82xx_npiv;
/* Legacy (pre-FWI2, non-2100/2200) chips: parity must be off while loading.
 * NOTE(review): "®" entity garbling restored to "&reg" below. */
3932 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
3933 /* Disable SRAM, Instruction RAM and GP RAM parity. */
3934 spin_lock_irqsave(&ha->hardware_lock, flags);
3935 wrt_reg_word(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
3936 rd_reg_word(&reg->hccr);
3937 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3940 qla81xx_mpi_sync(vha);
3943 /* Load firmware sequences */
3944 rval = ha->isp_ops->load_risc(vha, &srisc_address);
3945 if (rval == QLA_SUCCESS) {
3946 ql_dbg(ql_dbg_init, vha, 0x00c9,
3947 "Verifying Checksum of loaded RISC code.\n");
3949 rval = qla2x00_verify_checksum(vha, srisc_address);
3950 if (rval == QLA_SUCCESS) {
3951 /* Start firmware execution. */
3952 ql_dbg(ql_dbg_init, vha, 0x00ca,
3953 "Starting firmware.\n");
3956 ha->flags.exlogins_enabled = 1;
3958 if (qla_is_exch_offld_enabled(vha))
3959 ha->flags.exchoffld_enabled = 1;
3961 rval = qla2x00_execute_fw(vha, srisc_address);
3962 /* Retrieve firmware information. */
3963 if (rval == QLA_SUCCESS) {
/* One-time SFP detect; a BPM-capable transceiver forces a fw restart. */
3964 /* Enable BPM support? */
3965 if (!done_once++ && qla24xx_detect_sfp(vha)) {
3966 ql_dbg(ql_dbg_init, vha, 0x00ca,
3967 "Re-starting firmware -- BPM.\n");
3968 /* Best-effort - re-init. */
3969 ha->isp_ops->reset_chip(vha);
3970 ha->isp_ops->chip_diag(vha);
3971 goto execute_fw_with_lr;
3974 if (IS_ZIO_THRESHOLD_CAPABLE(ha))
3975 qla27xx_set_zio_threshold(vha,
3976 ha->last_zio_threshold);
3978 rval = qla2x00_set_exlogins_buffer(vha);
3979 if (rval != QLA_SUCCESS)
3982 rval = qla2x00_set_exchoffld_buffer(vha);
3983 if (rval != QLA_SUCCESS)
3987 fw_major_version = ha->fw_major_version;
3988 if (IS_P3P_TYPE(ha))
3989 qla82xx_check_md_needed(vha);
3991 rval = qla2x00_get_fw_version(vha);
3992 if (rval != QLA_SUCCESS)
3994 ha->flags.npiv_supported = 0;
3995 if (IS_QLA2XXX_MIDTYPE(ha) &&
3996 (ha->fw_attributes & BIT_2)) {
3997 ha->flags.npiv_supported = 1;
/* Clamp vport count so (count + 1) is a MIN_MULTI_ID_FABRIC multiple. */
3998 if ((!ha->max_npiv_vports) ||
3999 ((ha->max_npiv_vports + 1) %
4000 MIN_MULTI_ID_FABRIC))
4001 ha->max_npiv_vports =
4002 MIN_MULTI_ID_FABRIC - 1;
4004 qla2x00_get_resource_cnts(vha);
4005 qla_init_iocb_limit(vha);
4008 * Allocate the array of outstanding commands
4009 * now that we know the firmware resources.
4011 rval = qla2x00_alloc_outstanding_cmds(ha,
4013 if (rval != QLA_SUCCESS)
4016 if (!fw_major_version && !(IS_P3P_TYPE(ha)))
4017 qla2x00_alloc_offload_mem(vha);
4019 if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
4020 qla2x00_alloc_fw_dump(vha);
4026 ql_log(ql_log_fatal, vha, 0x00cd,
4027 "ISP Firmware failed checksum.\n");
4031 /* Enable PUREX PASSTHRU */
4032 if (ql2xrdpenable || ha->flags.scm_supported_f ||
4033 ha->flags.edif_enabled)
4034 qla25xx_set_els_cmds_supported(vha);
/* Legacy chips: re-enable parity checking now that firmware is running. */
4038 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
4039 /* Enable proper parity. */
4040 spin_lock_irqsave(&ha->hardware_lock, flags);
4043 wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
4045 /* SRAM, Instruction RAM and GP RAM parity */
4046 wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
4047 rd_reg_word(&reg->hccr);
4048 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4051 if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
4052 ha->flags.fac_supported = 1;
4053 else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
4056 rval = qla81xx_fac_get_sector_size(vha, &size);
4057 if (rval == QLA_SUCCESS) {
4058 ha->flags.fac_supported = 1;
4059 ha->fdt_block_size = size << 2;
4061 ql_log(ql_log_warn, vha, 0x00ce,
4062 "Unsupported FAC firmware (%d.%02d.%02d).\n",
4063 ha->fw_major_version, ha->fw_minor_version,
4064 ha->fw_subminor_version);
4066 if (IS_QLA83XX(ha)) {
4067 ha->flags.fac_supported = 0;
4074 ql_log(ql_log_fatal, vha, 0x00cf,
4075 "Setup chip ****FAILED****.\n");
4082 * qla2x00_init_response_q_entries() - Initializes response queue entries.
4083 * @rsp: response queue
4085 * Beginning of request ring has initialization control block already built
4086 * by nvram config routine.
4088 * Returns 0 on success.
4091 qla2x00_init_response_q_entries(struct rsp_que *rsp)
/* Reset ring pointer/index and clear any in-flight status continuation. */
4096 rsp->ring_ptr = rsp->ring;
4097 rsp->ring_index = 0;
4098 rsp->status_srb = NULL;
4099 pkt = rsp->ring_ptr;
/* Stamp every entry RESPONSE_PROCESSED so stale entries are ignored. */
4100 for (cnt = 0; cnt < rsp->length; cnt++) {
4101 pkt->signature = RESPONSE_PROCESSED;
4107 * qla2x00_update_fw_options() - Read and process firmware options.
4110 * Returns 0 on success.
4113 qla2x00_update_fw_options(scsi_qla_host_t *vha)
4115 uint16_t swing, emphasis, tx_sens, rx_sens;
4116 struct qla_hw_data *ha = vha->hw;
4118 memset(ha->fw_options, 0, sizeof(ha->fw_options));
4119 qla2x00_get_fw_options(vha, ha->fw_options);
/* 2100/2200 have no serial-link tuning options; nothing more to do. */
4121 if (IS_QLA2100(ha) || IS_QLA2200(ha))
4124 /* Serial Link options. */
4125 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
4126 "Serial link options.\n");
4127 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
4128 ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));
/* Apply NVRAM swing/emphasis overrides only when BIT_2 requests them. */
4130 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
4131 if (ha->fw_seriallink_options[3] & BIT_2) {
4132 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
/* 1G settings: unpack swing/emphasis/tx/rx sensitivity bit-fields. */
4135 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
4136 emphasis = (ha->fw_seriallink_options[2] &
4137 (BIT_4 | BIT_3)) >> 3;
4138 tx_sens = ha->fw_seriallink_options[0] &
4139 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4140 rx_sens = (ha->fw_seriallink_options[0] &
4141 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
4142 ha->fw_options[10] = (emphasis << 14) | (swing << 8);
4143 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
4146 ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
4147 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
4148 ha->fw_options[10] |= BIT_5 |
4149 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
4150 (tx_sens & (BIT_1 | BIT_0));
/* 2G settings: same unpacking from the second half of the options. */
4153 swing = (ha->fw_seriallink_options[2] &
4154 (BIT_7 | BIT_6 | BIT_5)) >> 5;
4155 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
4156 tx_sens = ha->fw_seriallink_options[1] &
4157 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4158 rx_sens = (ha->fw_seriallink_options[1] &
4159 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
4160 ha->fw_options[11] = (emphasis << 14) | (swing << 8);
4161 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
4164 ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
4165 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
4166 ha->fw_options[11] |= BIT_5 |
4167 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
4168 (tx_sens & (BIT_1 | BIT_0));
4172 /* Return command IOCBs without waiting for an ABTS to complete. */
4173 ha->fw_options[3] |= BIT_13;
4176 if (ha->flags.enable_led_scheme)
4177 ha->fw_options[2] |= BIT_12;
4179 /* Detect ISP6312. */
4181 ha->fw_options[2] |= BIT_13;
4183 /* Set Retry FLOGI in case of P2P connection */
4184 if (ha->operating_mode == P2P) {
4185 ha->fw_options[2] |= BIT_3;
4186 ql_dbg(ql_dbg_disc, vha, 0x2100,
4187 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
4188 __func__, ha->fw_options[2]);
4191 /* Update firmware options. */
4192 qla2x00_set_fw_options(vha, ha->fw_options);
/*
 * qla24xx_update_fw_options() - Program FWI2 (24xx+) firmware option bits
 * and serdes parameters. No-op for P3P (82xx) adapters.
 */
4196 qla24xx_update_fw_options(scsi_qla_host_t *vha)
4199 struct qla_hw_data *ha = vha->hw;
4201 if (IS_P3P_TYPE(ha))
4204 /* Hold status IOCBs until ABTS response received. */
4206 ha->fw_options[3] |= BIT_12;
4208 /* Set Retry FLOGI in case of P2P connection */
4209 if (ha->operating_mode == P2P) {
4210 ha->fw_options[2] |= BIT_3;
4211 ql_dbg(ql_dbg_disc, vha, 0x2101,
4212 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
4213 __func__, ha->fw_options[2]);
4216 /* Move PUREX, ABTS RX & RIDA to ATIOQ */
4217 if (ql2xmvasynctoatio && !ha->flags.edif_enabled &&
4218 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
4219 if (qla_tgt_mode_enabled(vha) ||
4220 qla_dual_mode_enabled(vha))
4221 ha->fw_options[2] |= BIT_11;
4223 ha->fw_options[2] &= ~BIT_11;
4226 if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4229 * Tell FW to track each exchange to prevent
4230 * driver from using stale exchange.
4232 if (qla_tgt_mode_enabled(vha) ||
4233 qla_dual_mode_enabled(vha))
4234 ha->fw_options[2] |= BIT_4;
4236 ha->fw_options[2] &= ~(BIT_4);
4238 /* Reserve 1/2 of emergency exchanges for ELS.*/
4239 if (qla2xuseresexchforels)
4240 ha->fw_options[2] |= BIT_8;
4242 ha->fw_options[2] &= ~BIT_8;
4245 * N2N: set Secure=1 for PLOGI ACC and
4246 * fw shal not send PRLI after PLOGI Acc
4248 if (ha->flags.edif_enabled &&
4249 vha->e_dbell.db_flags & EDB_ACTIVE) {
4250 ha->fw_options[3] |= BIT_15;
4251 ha->flags.n2n_fw_acc_sec = 1;
4253 ha->fw_options[3] &= ~BIT_15;
4254 ha->flags.n2n_fw_acc_sec = 0;
/* Enable PUREX IOCB delivery when RDP/SCM/EDIF needs it. */
4258 if (ql2xrdpenable || ha->flags.scm_supported_f ||
4259 ha->flags.edif_enabled)
4260 ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;
4262 /* Enable Async 8130/8131 events -- transceiver insertion/removal */
4263 if (IS_BPM_RANGE_CAPABLE(ha))
4264 ha->fw_options[3] |= BIT_10;
4266 ql_dbg(ql_dbg_init, vha, 0x00e8,
4267 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
4268 __func__, ha->fw_options[1], ha->fw_options[2],
4269 ha->fw_options[3], vha->host->active_mode);
/* Only push options to firmware when at least one word is non-zero. */
4271 if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
4272 qla2x00_set_fw_options(vha, ha->fw_options);
4274 /* Update Serial Link options. */
4275 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
4278 rval = qla2x00_set_serdes_params(vha,
4279 le16_to_cpu(ha->fw_seriallink_options24[1]),
4280 le16_to_cpu(ha->fw_seriallink_options24[2]),
4281 le16_to_cpu(ha->fw_seriallink_options24[3]));
4282 if (rval != QLA_SUCCESS) {
4283 ql_log(ql_log_warn, vha, 0x0104,
4284 "Unable to update Serial Link options (%x).\n", rval);
/*
 * qla2x00_config_rings() - Program request/response ring parameters into
 * the init control block and zero the legacy in/out ring registers.
 */
4289 qla2x00_config_rings(struct scsi_qla_host *vha)
4291 struct qla_hw_data *ha = vha->hw;
4292 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
4293 struct req_que *req = ha->req_q_map[0];
4294 struct rsp_que *rsp = ha->rsp_q_map[0];
4296 /* Setup ring parameters in initialization control block. */
4297 ha->init_cb->request_q_outpointer = cpu_to_le16(0);
4298 ha->init_cb->response_q_inpointer = cpu_to_le16(0);
4299 ha->init_cb->request_q_length = cpu_to_le16(req->length);
4300 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
4301 put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
4302 put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);
/* Zero the hardware ring index registers; read back to flush PCI writes. */
4304 wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0);
4305 wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0);
4306 wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0);
4307 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0);
4308 rd_reg_word(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
/*
 * qla24xx_config_rings() - Program FWI2 request/response/ATIO ring
 * parameters, MSI-X vector and option bits into the 24xx init control
 * block, then zero the hardware ring index registers.
 * NOTE(review): "®" entity garbling restored to "&reg" below.
 */
4312 qla24xx_config_rings(struct scsi_qla_host *vha)
4314 struct qla_hw_data *ha = vha->hw;
4315 device_reg_t *reg = ISP_QUE_REG(ha, 0);
4316 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
4317 struct qla_msix_entry *msix;
4318 struct init_cb_24xx *icb;
4320 struct req_que *req = ha->req_q_map[0];
4321 struct rsp_que *rsp = ha->rsp_q_map[0];
4323 /* Setup ring parameters in initialization control block. */
4324 icb = (struct init_cb_24xx *)ha->init_cb;
4325 icb->request_q_outpointer = cpu_to_le16(0);
4326 icb->response_q_inpointer = cpu_to_le16(0);
4327 icb->request_q_length = cpu_to_le16(req->length);
4328 icb->response_q_length = cpu_to_le16(rsp->length);
4329 put_unaligned_le64(req->dma, &icb->request_q_address);
4330 put_unaligned_le64(rsp->dma, &icb->response_q_address);
4332 /* Setup ATIO queue dma pointers for target mode */
4333 icb->atio_q_inpointer = cpu_to_le16(0);
4334 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
4335 put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);
4337 if (IS_SHADOW_REG_CAPABLE(ha))
4338 icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
/* Multi-queue capable adapters: program QoS, RID and MSI-X vector. */
4340 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4342 icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
4343 icb->rid = cpu_to_le16(rid);
4344 if (ha->flags.msix_enabled) {
4345 msix = &ha->msix_entries[1];
4346 ql_dbg(ql_dbg_init, vha, 0x0019,
4347 "Registering vector 0x%x for base que.\n",
4349 icb->msix = cpu_to_le16(msix->entry);
4351 /* Use alternate PCI bus number */
4353 icb->firmware_options_2 |= cpu_to_le32(BIT_19);
4354 /* Use alternate PCI devfn */
4356 icb->firmware_options_2 |= cpu_to_le32(BIT_18);
4358 /* Use Disable MSIX Handshake mode for capable adapters */
4359 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
4360 (ha->flags.msix_enabled)) {
4361 icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
4362 ha->flags.disable_msix_handshake = 1;
4363 ql_dbg(ql_dbg_init, vha, 0x00fe,
4364 "MSIX Handshake Disable Mode turned on.\n");
4366 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
4368 icb->firmware_options_2 |= cpu_to_le32(BIT_23);
/* Zero the multi-queue ring index registers. */
4370 wrt_reg_dword(&reg->isp25mq.req_q_in, 0);
4371 wrt_reg_dword(&reg->isp25mq.req_q_out, 0);
4372 wrt_reg_dword(&reg->isp25mq.rsp_q_in, 0);
4373 wrt_reg_dword(&reg->isp25mq.rsp_q_out, 0);
/* Non-MQ path: zero the isp24 ring index registers instead. */
4375 wrt_reg_dword(&reg->isp24.req_q_in, 0);
4376 wrt_reg_dword(&reg->isp24.req_q_out, 0);
4377 wrt_reg_dword(&reg->isp24.rsp_q_in, 0);
4378 wrt_reg_dword(&reg->isp24.rsp_q_out, 0);
4381 qlt_24xx_config_rings(vha);
4383 /* If the user has configured the speed, set it here */
4384 if (ha->set_data_rate) {
4385 ql_dbg(ql_dbg_init, vha, 0x00fd,
4386 "Speed set by user : %s Gbps \n",
4387 qla2x00_get_link_speed_str(ha, ha->set_data_rate));
4388 icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13);
/* Read back to flush posted PCI writes. */
4392 rd_reg_word(&ioreg->hccr);
4396 * qla2x00_init_rings() - Initializes firmware.
4399 * Beginning of request ring has initialization control block already built
4400 * by nvram config routine.
4402 * Returns 0 on success.
4405 qla2x00_init_rings(scsi_qla_host_t *vha)
4408 unsigned long flags = 0;
4410 struct qla_hw_data *ha = vha->hw;
4411 struct req_que *req;
4412 struct rsp_que *rsp;
4413 struct mid_init_cb_24xx *mid_init_cb =
4414 (struct mid_init_cb_24xx *) ha->init_cb;
4416 spin_lock_irqsave(&ha->hardware_lock, flags);
4418 /* Clear outstanding commands array. */
4419 for (que = 0; que < ha->max_req_queues; que++) {
4420 req = ha->req_q_map[que];
4421 if (!req || !test_bit(que, ha->req_qid_map))
4423 req->out_ptr = (uint16_t *)(req->ring + req->length);
/* Slot 0 is reserved; outstanding handles start at index 1. */
4425 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
4426 req->outstanding_cmds[cnt] = NULL;
4428 req->current_outstanding_cmd = 1;
4430 /* Initialize firmware. */
4431 req->ring_ptr = req->ring;
4432 req->ring_index = 0;
4433 req->cnt = req->length;
4436 for (que = 0; que < ha->max_rsp_queues; que++) {
4437 rsp = ha->rsp_q_map[que];
4438 if (!rsp || !test_bit(que, ha->rsp_qid_map))
4440 rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
4442 /* Initialize response queue entries */
4444 qlafx00_init_response_q_entries(rsp);
4446 qla2x00_init_response_q_entries(rsp);
4449 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
4450 ha->tgt.atio_ring_index = 0;
4451 /* Initialize ATIO queue entries */
4452 qlt_init_atio_q_entries(vha);
4454 ha->isp_ops->config_rings(vha);
4456 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* ISPFx00 uses its own firmware-init mailbox path. */
4458 if (IS_QLAFX00(ha)) {
4459 rval = qlafx00_init_firmware(vha, ha->init_cb_size);
4463 /* Update any ISP specific firmware options before initialization. */
4464 ha->isp_ops->update_fw_options(vha);
4466 ql_dbg(ql_dbg_init, vha, 0x00d1,
4467 "Issue init firmware FW opt 1-3= %08x %08x %08x.\n",
4468 le32_to_cpu(mid_init_cb->init_cb.firmware_options_1),
4469 le32_to_cpu(mid_init_cb->init_cb.firmware_options_2),
4470 le32_to_cpu(mid_init_cb->init_cb.firmware_options_3));
4472 if (ha->flags.npiv_supported) {
4473 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
4474 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
4475 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
4478 if (IS_FWI2_CAPABLE(ha)) {
4479 mid_init_cb->options = cpu_to_le16(BIT_1);
4480 mid_init_cb->init_cb.execution_throttle =
4481 cpu_to_le16(ha->cur_fw_xcb_count);
/* Record D-Port and FA-WWPN support from firmware option bits. */
4482 ha->flags.dport_enabled =
4483 (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
4485 ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
4486 (ha->flags.dport_enabled) ? "enabled" : "disabled");
4487 /* FA-WWPN Status */
4488 ha->flags.fawwpn_enabled =
4489 (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
4491 ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
4492 (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
4495 /* ELS pass through payload is limit by frame size. */
4496 if (ha->flags.edif_enabled)
4497 mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD);
4499 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
4502 ql_log(ql_log_fatal, vha, 0x00d2,
4503 "Init Firmware **** FAILED ****.\n");
4505 ql_dbg(ql_dbg_init, vha, 0x00d3,
4506 "Init Firmware -- success.\n");
4508 vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
4515 * qla2x00_fw_ready() - Waits for firmware ready.
4518 * Returns 0 on success.
4521 qla2x00_fw_ready(scsi_qla_host_t *vha)
4524 unsigned long wtime, mtime, cs84xx_time;
4525 uint16_t min_wait; /* Minimum wait time if loop is down */
4526 uint16_t wait_time; /* Wait time if loop is coming ready */
4528 struct qla_hw_data *ha = vha->hw;
4530 if (IS_QLAFX00(vha->hw))
4531 return qlafx00_fw_ready(vha);
4533 /* Time to wait for loop down */
4534 if (IS_P3P_TYPE(ha))
4540 * Firmware should take at most one RATOV to login, plus 5 seconds for
4541 * our own processing.
4543 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
4544 wait_time = min_wait;
4547 /* Min wait time if loop down */
4548 mtime = jiffies + (min_wait * HZ);
4550 /* wait time before firmware ready */
4551 wtime = jiffies + (wait_time * HZ);
4553 /* Wait for ISP to finish LIP */
4554 if (!vha->flags.init_done)
4555 ql_log(ql_log_info, vha, 0x801e,
4556 "Waiting for LIP to complete.\n");
/* Poll firmware state; -1 sentinel distinguishes unreported words. */
4559 memset(state, -1, sizeof(state));
4560 rval = qla2x00_get_firmware_state(vha, state);
4561 if (rval == QLA_SUCCESS) {
4562 if (state[0] < FSTATE_LOSS_OF_SYNC) {
4563 vha->device_flags &= ~DFLG_NO_CABLE;
/* ISP84xx may need a verify IOCB (qla84xx_init_chip) before ready. */
4565 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
4566 ql_dbg(ql_dbg_taskm, vha, 0x801f,
4567 "fw_state=%x 84xx=%x.\n", state[0],
4569 if ((state[2] & FSTATE_LOGGED_IN) &&
4570 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
4571 ql_dbg(ql_dbg_taskm, vha, 0x8028,
4572 "Sending verify iocb.\n");
4574 cs84xx_time = jiffies;
4575 rval = qla84xx_init_chip(vha);
4576 if (rval != QLA_SUCCESS) {
4579 "Init chip failed.\n");
4583 /* Add time taken to initialize. */
4584 cs84xx_time = jiffies - cs84xx_time;
4585 wtime += cs84xx_time;
4586 mtime += cs84xx_time;
4587 ql_dbg(ql_dbg_taskm, vha, 0x8008,
4588 "Increasing wait time by %ld. "
4589 "New time %ld.\n", cs84xx_time,
4592 } else if (state[0] == FSTATE_READY) {
4593 ql_dbg(ql_dbg_taskm, vha, 0x8037,
4594 "F/W Ready - OK.\n");
4596 qla2x00_get_retry_cnt(vha, &ha->retry_count,
4597 &ha->login_timeout, &ha->r_a_tov);
4603 rval = QLA_FUNCTION_FAILED;
4605 if (atomic_read(&vha->loop_down_timer) &&
4606 state[0] != FSTATE_READY) {
4607 /* Loop down. Timeout on min_wait for states
4608 * other than Wait for Login.
4610 if (time_after_eq(jiffies, mtime)) {
4611 ql_log(ql_log_info, vha, 0x8038,
4612 "Cable is unplugged...\n");
4614 vha->device_flags |= DFLG_NO_CABLE;
4619 /* Mailbox cmd failed. Timeout on min_wait. */
4620 if (time_after_eq(jiffies, mtime) ||
4621 ha->flags.isp82xx_fw_hung)
4625 if (time_after_eq(jiffies, wtime))
4628 /* Delay for a while */
4632 ql_dbg(ql_dbg_taskm, vha, 0x803a,
4633 "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
4634 state[1], state[2], state[3], state[4], state[5], jiffies);
4636 if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
4637 ql_log(ql_log_warn, vha, 0x803b,
4638 "Firmware ready **** FAILED ****.\n");
4645 * qla2x00_configure_hba
4646 * Setup adapter context.
4649 * ha = adapter state pointer.
4658 qla2x00_configure_hba(scsi_qla_host_t *vha)
4667 char connect_type[22];
4668 struct qla_hw_data *ha = vha->hw;
4669 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
4671 unsigned long flags;
4673 /* Get host addresses. */
4674 rval = qla2x00_get_adapter_id(vha,
4675 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
4676 if (rval != QLA_SUCCESS) {
/* Transitional loop states are expected; only log a warning otherwise. */
4677 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
4678 IS_CNA_CAPABLE(ha) ||
4679 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
4680 ql_dbg(ql_dbg_disc, vha, 0x2008,
4681 "Loop is in a transition state.\n");
4683 ql_log(ql_log_warn, vha, 0x2009,
4684 "Unable to get host loop ID.\n");
/* 0x1b on the base port: attempt a link (re)initialize, else full abort. */
4685 if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
4686 (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
4687 ql_log(ql_log_warn, vha, 0x1151,
4688 "Doing link init.\n");
4689 if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
4692 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4698 ql_log(ql_log_info, vha, 0x200a,
4699 "Cannot get topology - retrying.\n");
4700 return (QLA_FUNCTION_FAILED);
4703 vha->loop_id = loop_id;
/* Defaults before decoding the reported topology. */
4706 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
4707 ha->operating_mode = LOOP;
4711 ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
4713 ha->current_topology = ISP_CFG_NL;
4714 strcpy(connect_type, "(Loop)");
4718 ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
4719 ha->switch_cap = sw_cap;
4720 ha->current_topology = ISP_CFG_FL;
4721 strcpy(connect_type, "(FL_Port)");
4725 ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
4727 ha->operating_mode = P2P;
4728 ha->current_topology = ISP_CFG_N;
4729 strcpy(connect_type, "(N_Port-to-N_Port)");
4733 ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
4734 ha->switch_cap = sw_cap;
4735 ha->operating_mode = P2P;
4736 ha->current_topology = ISP_CFG_F;
4737 strcpy(connect_type, "(F_Port)");
4741 ql_dbg(ql_dbg_disc, vha, 0x200f,
4742 "HBA in unknown topology %x, using NL.\n", topo);
4744 ha->current_topology = ISP_CFG_NL;
4745 strcpy(connect_type, "(Loop)");
4749 /* Save Host port and loop ID. */
4750 /* byte order - Big Endian */
4751 id.b.domain = domain;
/* Host-map update is skipped for non-edif N2N "bigger WWPN" case. */
4755 spin_lock_irqsave(&ha->hardware_lock, flags);
4756 if (vha->hw->flags.edif_enabled) {
4758 qlt_update_host_map(vha, id);
4759 } else if (!(topo == 2 && ha->flags.n2n_bigger))
4760 qlt_update_host_map(vha, id);
4761 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4763 if (!vha->flags.init_done)
4764 ql_log(ql_log_info, vha, 0x2010,
4765 "Topology - %s, Host Loop address 0x%x.\n",
4766 connect_type, vha->loop_id);
/*
 * qla2x00_set_model_info() - Derive model number/description strings.
 * Uses the NVRAM-supplied @model when non-zero (trimming trailing
 * spaces/NULs), otherwise falls back to the qla2x00_model_name table
 * keyed by PCI subsystem-device, or to the @def default string.
 */
4772 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
4777 uint64_t zero[2] = { 0 };
4778 struct qla_hw_data *ha = vha->hw;
4779 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
4780 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
4782 if (len > sizeof(zero))
4784 if (memcmp(model, &zero, len) != 0) {
4785 memcpy(ha->model_number, model, len);
/* Trim trailing spaces (0x20) and NULs from the copied model string. */
4786 st = en = ha->model_number;
4789 if (*en != 0x20 && *en != 0x00)
/* Table lookup for the human-readable description, QLogic boards only. */
4794 index = (ha->pdev->subsystem_device & 0xff);
4796 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
4797 index < QLA_MODEL_NAMES)
4798 strlcpy(ha->model_desc,
4799 qla2x00_model_name[index * 2 + 1],
4800 sizeof(ha->model_desc));
4802 index = (ha->pdev->subsystem_device & 0xff);
4804 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
4805 index < QLA_MODEL_NAMES) {
4806 strlcpy(ha->model_number,
4807 qla2x00_model_name[index * 2],
4808 sizeof(ha->model_number));
4809 strlcpy(ha->model_desc,
4810 qla2x00_model_name[index * 2 + 1],
4811 sizeof(ha->model_desc));
4813 strlcpy(ha->model_number, def,
4814 sizeof(ha->model_number));
/* FWI2 parts can refine the description from the VPD "\x82" field. */
4817 if (IS_FWI2_CAPABLE(ha))
4818 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
4819 sizeof(ha->model_desc));
4822 /* On sparc systems, obtain port and node WWN from firmware
4825 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
4828 struct qla_hw_data *ha = vha->hw;
4829 struct pci_dev *pdev = ha->pdev;
4830 struct device_node *dp = pci_device_to_OF_node(pdev);
/* Copy OpenFirmware "port-wwn"/"node-wwn" properties when present and
 * large enough; otherwise leave the NVRAM defaults untouched. */
4834 val = of_get_property(dp, "port-wwn", &len);
4835 if (val && len >= WWN_SIZE)
4836 memcpy(nv->port_name, val, WWN_SIZE);
4838 val = of_get_property(dp, "node-wwn", &len);
4839 if (val && len >= WWN_SIZE)
4840 memcpy(nv->node_name, val, WWN_SIZE);
4845 * NVRAM configuration for ISP 2xxx
4848 * ha = adapter block pointer.
4851 * initialization control block in response_ring
4852 * host adapters parameters in host adapter block
4858 qla2x00_nvram_config(scsi_qla_host_t *vha)
4863 uint8_t *dptr1, *dptr2;
4864 struct qla_hw_data *ha = vha->hw;
4865 init_cb_t *icb = ha->init_cb;
4866 nvram_t *nv = ha->nvram;
4867 uint8_t *ptr = ha->nvram;
4868 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
4872 /* Determine NVRAM starting address. */
4873 ha->nvram_size = sizeof(*nv);
/* NOTE(review): "®" entity garbling restored to "&reg" on the next read. */
4875 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
4876 if ((rd_reg_word(&reg->ctrl_status) >> 14) == 1)
4877 ha->nvram_base = 0x80;
4879 /* Get NVRAM data and calculate checksum. */
4880 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
4881 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
4884 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
4885 "Contents of NVRAM.\n");
4886 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
4887 nv, ha->nvram_size);
4889 /* Bad NVRAM data, set defaults parameters. */
4890 if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
4891 nv->nvram_version < 1) {
4892 /* Reset NVRAM data. */
4893 ql_log(ql_log_warn, vha, 0x0064,
4894 "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n",
4895 chksum, nv->id, nv->nvram_version);
4896 ql_log(ql_log_warn, vha, 0x0065,
4898 "functioning (yet invalid -- WWPN) defaults.\n");
4901 * Set default initialization control block.
4903 memset(nv, 0, ha->nvram_size);
4904 nv->parameter_block_version = ICB_VERSION;
4906 if (IS_QLA23XX(ha)) {
4907 nv->firmware_options[0] = BIT_2 | BIT_1;
4908 nv->firmware_options[1] = BIT_7 | BIT_5;
4909 nv->add_firmware_options[0] = BIT_5;
4910 nv->add_firmware_options[1] = BIT_5 | BIT_4;
4911 nv->frame_payload_size = cpu_to_le16(2048);
4912 nv->special_options[1] = BIT_7;
4913 } else if (IS_QLA2200(ha)) {
4914 nv->firmware_options[0] = BIT_2 | BIT_1;
4915 nv->firmware_options[1] = BIT_7 | BIT_5;
4916 nv->add_firmware_options[0] = BIT_5;
4917 nv->add_firmware_options[1] = BIT_5 | BIT_4;
4918 nv->frame_payload_size = cpu_to_le16(1024);
4919 } else if (IS_QLA2100(ha)) {
4920 nv->firmware_options[0] = BIT_3 | BIT_1;
4921 nv->firmware_options[1] = BIT_5;
4922 nv->frame_payload_size = cpu_to_le16(1024);
4925 nv->max_iocb_allocation = cpu_to_le16(256);
4926 nv->execution_throttle = cpu_to_le16(16);
4927 nv->retry_count = 8;
4928 nv->retry_delay = 1;
/* Deliberately invalid default WWPN bytes -- see warning logged above. */
4930 nv->port_name[0] = 33;
4931 nv->port_name[3] = 224;
4932 nv->port_name[4] = 139;
4934 qla2xxx_nvram_wwn_from_ofw(vha, nv);
4936 nv->login_timeout = 4;
4939 * Set default host adapter parameters
4941 nv->host_p[1] = BIT_2;
4942 nv->reset_delay = 5;
4943 nv->port_down_retry_count = 8;
4944 nv->max_luns_per_target = cpu_to_le16(8);
4945 nv->link_down_timeout = 60;
4950 /* Reset Initialization control block */
4951 memset(icb, 0, ha->init_cb_size);
4954 * Setup driver NVRAM options.
4956 nv->firmware_options[0] |= (BIT_6 | BIT_1);
4957 nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
4958 nv->firmware_options[1] |= (BIT_5 | BIT_0);
4959 nv->firmware_options[1] &= ~BIT_4;
4961 if (IS_QLA23XX(ha)) {
4962 nv->firmware_options[0] |= BIT_2;
4963 nv->firmware_options[0] &= ~BIT_3;
4964 nv->special_options[0] &= ~BIT_6;
4965 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
4967 if (IS_QLA2300(ha)) {
4968 if (ha->fb_rev == FPM_2310) {
4969 strcpy(ha->model_number, "QLA2310");
4971 strcpy(ha->model_number, "QLA2300");
4974 qla2x00_set_model_info(vha, nv->model_number,
4975 sizeof(nv->model_number), "QLA23xx");
4977 } else if (IS_QLA2200(ha)) {
4978 nv->firmware_options[0] |= BIT_2;
4980 * 'Point-to-point preferred, else loop' is not a safe
4981 * connection mode setting.
4983 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
4985 /* Force 'loop preferred, else point-to-point'. */
4986 nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
4987 nv->add_firmware_options[0] |= BIT_5;
4989 strcpy(ha->model_number, "QLA22xx");
4990 } else /*if (IS_QLA2100(ha))*/ {
4991 strcpy(ha->model_number, "QLA2100");
4995 * Copy over NVRAM RISC parameter block to initialization control block.
4997 dptr1 = (uint8_t *)icb;
4998 dptr2 = (uint8_t *)&nv->parameter_block_version;
4999 cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
5001 *dptr1++ = *dptr2++;
5003 /* Copy 2nd half. */
5004 dptr1 = (uint8_t *)icb->add_firmware_options;
5005 cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
5007 *dptr1++ = *dptr2++;
5008 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
5009 /* Use alternate WWN? */
5010 if (nv->host_p[1] & BIT_7) {
5011 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
5012 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
5015 /* Prepare nodename */
5016 if ((icb->firmware_options[1] & BIT_6) == 0) {
5018 * Firmware will apply the following mask if the nodename was
5021 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
5022 icb->node_name[0] &= 0xF0;
5026 * Set host adapter parameters.
5030 * BIT_7 in the host-parameters section allows for modification to
5031 * internal driver logging.
5033 if (nv->host_p[0] & BIT_7)
5034 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
5035 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
5036 /* Always load RISC code on non ISP2[12]00 chips. */
5037 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
5038 ha->flags.disable_risc_code_load = 0;
5039 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
5040 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
5041 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
5042 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
5043 ha->flags.disable_serdes = 0;
5045 ha->operating_mode =
5046 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
5048 memcpy(ha->fw_seriallink_options, nv->seriallink_options,
5049 sizeof(ha->fw_seriallink_options));
5051 /* save HBA serial number */
5052 ha->serial0 = icb->port_name[5];
5053 ha->serial1 = icb->port_name[6];
5054 ha->serial2 = icb->port_name[7];
5055 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
5056 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
5058 icb->execution_throttle = cpu_to_le16(0xFFFF);
5060 ha->retry_count = nv->retry_count;
5062 /* Set minimum login_timeout to 4 seconds. */
5063 if (nv->login_timeout != ql2xlogintimeout)
5064 nv->login_timeout = ql2xlogintimeout;
5065 if (nv->login_timeout < 4)
5066 nv->login_timeout = 4;
5067 ha->login_timeout = nv->login_timeout;
5069 /* Set minimum RATOV to 100 tenths of a second. */
5072 ha->loop_reset_delay = nv->reset_delay;
5074 /* Link Down Timeout = 0:
5076 * When Port Down timer expires we will start returning
5077 * I/O's to OS with "DID_NO_CONNECT".
5079 * Link Down Timeout != 0:
5081 * The driver waits for the link to come up after link down
5082 * before returning I/Os to OS with "DID_NO_CONNECT".
5084 if (nv->link_down_timeout == 0) {
5085 ha->loop_down_abort_time =
5086 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
5088 ha->link_down_timeout = nv->link_down_timeout;
5089 ha->loop_down_abort_time =
5090 (LOOP_DOWN_TIME - ha->link_down_timeout);
5094 * Need enough time to try and get the port back.
5096 ha->port_down_retry_count = nv->port_down_retry_count;
5097 if (qlport_down_retry)
5098 ha->port_down_retry_count = qlport_down_retry;
5099 /* Set login_retry_count */
5100 ha->login_retry_count = nv->retry_count;
5101 if (ha->port_down_retry_count == nv->port_down_retry_count &&
5102 ha->port_down_retry_count > 3)
5103 ha->login_retry_count = ha->port_down_retry_count;
5104 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
5105 ha->login_retry_count = ha->port_down_retry_count;
5106 if (ql2xloginretrycount)
5107 ha->login_retry_count = ql2xloginretrycount;
5109 icb->lun_enables = cpu_to_le16(0);
5110 icb->command_resource_count = 0;
5111 icb->immediate_notify_resource_count = 0;
5112 icb->timeout = cpu_to_le16(0);
5114 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
5116 icb->firmware_options[0] &= ~BIT_3;
5117 icb->add_firmware_options[0] &=
5118 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
5119 icb->add_firmware_options[0] |= BIT_2;
5120 icb->response_accumulation_timer = 3;
5121 icb->interrupt_delay_timer = 5;
5123 vha->flags.process_response_queue = 1;
/* 23xx path: configure ZIO (zero interrupt operation) mode/timer. */
5126 if (!vha->flags.init_done) {
5127 ha->zio_mode = icb->add_firmware_options[0] &
5128 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
5129 ha->zio_timer = icb->interrupt_delay_timer ?
5130 icb->interrupt_delay_timer : 2;
5132 icb->add_firmware_options[0] &=
5133 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
5134 vha->flags.process_response_queue = 0;
5135 if (ha->zio_mode != QLA_ZIO_DISABLED) {
5136 ha->zio_mode = QLA_ZIO_MODE_6;
5138 ql_log(ql_log_info, vha, 0x0068,
5139 "ZIO mode %d enabled; timer delay (%d us).\n",
5140 ha->zio_mode, ha->zio_timer * 100);
5142 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
5143 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
5144 vha->flags.process_response_queue = 1;
5149 ql_log(ql_log_warn, vha, 0x0069,
5150 "NVRAM configuration failed.\n");
qla2x00_rport_del(void *data)
	/*
	 * Detach and delete the FC-transport rport bound to an fcport.
	 * @data: fc_port_t pointer passed as an opaque callback argument.
	 */
	fc_port_t *fcport = data;
	struct fc_rport *rport;
	unsigned long flags;

	/*
	 * Snapshot the rport under host_lock: a staged "deferred" rport
	 * (drport) takes precedence over the live one, and the deferred
	 * pointer is consumed (cleared) here.
	 */
	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	rport = fcport->drport ? fcport->drport : fcport->rport;
	fcport->drport = NULL;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);

	ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
	    "%s %8phN. rport %p roles %x\n",
	    __func__, fcport->port_name, rport,
	/* Transport deletion happens outside host_lock (it may sleep). */
	fc_remote_port_delete(rport);
/*
 * qla2x00_set_fcport_state - Atomically update an fcport's FCS_* state,
 * logging the transition when the state actually changes.
 * @fcport: port whose state is updated
 * @state:  new FCS_* state value
 */
void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
	old_state = atomic_read(&fcport->state);
	atomic_set(&fcport->state, state);

	/* Don't print state transitions during initial allocation of fcport */
	if (old_state && old_state != state) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
		    "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n",
		    fcport->port_name, port_state_str[old_state],
		    port_state_str[state], fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);
5194 * qla2x00_alloc_fcport() - Allocate a generic fcport.
5196 * @flags: allocation flags
5198 * Returns a pointer to the allocated fcport, or NULL, if none available.
5201 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
5205 fcport = kzalloc(sizeof(fc_port_t), flags);
5209 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
5210 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
5212 if (!fcport->ct_desc.ct_sns) {
5213 ql_log(ql_log_warn, vha, 0xd049,
5214 "Failed to allocate ct_sns request.\n");
5219 /* Setup fcport template structure. */
5221 fcport->port_type = FCT_UNKNOWN;
5222 fcport->loop_id = FC_NO_LOOP_ID;
5223 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
5224 fcport->supported_classes = FC_COS_UNSPECIFIED;
5225 fcport->fp_speed = PORT_SPEED_UNKNOWN;
5227 fcport->disc_state = DSC_DELETED;
5228 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
5229 fcport->deleted = QLA_SESS_DELETED;
5230 fcport->login_retry = vha->hw->login_retry_count;
5231 fcport->chip_reset = vha->hw->base_qpair->chip_reset;
5232 fcport->logout_on_delete = 1;
5233 fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
5234 fcport->tgt_short_link_down_cnt = 0;
5235 fcport->dev_loss_tmo = 0;
5237 if (!fcport->ct_desc.ct_sns) {
5238 ql_log(ql_log_warn, vha, 0xd049,
5239 "Failed to allocate ct_sns request.\n");
5244 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
5245 INIT_WORK(&fcport->free_work, qlt_free_session_done);
5246 INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
5247 INIT_LIST_HEAD(&fcport->gnl_entry);
5248 INIT_LIST_HEAD(&fcport->list);
5250 INIT_LIST_HEAD(&fcport->sess_cmd_list);
5251 spin_lock_init(&fcport->sess_cmd_lock);
5253 spin_lock_init(&fcport->edif.sa_list_lock);
5254 INIT_LIST_HEAD(&fcport->edif.tx_sa_list);
5255 INIT_LIST_HEAD(&fcport->edif.rx_sa_list);
5257 if (vha->e_dbell.db_flags == EDB_ACTIVE)
5258 fcport->edif.app_started = 1;
5260 spin_lock_init(&fcport->edif.indx_list_lock);
5261 INIT_LIST_HEAD(&fcport->edif.edif_indx_list);
qla2x00_free_fcport(fc_port_t *fcport)
	/*
	 * Tear down an fcport from qla2x00_alloc_fcport(): release the
	 * CT/SNS DMA buffer, unlink the port, free its loop ID and drop
	 * the eDIF bookkeeping.
	 */
	if (fcport->ct_desc.ct_sns) {
		dma_free_coherent(&fcport->vha->hw->pdev->dev,
		    sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
		    fcport->ct_desc.ct_sns_dma);

		/* Clear the pointer so a repeat call cannot double-free. */
		fcport->ct_desc.ct_sns = NULL;

	qla_edif_flush_sa_ctl_lists(fcport);
	list_del(&fcport->list);
	qla2x00_clear_loop_id(fcport);

	qla_edif_list_del(fcport);
/*
 * qla_get_login_template - Cache the firmware's port-login (PLOGI ELS)
 * payload template.
 * @vha: HA context
 *
 * Reads the template into the init_cb scratch buffer via mailbox,
 * byte-swaps it into plogi_els_payld and marks the cache valid on
 * success.
 */
static void qla_get_login_template(scsi_qla_host_t *vha)
	struct qla_hw_data *ha = vha->hw;

	memset(ha->init_cb, 0, ha->init_cb_size);
	/* The template is at most one FLOGI payload long. */
	sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
	rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00d1,
		    "PLOGI ELS param read fail.\n");

	q = (__be32 *)&ha->plogi_els_payld.fl_csp;

	bp = (uint32_t *)ha->init_cb;
	/* Swap to big-endian wire format one 32-bit word at a time. */
	cpu_to_be32_array(q, bp, sz / 4);
	ha->flags.plogi_template_valid = 1;
5310 * qla2x00_configure_loop
5311 * Updates Fibre Channel Device Database with what is actually on loop.
5314 * ha = adapter block pointer.
5319 * 2 = database was full and device was not configured.
qla2x00_configure_loop(scsi_qla_host_t *vha)
	/*
	 * Rebuild the driver's device database to match what is actually
	 * present on the loop/fabric.  Returns QLA_SUCCESS or
	 * QLA_FUNCTION_FAILED.
	 */
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2013,
			    "Unable to configure HBA.\n");

	/*
	 * Work on a snapshot of dpc_flags; save_flags lets us re-arm the
	 * update bits at the bottom if a resync interrupted us.
	 */
	save_flags = flags = vha->dpc_flags;
	ql_dbg(ql_dbg_disc, vha, 0x2014,
	    "Configure loop -- dpc flags = 0x%lx.\n", flags);

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);
	qla_get_login_template(vha);

	/* Determine what we need to do */
	if ((ha->current_topology == ISP_CFG_FL ||
	    ha->current_topology == ISP_CFG_F) &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
		/* Fabric topology: a local-loop scan becomes a fabric scan. */
		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_NL ||
		   ha->current_topology == ISP_CFG_N) {
		clear_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
		/* State unknown: rescan both local loop and fabric. */
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_disc, vha, 0x2015,
			    "Loop resync needed, failing.\n");
			rval = QLA_FUNCTION_FAILED;
			rval = qla2x00_configure_local_loop(vha);

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2099,
			    "Needs RSCN update and loop transition.\n");
			rval = QLA_FUNCTION_FAILED;
			rval = qla2x00_configure_fabric(vha);

	if (rval == QLA_SUCCESS) {
		/* Fail if the loop dropped again while we were scanning. */
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_dbg(ql_dbg_disc, vha, 0x2069,
			ha->flags.fw_init_done = 1;

			/*
			 * use link up to wake up app to get ready for
			 */
			if (ha->flags.edif_enabled &&
			    !(vha->e_dbell.db_flags & EDB_ACTIVE))
				qla2x00_post_aen_work(vha, FCH_EVT_LINKUP,
				    ha->link_data_rate);

			/*
			 * Process any ATIO queue entries that came in
			 * while we weren't online.
			 */
			if (qla_tgt_mode_enabled(vha) ||
			    qla_dual_mode_enabled(vha)) {
				spin_lock_irqsave(&ha->tgt.atio_lock, flags);
				qlt_24xx_process_atio_queue(vha, 0);
				spin_unlock_irqrestore(&ha->tgt.atio_lock,

		ql_dbg(ql_dbg_disc, vha, 0x206a,
		    "%s *** FAILED ***.\n", __func__);
		ql_dbg(ql_dbg_disc, vha, 0x206b,
		    "%s: exiting normally. local port wwpn %8phN id %06x)\n",
		    __func__, vha->port_name, vha->d_id.b24);

	/* Restore state if a resync event occurred during processing */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
/*
 * qla2x00_configure_n2n_loop - Drive login for point-to-point (N2N) peers.
 * @vha: HA context
 *
 * Kicks login handling for fcports flagged as N2N peers; otherwise counts
 * a scan retry and, within the retry budget, schedules another resync.
 */
static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x206a, "%s %d.\n", __func__, __LINE__);

	if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags))
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->n2n_flag) {
			qla24xx_fcport_handle_login(vha, fcport);

	/*
	 * NOTE(review): presumably only reached when no N2N peer was
	 * handled above (early-return elided in this view) -- confirm.
	 * Count the retry under work_lock.
	 */
	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_retry++;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

	return QLA_FUNCTION_FAILED;
5474 * qla2x00_configure_local_loop
5475 * Updates Fibre Channel Device Database with local loop devices.
5478 * ha = adapter block pointer.
qla2x00_configure_local_loop(scsi_qla_host_t *vha)
	/*
	 * Scan the local loop via the firmware's logged-in ID list and
	 * reconcile the result with vha->vp_fcports: refresh known ports,
	 * add new ones, schedule deletion of vanished sessions.
	 */
	fc_port_t *fcport, *new_fcport;

	struct gid_list_info *gid;

	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Initiate N2N login. */
		return qla2x00_configure_n2n_loop(vha);

	entries = MAX_FIBRE_DEVICES_LOOP;

	/* Get list of logged in devices. */
	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
	if (rval != QLA_SUCCESS)

	ql_dbg(ql_dbg_disc, vha, 0x2011,
	    "Entries in ID list (%d).\n", entries);
	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
	    ha->gid_list, entries * sizeof(*ha->gid_list));

	/* ID-list fetch failed: retry a bounded number of times. */
	spin_lock_irqsave(&vha->work_lock, flags);
	vha->scan.scan_retry++;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	vha->scan.scan_retry = 0;

	/* Mark all known ports "not seen yet" ahead of the rescan. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->scan_state = QLA_FCPORT_SCAN;

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x2012,
		    "Memory allocation failed for fcport.\n");
		rval = QLA_MEMORY_ALLOC_FAILED;
	new_fcport->flags &= ~FCF_FABRIC_DEVICE;

	/* Add devices to port list. */
	for (index = 0; index < entries; index++) {
		domain = gid->domain;

		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			loop_id = gid->loop_id_2100;
			loop_id = le16_to_cpu(gid->loop_id);
		/* Entries are variable-sized; step by the firmware-reported size. */
		gid = (void *)gid + ha->gid_list_info_size;

		/* Bypass reserved domain fields. */
		if ((domain & 0xf0) == 0xf0)

		/* Bypass if not same domain and area of adapter. */
		if (area && domain && ((area != vha->d_id.b.area) ||
		    (domain != vha->d_id.b.domain)) &&
		    (ha->current_topology == ISP_CFG_NL))

		/* Bypass invalid local loop ID. */
		if (loop_id > LAST_LOCAL_LOOP_ID)

		memset(new_fcport->port_name, 0, WWN_SIZE);

		/* Fill in member data. */
		new_fcport->d_id.b.domain = domain;
		new_fcport->d_id.b.area = area;
		new_fcport->d_id.b.al_pa = al_pa;
		new_fcport->loop_id = loop_id;
		new_fcport->scan_state = QLA_FCPORT_FOUND;

		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
		if (rval2 != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2097,
			    "Failed to retrieve fcport information "
			    "-- get_port_database=%x, loop_id=0x%04x.\n",
			    rval2, new_fcport->loop_id);
			/* Skip retry if N2N */
			if (ha->current_topology != ISP_CFG_N) {
				ql_dbg(ql_dbg_disc, vha, 0x2105,
				    "Scheduling resync.\n");
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		/* Check for matching device in port list. */
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,

			/* Known port: refresh it from the fresh lookup. */
			fcport->flags &= ~FCF_FABRIC_DEVICE;
			fcport->loop_id = new_fcport->loop_id;
			fcport->port_type = new_fcport->port_type;
			fcport->d_id.b24 = new_fcport->d_id.b24;
			memcpy(fcport->node_name, new_fcport->node_name,
			fcport->scan_state = QLA_FCPORT_FOUND;
			if (fcport->login_retry == 0) {
				fcport->login_retry = vha->hw->login_retry_count;
				ql_dbg(ql_dbg_disc, vha, 0x2135,
				    "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
				    fcport->port_name, fcport->loop_id,
				    fcport->login_retry);

		/* New device, add to fcports list. */
		list_add_tail(&new_fcport->list, &vha->vp_fcports);

		/* Allocate a new replacement fcport. */
		fcport = new_fcport;

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);

		if (new_fcport == NULL) {
			ql_log(ql_log_warn, vha, 0xd031,
			    "Failed to allocate memory for fcport.\n");
			rval = QLA_MEMORY_ALLOC_FAILED;
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		new_fcport->flags &= ~FCF_FABRIC_DEVICE;
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		/* Base iIDMA settings on HBA port speed. */
		fcport->fp_speed = ha->link_data_rate;

	/* Follow up per port: drop vanished sessions, log in found ones. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
				    ql2xplogiabsentdevice);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",

					qlt_schedule_sess_for_deletion(fcport);

		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);

	qla2x00_free_fcport(new_fcport);

	ql_dbg(ql_dbg_disc, vha, 0x2098,
	    "Configure local loop error exit: rval=%x.\n", rval);
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
	/*
	 * Ask the firmware to set this port's data rate (iIDMA) to the
	 * speed reported for it -- only when the HBA is iIDMA-capable, the
	 * port is online, its speed is known and does not exceed the link
	 * rate, and GPSC is supported.
	 */
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_IIDMA_CAPABLE(ha))

	if (atomic_read(&fcport->state) != FCS_ONLINE)

	if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
	    fcport->fp_speed > ha->link_data_rate ||
	    !ha->flags.gpsc_supported)

	rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x2004,
		    "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
		    fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
		ql_dbg(ql_dbg_disc, vha, 0x2005,
		    "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
		    qla2x00_get_link_speed_str(ha, fcport->fp_speed),
		    fcport->fp_speed, fcport->port_name);
/* Apply iIDMA speed settings, then refresh the port's FCP priority. */
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
	qla2x00_iidma_fcport(vha, fcport);
	qla24xx_update_fcport_fcp_prio(vha, fcport);
/*
 * Queue an iIDMA adjustment for @fcport on the driver work queue.
 * Returns QLA_FUNCTION_FAILED when no work event could be allocated,
 * otherwise the qla2x00_post_work() result.
 */
int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
	/*
	 * Register @fcport with the FC transport as a remote port, publish
	 * its identifiers and roles, and mark it FCS_ONLINE.
	 */
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *rport;
	unsigned long flags;

	/* Already online means already registered: nothing to do. */
	if (atomic_read(&fcport->state) == FCS_ONLINE)

	qla2x00_set_fcport_state(fcport, FCS_ONLINE);

	rport_ids.node_name = wwn_to_u64(fcport->node_name);
	rport_ids.port_name = wwn_to_u64(fcport->port_name);
	rport_ids.port_id = fcport->d_id.b.domain << 16 |
	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
		ql_log(ql_log_warn, vha, 0x2006,
		    "Unable to allocate fc remote port.\n");

	/* Cross-link the transport rport back to our fcport under host_lock. */
	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	*((fc_port_t **)rport->dd_data) = fcport;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
	/* Adopt the transport's dev_loss_tmo for link-down accounting. */
	fcport->dev_loss_tmo = rport->dev_loss_tmo;

	rport->supported_classes = fcport->supported_classes;

	rport_ids.roles = FC_PORT_ROLE_UNKNOWN;
	if (fcport->port_type == FCT_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
	if (fcport->port_type == FCT_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
	if (fcport->port_type & FCT_NVME_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
	if (fcport->port_type & FCT_NVME_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
	if (fcport->port_type & FCT_NVME_DISCOVERY)
		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;

	fc_remote_port_rolechg(rport, rport_ids.roles);

	ql_dbg(ql_dbg_disc, vha, 0x20ee,
	    "%s: %8phN. rport %ld:0:%d (%p) is %s mode\n",
	    __func__, fcport->port_name, vha->host_no,
	    rport->scsi_target_id, rport,
	    (fcport->port_type == FCT_TARGET) ? "tgt" :
	    ((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));
5796 * qla2x00_update_fcport
5797 * Updates device on list.
5800 * ha = adapter block pointer.
5801 * fcport = port structure pointer.
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
	/*
	 * Post-login bookkeeping for @fcport: reset retry/delete state,
	 * apply iIDMA, register with the FC/NVMe transports as the active
	 * mode requires, and mark discovery complete.
	 */
	if (IS_SW_RESV_ADDR(fcport->d_id))

	ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
	    __func__, fcport->port_name);

	qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
	fcport->login_retry = vha->hw->login_retry_count;
	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
	fcport->deleted = 0;
	/* Loop-only (NL) topology ports are not logged out on delete. */
	if (vha->hw->current_topology == ISP_CFG_NL)
		fcport->logout_on_delete = 0;
		fcport->logout_on_delete = 1;
	fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;

	if (fcport->tgt_link_down_time < fcport->dev_loss_tmo) {
		/* Link came back within dev_loss_tmo: count a short outage. */
		fcport->tgt_short_link_down_cnt++;
		fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;

	switch (vha->hw->current_topology) {
		fcport->keep_nport_handle = 1;

	qla2x00_iidma_fcport(vha, fcport);

	qla2x00_dfs_create_rport(vha, fcport);

	if (NVME_TARGET(vha->hw, fcport)) {
		/* NVMe targets register with the NVMe/FC transport. */
		qla_nvme_register_remote(vha, fcport);
		qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
		qla2x00_set_fcport_state(fcport, FCS_ONLINE);

	qla24xx_update_fcport_fcp_prio(vha, fcport);

	/* NOTE(review): further case labels are not visible in this view. */
	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
		qla2x00_reg_remote_port(vha, fcport);
		qla2x00_set_fcport_state(fcport, FCS_ONLINE);
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
		    !vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		qla2x00_reg_remote_port(vha, fcport);
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
		    !vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);

	if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
		if (fcport->id_changed) {
			fcport->id_changed = 0;
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gfpnid fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			qla24xx_post_gfpnid_work(vha, fcport);
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gpsc fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			qla24xx_post_gpsc_work(vha, fcport);

	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
/*
 * qla_register_fcport_fn - Deferred fcport registration (reg_work handler).
 * @work: embedded reg_work of the fc_port being registered.
 *
 * Runs qla2x00_update_fcport() and, if RSCNs arrived while registering
 * (rscn_gen moved), follows up with session deletion or an ADISC.
 */
void qla_register_fcport_fn(struct work_struct *work)
	fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
	/* Snapshot the RSCN generation to detect events racing with us. */
	u32 rscn_gen = fcport->rscn_gen;

	if (IS_SW_RESV_ADDR(fcport->d_id))

	qla2x00_update_fcport(fcport->vha, fcport);

	if (rscn_gen != fcport->rscn_gen) {
		/* RSCN(s) came in while registration */
		switch (fcport->next_disc_state) {
		case DSC_DELETE_PEND:
			qlt_schedule_sess_for_deletion(fcport);
			data[0] = data[1] = 0;
			qla2x00_post_async_adisc_work(fcport->vha, fcport,
5925 * qla2x00_configure_fabric
5926 * Setup SNS devices with loop ID's.
5929 * ha = adapter block pointer.
qla2x00_configure_fabric(scsi_qla_host_t *vha)
	/*
	 * Discover and set up fabric (SNS-visible) devices: verify an
	 * F/FL port exists, log into the SNS and management servers,
	 * perform the FC-GS registrations, then scan for fabric devices.
	 */
	uint16_t mb[MAILBOX_REGISTER_COUNT];

	LIST_HEAD(new_fcports);
	struct qla_hw_data *ha = vha->hw;

	/* If FL port exists, then SNS is present */
	if (IS_FWI2_CAPABLE(ha))
		loop_id = NPH_F_PORT;
		loop_id = SNS_FL_PORT;
	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x20a0,
		    "MBX_GET_PORT_NAME failed, No FL Port.\n");

		/* Not an error -- simply no switch attached. */
		vha->device_flags &= ~SWITCH_FOUND;
		return (QLA_SUCCESS);
	vha->device_flags |= SWITCH_FOUND;

	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_port_name, 0);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, vha, 0x20ff,
		    "Failed to get Fabric Port Name\n");

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		/* SCR: ask the fabric to deliver RSCNs to us. */
		rval = qla2x00_send_change_request(vha, 0x3, 0);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x121,
			    "Failed to enable receiving of RSCN requests: 0x%x.\n",

	qla2x00_mgmt_svr_login(vha);

	/* Ensure we are logged into the SNS. */
	loop_id = NPH_SNS_LID(ha);
	rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
	    0xfc, mb, BIT_1|BIT_0);
	if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_disc, vha, 0x20a1,
		    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
		    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

	/* FDMI registration (module-parameter gated). */
	if (ql2xfdmienable &&
	    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
		qla2x00_fdmi_register(vha);

	/* FC-GS registrations: RFT_ID, RFF_ID, RNN_ID / RSNN_NN. */
	if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
		if (qla2x00_rft_id(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x20a2,
			    "Register FC-4 TYPE failed.\n");
			if (test_bit(LOOP_RESYNC_NEEDED,
		if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
			ql_dbg(ql_dbg_disc, vha, 0x209a,
			    "Register FC-4 Features failed.\n");
			if (test_bit(LOOP_RESYNC_NEEDED,
		if (vha->flags.nvme_enabled) {
			if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
				ql_dbg(ql_dbg_disc, vha, 0x2049,
				    "Register NVME FC Type Features failed.\n");
		if (qla2x00_rnn_id(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2104,
			    "Register Node Name failed.\n");
			if (test_bit(LOOP_RESYNC_NEEDED,
		} else if (qla2x00_rsnn_nn(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x209b,
			    "Register Symbolic Node Name failed.\n");
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))

	/* Mark the time right before querying FW for connected ports.
	 * This process is long, asynchronous and by the time it's done,
	 * collected information might not be accurate anymore. E.g.
	 * disconnected port might have re-connected and a brand new
	 * session has been created. In this case session's generation
	 * will be newer than discovery_gen. */
	qlt_do_generation_tick(vha, &discovery_gen);

	if (USE_ASYNC_SCAN(ha)) {
		rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		list_for_each_entry(fcport, &vha->vp_fcports, list)
			fcport->scan_state = QLA_FCPORT_SCAN;

		rval = qla2x00_find_all_fabric_devs(vha);

	if (rval != QLA_SUCCESS)

	if (!vha->nvme_local_port && vha->flags.nvme_enabled)
		qla_nvme_register_hba(vha);

	ql_dbg(ql_dbg_disc, vha, 0x2068,
	    "Configure fabric error exit rval=%d.\n", rval);
6068 * qla2x00_find_all_fabric_devs
6071 * ha = adapter block pointer.
6072 * dev = database device entry pointer.
qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
	/*
	 * Walk the fabric name-server database (GID_PT bulk query, or a
	 * GA_NXT crawl as fallback), reconcile each discovered port with
	 * vha->vp_fcports, then schedule logout of vanished fabric devices
	 * and login for new/needed ones.
	 */
	fc_port_t *fcport, *new_fcport;

	int first_dev, last_dev;
	port_id_t wrap = {}, nxt_d_id;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	/* Try GID_PT to get device list, else GAN. */
	ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
		ql_dbg(ql_dbg_disc, vha, 0x209c,
		    "GID_PT allocations failed, fallback on GA_NXT.\n");

		memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
		} else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))

		/* If other queries succeeded probe for FC-4 type */
			qla2x00_gff_id(vha, swl);
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x209d,
		    "Failed to allocate memory for fcport.\n");
		return (QLA_MEMORY_ALLOC_FAILED);
	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);

	/* Set start port ID scan at adapter ID. */

	/* Starting free loop ID. */
	loop_id = ha->min_external_loopid;
	for (; loop_id <= ha->max_loop_id; loop_id++) {
		if (qla2x00_is_reserved_id(vha, loop_id))

		if (ha->current_topology == ISP_CFG_FL &&
		    (atomic_read(&vha->loop_down_timer) ||
		     LOOP_TRANSITION(vha))) {
			/* Loop dropped mid-scan: bail out and resync. */
			atomic_set(&vha->loop_down_timer, 0);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);

			wrap.b24 = new_fcport->d_id.b24;
			/* Populate the candidate from the switch-list entry. */
			new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
			memcpy(new_fcport->node_name,
			    swl[swl_idx].node_name, WWN_SIZE);
			memcpy(new_fcport->port_name,
			    swl[swl_idx].port_name, WWN_SIZE);
			memcpy(new_fcport->fabric_port_name,
			    swl[swl_idx].fabric_port_name, WWN_SIZE);
			new_fcport->fp_speed = swl[swl_idx].fp_speed;
			new_fcport->fc4_type = swl[swl_idx].fc4_type;

			new_fcport->nvme_flag = 0;
			if (vha->flags.nvme_enabled &&
			    swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) {
				ql_log(ql_log_info, vha, 0x2131,
				    "FOUND: NVME port %8phC as FC Type 28h\n",
				    new_fcport->port_name);

			if (swl[swl_idx].d_id.b.rsvd_1 != 0) {

			/* Send GA_NXT to the switch */
			rval = qla2x00_ga_nxt(vha, new_fcport);
			if (rval != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x209e,
				    "SNS scan failed -- assuming "
				    "zero-entry result.\n");

		/* If wrap on switch device list, exit. */
			wrap.b24 = new_fcport->d_id.b24;
		} else if (new_fcport->d_id.b24 == wrap.b24) {
			ql_dbg(ql_dbg_disc, vha, 0x209f,
			    "Device wrap (%02x%02x%02x).\n",
			    new_fcport->d_id.b.domain,
			    new_fcport->d_id.b.area,
			    new_fcport->d_id.b.al_pa);

		/* Bypass if same physical adapter. */
		if (new_fcport->d_id.b24 == base_vha->d_id.b24)

		/* Bypass virtual ports of the same host. */
		if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))

		/* Bypass if same domain and area of adapter. */
		if (((new_fcport->d_id.b24 & 0xffff00) ==
		    (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==

		/* Bypass reserved domain fields. */
		if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)

		/* Bypass ports whose FCP-4 type is not FCP_SCSI */
		if (ql2xgffidenable &&
		    (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
		    new_fcport->fc4_type != 0))

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

		/* Locate matching device in database. */
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,

			fcport->scan_state = QLA_FCPORT_FOUND;

			/* Update port state. */
			memcpy(fcport->fabric_port_name,
			    new_fcport->fabric_port_name, WWN_SIZE);
			fcport->fp_speed = new_fcport->fp_speed;

			/*
			 * If address the same and state FCS_ONLINE
			 * (or in target mode), nothing changed.
			 */
			if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
			    (atomic_read(&fcport->state) == FCS_ONLINE ||
			     (vha->host->active_mode == MODE_TARGET))) {

			if (fcport->login_retry == 0)
				fcport->login_retry =
				    vha->hw->login_retry_count;
			/*
			 * If device was not a fabric device before.
			 */
			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
				fcport->d_id.b24 = new_fcport->d_id.b24;
				qla2x00_clear_loop_id(fcport);
				fcport->flags |= (FCF_FABRIC_DEVICE |

			/*
			 * Port ID changed or device was marked to be updated;
			 * Log it out if still logged in and mark it for
			 */
			if (qla_tgt_mode_enabled(base_vha)) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
				    "port changed FC ID, %8phC"
				    " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
				    fcport->d_id.b.domain,
				    fcport->d_id.b.area,
				    fcport->d_id.b.al_pa,
				    new_fcport->d_id.b.domain,
				    new_fcport->d_id.b.area,
				    new_fcport->d_id.b.al_pa);
				fcport->d_id.b24 = new_fcport->d_id.b24;

			fcport->d_id.b24 = new_fcport->d_id.b24;
			fcport->flags |= FCF_LOGIN_NEEDED;

		/* NVMe target with a pending delete: roll back its login. */
		if (found && NVME_TARGET(vha->hw, fcport)) {
			if (fcport->disc_state == DSC_DELETE_PEND) {
				qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
				vha->fcport_count--;
				fcport->login_succ = 0;

			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		/* If device was not in our fcports list, then add it. */
		new_fcport->scan_state = QLA_FCPORT_FOUND;
		list_add_tail(&new_fcport->list, &vha->vp_fcports);

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		/* Allocate a new replacement fcport. */
		nxt_d_id.b24 = new_fcport->d_id.b24;
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL) {
			ql_log(ql_log_warn, vha, 0xd032,
			    "Memory allocation failed for fcport.\n");
			return (QLA_MEMORY_ALLOC_FAILED);
		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
		new_fcport->d_id.b24 = nxt_d_id.b24;

	qla2x00_free_fcport(new_fcport);

	/*
	 * Logout all previous fabric dev marked lost, except FCP2 devices.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))

		if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
				    ql2xplogiabsentdevice);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",

					qlt_schedule_sess_for_deletion(fcport);

		if (fcport->scan_state == QLA_FCPORT_FOUND &&
		    (fcport->flags & FCF_LOGIN_NEEDED) != 0)
			qla24xx_fcport_handle_login(vha, fcport);
/* FW does not set aside Loop id for MGMT Server/FFFFFAh */
qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
	/*
	 * Reserve a loop ID for the management-server login.  The physical
	 * port (vp_idx 0) always uses NPH_MGMT_SERVER; vports scan the
	 * shared loop_id_map downward from just below it.
	 */
	int loop_id = FC_NO_LOOP_ID;
	int lid = NPH_MGMT_SERVER - vha->vp_idx;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == 0) {
		set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
		return NPH_MGMT_SERVER;

	/* pick id from high and work down to low */
	spin_lock_irqsave(&ha->vport_slock, flags);
	for (; lid > 0; lid--) {
		if (!test_bit(lid, vha->hw->loop_id_map)) {
			set_bit(lid, vha->hw->loop_id_map);

	spin_unlock_irqrestore(&ha->vport_slock, flags);
6406 * qla2x00_fabric_login
6407 * Issue fabric login command.
6410 * ha = adapter block pointer.
6411 * device = pointer to FC device type structure.
6414 * 0 - Login successfully
6416 * 2 - Initiator device
/*
 * Perform a fabric login for @fcport via the ISP-specific
 * isp_ops->fabric_login mailbox op, then dispatch on the mailbox
 * status in mb[0]:
 *  - MBS_PORT_ID_USED:    firmware reports the port already owns a
 *                         different loop ID (mb[1]); adopt it and retry.
 *  - MBS_COMMAND_COMPLETE: login succeeded; record next_loopid and
 *                         classify the port (initiator/target, FCP-2,
 *                         supported service classes) from mb[1].
 *  - MBS_LOOP_ID_USED:    pick a new loop ID and retry.
 *  - MBS_COMMAND_ERROR:   explicit logout and mark the device lost.
 *  - anything else:       unrecoverable; logout, clear the loop ID and
 *                         stop retrying.
 * NOTE(review): the surrounding retry loop and several branch bodies
 * are elided in this view; the per-branch actions below are what is
 * visible here.
 */
6420 qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
6421 uint16_t *next_loopid)
6425 uint16_t tmp_loopid;
6426 uint16_t mb[MAILBOX_REGISTER_COUNT];
6427 struct qla_hw_data *ha = vha->hw;
6433 ql_dbg(ql_dbg_disc, vha, 0x2000,
6434 "Trying Fabric Login w/loop id 0x%04x for port "
6436 fcport->loop_id, fcport->d_id.b.domain,
6437 fcport->d_id.b.area, fcport->d_id.b.al_pa);
6439 /* Login fcport on switch. */
6440 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
6441 fcport->d_id.b.domain, fcport->d_id.b.area,
6442 fcport->d_id.b.al_pa, mb, BIT_0);
6443 if (rval != QLA_SUCCESS) {
6446 if (mb[0] == MBS_PORT_ID_USED) {
6448 * Device has another loop ID. The firmware team
6449 * recommends the driver perform an implicit login with
6450 * the specified ID again. The ID we just used is save
6451 * here so we return with an ID that can be tried by
6455 tmp_loopid = fcport->loop_id;
6456 fcport->loop_id = mb[1];
6458 ql_dbg(ql_dbg_disc, vha, 0x2001,
6459 "Fabric Login: port in use - next loop "
6460 "id=0x%04x, port id= %02x%02x%02x.\n",
6461 fcport->loop_id, fcport->d_id.b.domain,
6462 fcport->d_id.b.area, fcport->d_id.b.al_pa);
6464 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
6469 /* A retry occurred before. */
6470 *next_loopid = tmp_loopid;
6473 * No retry occurred before. Just increment the
6474 * ID value for next login.
6476 *next_loopid = (fcport->loop_id + 1);
/* mb[1] BIT_0 set => remote port acts as an initiator. */
6479 if (mb[1] & BIT_0) {
6480 fcport->port_type = FCT_INITIATOR;
6482 fcport->port_type = FCT_TARGET;
6483 if (mb[1] & BIT_1) {
6484 fcport->flags |= FCF_FCP2_DEVICE;
6489 fcport->supported_classes |= FC_COS_CLASS2;
6491 fcport->supported_classes |= FC_COS_CLASS3;
6493 if (IS_FWI2_CAPABLE(ha)) {
6496 FCF_CONF_COMP_SUPPORTED;
6501 } else if (mb[0] == MBS_LOOP_ID_USED) {
6503 * Loop ID already used, try next loop ID.
6506 rval = qla2x00_find_new_loop_id(vha, fcport);
6507 if (rval != QLA_SUCCESS) {
6508 /* Ran out of loop IDs to use */
6511 } else if (mb[0] == MBS_COMMAND_ERROR) {
6513 * Firmware possibly timed out during login. If NO
6514 * retries are left to do then the device is declared
6517 *next_loopid = fcport->loop_id;
6518 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
6519 fcport->d_id.b.domain, fcport->d_id.b.area,
6520 fcport->d_id.b.al_pa);
6521 qla2x00_mark_device_lost(vha, fcport, 1);
6527 * unrecoverable / not handled error
6529 ql_dbg(ql_dbg_disc, vha, 0x2002,
6530 "Failed=%x port_id=%02x%02x%02x loop_id=%x "
6531 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
6532 fcport->d_id.b.area, fcport->d_id.b.al_pa,
6533 fcport->loop_id, jiffies);
6535 *next_loopid = fcport->loop_id;
6536 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
6537 fcport->d_id.b.domain, fcport->d_id.b.area,
6538 fcport->d_id.b.al_pa);
/* Give up: release the loop ID and cancel further login retries. */
6539 qla2x00_clear_loop_id(fcport);
6540 fcport->login_retry = 0;
6551 * qla2x00_local_device_login
6552 * Issue local device login command.
6555 * ha = adapter block pointer.
6556 * loop_id = loop id of device to login to.
6558 * Returns (Where's the #define!!!!):
6559 * 0 - Login successfully
/*
 * Log in to a device on the local loop and inspect the mailbox
 * completion status for firmware-reported errors (command error,
 * parameter error when the device is absent from the PCB table).
 * NOTE(review): the rval adjustments for the error cases and the
 * final return are elided in this view.
 */
6564 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
6567 uint16_t mb[MAILBOX_REGISTER_COUNT];
6569 memset(mb, 0, sizeof(mb));
6570 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
6571 if (rval == QLA_SUCCESS) {
6572 /* Interrogate mailbox registers for any errors */
6573 if (mb[0] == MBS_COMMAND_ERROR)
6575 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
6576 /* device not in PCB table */
6584 * qla2x00_loop_resync
6585 * Resync with fibre channel devices.
6588 * ha = adapter block pointer.
/*
 * Re-synchronize with the fabric/loop: wait for firmware readiness,
 * issue a MK_SYNC_ALL marker (non-QLAFX00 only), then reconfigure the
 * loop, repeating while LOOP_RESYNC_NEEDED stays set and neither the
 * loop-down timer nor ISP_ABORT_NEEDED intervenes.  Fails immediately
 * if an ISP abort is pending.
 */
6594 qla2x00_loop_resync(scsi_qla_host_t *vha)
6596 int rval = QLA_SUCCESS;
6599 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6600 if (vha->flags.online) {
6601 if (!(rval = qla2x00_fw_ready(vha))) {
6602 /* Wait at most MAX_TARGET RSCNs for a stable link. */
6605 if (!IS_QLAFX00(vha->hw)) {
6607 * Issue a marker after FW becomes
6610 qla2x00_marker(vha, vha->hw->base_qpair,
6612 vha->marker_needed = 0;
6615 /* Remap devices on Loop. */
6616 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6618 if (IS_QLAFX00(vha->hw))
6619 qlafx00_configure_devices(vha);
6621 qla2x00_configure_loop(vha);
6624 } while (!atomic_read(&vha->loop_down_timer) &&
6625 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
6626 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
6631 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
6632 return (QLA_FUNCTION_FAILED);
6635 ql_dbg(ql_dbg_disc, vha, 0x206c,
6636 "%s *** FAILED ***.\n", __func__);
6642 * qla2x00_perform_loop_resync
6643 * Description: This function will set the appropriate flags and call
6644 * qla2x00_loop_resync. If successful loop will be resynced
6645 * Arguments : scsi_qla_host_t pointer
6646 * returm : Success or Failure
/*
 * One-shot guarded resync: LOOP_RESYNC_ACTIVE serializes callers.
 * If no cable is unplugged (DFLG_NO_CABLE clear), force the loop UP
 * and flag local-loop update / FC4 registration / resync before
 * invoking qla2x00_loop_resync(); on failure the loop is marked DEAD.
 */
6649 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
6653 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
6654 /*Configure the flags so that resync happens properly*/
6655 atomic_set(&ha->loop_down_timer, 0);
6656 if (!(ha->device_flags & DFLG_NO_CABLE)) {
6657 atomic_set(&ha->loop_state, LOOP_UP);
6658 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
6659 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
6660 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
6662 rval = qla2x00_loop_resync(ha);
6664 atomic_set(&ha->loop_state, LOOP_DEAD);
6666 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
/*
 * Walk every vport on the HBA and perform the deferred rport removal
 * (qla2x00_rport_del) for fcports that have a pending drport and are
 * not UNCONFIGURED.  vref_count pins each vport while vport_slock is
 * dropped around the (sleepable) rport deletion.
 */
6673 qla2x00_update_fcports(scsi_qla_host_t *base_vha)
6676 struct scsi_qla_host *vha, *tvp;
6677 struct qla_hw_data *ha = base_vha->hw;
6678 unsigned long flags;
6680 spin_lock_irqsave(&ha->vport_slock, flags);
6681 /* Go with deferred removal of rport references. */
6682 list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) {
6683 atomic_inc(&vha->vref_count);
6684 list_for_each_entry(fcport, &vha->vp_fcports, list) {
6685 if (fcport->drport &&
6686 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
6687 spin_unlock_irqrestore(&ha->vport_slock, flags);
6688 qla2x00_rport_del(fcport);
6690 spin_lock_irqsave(&ha->vport_slock, flags);
6693 atomic_dec(&vha->vref_count);
6694 wake_up(&vha->vref_waitq);
6696 spin_unlock_irqrestore(&ha->vport_slock, flags);
6699 /* Assumes idc_lock always held on entry */
/*
 * Decide whether this function is the IDC reset owner.  Reads the
 * drv-presence and dev-partition-info registers (direct CRB reads on
 * ISP8044, qla83xx_rd_reg otherwise), finds the other FCoE-class
 * function number from the partition info nibbles (functions 0-7 in
 * info1, 8-15 in info2), and claims ownership iff no non-FCoE driver
 * is present and our port number is the lowest FCoE function.
 */
6701 qla83xx_reset_ownership(scsi_qla_host_t *vha)
6703 struct qla_hw_data *ha = vha->hw;
6704 uint32_t drv_presence, drv_presence_mask;
6705 uint32_t dev_part_info1, dev_part_info2, class_type;
6706 uint32_t class_type_mask = 0x3;
6707 uint16_t fcoe_other_function = 0xffff, i;
6709 if (IS_QLA8044(ha)) {
6710 drv_presence = qla8044_rd_direct(vha,
6711 QLA8044_CRB_DRV_ACTIVE_INDEX);
6712 dev_part_info1 = qla8044_rd_direct(vha,
6713 QLA8044_CRB_DEV_PART_INFO_INDEX);
6714 dev_part_info2 = qla8044_rd_direct(vha,
6715 QLA8044_CRB_DEV_PART_INFO2);
6717 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6718 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
6719 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
/* Each 4-bit nibble of dev_part_info1 carries one function's class. */
6721 for (i = 0; i < 8; i++) {
6722 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
6723 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
6724 (i != ha->portnum)) {
6725 fcoe_other_function = i;
6729 if (fcoe_other_function == 0xffff) {
6730 for (i = 0; i < 8; i++) {
6731 class_type = ((dev_part_info2 >> (i * 4)) &
6733 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
6734 ((i + 8) != ha->portnum)) {
6735 fcoe_other_function = i + 8;
6741 * Prepare drv-presence mask based on fcoe functions present.
6742 * However consider only valid physical fcoe function numbers (0-15).
6744 drv_presence_mask = ~((1 << (ha->portnum)) |
6745 ((fcoe_other_function == 0xffff) ?
6746 0 : (1 << (fcoe_other_function))));
6748 /* We are the reset owner iff:
6749 * - No other protocol drivers present.
6750 * - This is the lowest among fcoe functions. */
6751 if (!(drv_presence & drv_presence_mask) &&
6752 (ha->portnum < fcoe_other_function)) {
6753 ql_dbg(ql_dbg_p3p, vha, 0xb07f,
6754 "This host is Reset owner.\n");
6755 ha->flags.nic_core_reset_owner = 1;
/*
 * Set this function's bit in the IDC driver-ack register
 * (read-modify-write of QLA83XX_IDC_DRIVER_ACK).
 * Caller is expected to hold the idc lock (see sibling helpers).
 */
6760 __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
6762 int rval = QLA_SUCCESS;
6763 struct qla_hw_data *ha = vha->hw;
6766 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6767 if (rval == QLA_SUCCESS) {
6768 drv_ack |= (1 << ha->portnum);
6769 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
/*
 * Clear this function's bit in the IDC driver-ack register --
 * inverse of __qla83xx_set_drv_ack().
 */
6776 __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
6778 int rval = QLA_SUCCESS;
6779 struct qla_hw_data *ha = vha->hw;
6782 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6783 if (rval == QLA_SUCCESS) {
6784 drv_ack &= ~(1 << ha->portnum);
6785 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
/* Map a QLA8XXX IDC device-state code to a human-readable string
 * for log messages. */
6792 qla83xx_dev_state_to_string(uint32_t dev_state)
6794 switch (dev_state) {
6795 case QLA8XXX_DEV_COLD:
6796 return "COLD/RE-INIT";
6797 case QLA8XXX_DEV_INITIALIZING:
6798 return "INITIALIZING";
6799 case QLA8XXX_DEV_READY:
6801 case QLA8XXX_DEV_NEED_RESET:
6802 return "NEED RESET";
6803 case QLA8XXX_DEV_NEED_QUIESCENT:
6804 return "NEED QUIESCENT";
6805 case QLA8XXX_DEV_FAILED:
6807 case QLA8XXX_DEV_QUIESCENT:
6814 /* Assumes idc-lock always held on entry */
/*
 * Record an IDC audit event in the QLA83XX_IDC_AUDIT register.
 * Register layout (from the encodings below): bits 0-6 port number,
 * bit 7 audit type, bits 8+ payload.
 *  - IDC_AUDIT_TIMESTAMP:  stamp current time (seconds) and cache it
 *    in ha->idc_audit_ts.
 *  - IDC_AUDIT_COMPLETION: write elapsed seconds since the timestamp.
 */
6816 qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
6818 struct qla_hw_data *ha = vha->hw;
6819 uint32_t idc_audit_reg = 0, duration_secs = 0;
6821 switch (audit_type) {
6822 case IDC_AUDIT_TIMESTAMP:
6823 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
6824 idc_audit_reg = (ha->portnum) |
6825 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
6826 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
6829 case IDC_AUDIT_COMPLETION:
6830 duration_secs = ((jiffies_to_msecs(jiffies) -
6831 jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
6832 idc_audit_reg = (ha->portnum) |
6833 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
6834 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
6838 ql_log(ql_log_warn, vha, 0xb078,
6839 "Invalid audit type specified.\n");
6844 /* Assumes idc_lock always held on entry */
/*
 * Initiate an IDC NIC-core reset.  Bails out with
 * QLA_FUNCTION_FAILED if reset is disabled via idc-control.
 * If we are the reset owner and the device is READY, transition the
 * IDC state to NEED_RESET and start the audit timestamp; otherwise
 * wait (dropping/retaking the idc lock each iteration) until another
 * function moves the state out of READY, then ack via drv-ack.
 */
6846 qla83xx_initiating_reset(scsi_qla_host_t *vha)
6848 struct qla_hw_data *ha = vha->hw;
6849 uint32_t idc_control, dev_state;
6851 __qla83xx_get_idc_control(vha, &idc_control);
6852 if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
6853 ql_log(ql_log_info, vha, 0xb080,
6854 "NIC Core reset has been disabled. idc-control=0x%x\n",
6856 return QLA_FUNCTION_FAILED;
6859 /* Set NEED-RESET iff in READY state and we are the reset-owner */
6860 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
6861 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
6862 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
6863 QLA8XXX_DEV_NEED_RESET);
6864 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
6865 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
6867 const char *state = qla83xx_dev_state_to_string(dev_state);
6869 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
6871 /* SV: XXX: Is timeout required here? */
6872 /* Wait for IDC state change READY -> NEED_RESET */
6873 while (dev_state == QLA8XXX_DEV_READY) {
6874 qla83xx_idc_unlock(vha, 0);
6876 qla83xx_idc_lock(vha, 0);
6877 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
6881 /* Send IDC ack by writing to drv-ack register */
6882 __qla83xx_set_drv_ack(vha);
/* Write the IDC control register (caller holds the idc lock). */
6888 __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
6890 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
/* Read the IDC control register into *idc_control (caller holds idc lock). */
6894 __qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
6896 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control)
/*
 * Check whether this function's bit is set in the IDC drv-presence
 * register; QLA_TEST_FAILED means we have been removed from IDC
 * participation.
 */
6900 qla83xx_check_driver_presence(scsi_qla_host_t *vha)
6902 uint32_t drv_presence = 0;
6903 struct qla_hw_data *ha = vha->hw;
6905 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6906 if (drv_presence & (1 << ha->portnum))
6909 return QLA_TEST_FAILED;
/*
 * Drive a full NIC-core reset under the idc lock: verify the device
 * is not FAILED and that we still participate in IDC, compute reset
 * ownership, initiate the reset, then run the IDC state handler
 * until READY/FAILED.  On success clears nic_core_hung; always
 * clears our drv-ack bit afterwards.
 */
6913 qla83xx_nic_core_reset(scsi_qla_host_t *vha)
6915 int rval = QLA_SUCCESS;
6916 struct qla_hw_data *ha = vha->hw;
6918 ql_dbg(ql_dbg_p3p, vha, 0xb058,
6919 "Entered %s().\n", __func__);
6921 if (vha->device_flags & DFLG_DEV_FAILED) {
6922 ql_log(ql_log_warn, vha, 0xb059,
6923 "Device in unrecoverable FAILED state.\n");
6924 return QLA_FUNCTION_FAILED;
6927 qla83xx_idc_lock(vha, 0);
6929 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
6930 ql_log(ql_log_warn, vha, 0xb05a,
6931 "Function=0x%x has been removed from IDC participation.\n",
6933 rval = QLA_FUNCTION_FAILED;
6937 qla83xx_reset_ownership(vha);
6939 rval = qla83xx_initiating_reset(vha);
6942 * Perform reset if we are the reset-owner,
6943 * else wait till IDC state changes to READY/FAILED.
6945 if (rval == QLA_SUCCESS) {
6946 rval = qla83xx_idc_state_handler(vha);
6948 if (rval == QLA_SUCCESS)
6949 ha->flags.nic_core_hung = 0;
6950 __qla83xx_clear_drv_ack(vha);
6954 qla83xx_idc_unlock(vha, 0);
6956 ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
/*
 * Capture an MCTP firmware dump.  Requires an MCTP-capable board;
 * lazily allocates the DMA-coherent dump buffer (MCTP_DUMP_SIZE) on
 * first use, pulls the dump via qla2x00_dump_mctp_data(), and -- on
 * the NIC-core owner (portnum 0, handler not already active) --
 * restarts the NIC firmware afterwards.
 */
6962 qla2xxx_mctp_dump(scsi_qla_host_t *vha)
6964 struct qla_hw_data *ha = vha->hw;
6965 int rval = QLA_FUNCTION_FAILED;
6967 if (!IS_MCTP_CAPABLE(ha)) {
6968 /* This message can be removed from the final version */
6969 ql_log(ql_log_info, vha, 0x506d,
6970 "This board is not MCTP capable\n");
6974 if (!ha->mctp_dump) {
6975 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
6976 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
6978 if (!ha->mctp_dump) {
6979 ql_log(ql_log_warn, vha, 0x506e,
6980 "Failed to allocate memory for mctp dump\n");
6985 #define MCTP_DUMP_STR_ADDR 0x00000000
6986 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
6987 MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
6988 if (rval != QLA_SUCCESS) {
6989 ql_log(ql_log_warn, vha, 0x506f,
6990 "Failed to capture mctp dump\n");
6992 ql_log(ql_log_info, vha, 0x5070,
6993 "Mctp dump capture for host (%ld/%p).\n",
6994 vha->host_no, ha->mctp_dump);
6995 ha->mctp_dumped = 1;
6998 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
6999 ha->flags.nic_core_reset_hdlr_active = 1;
7000 rval = qla83xx_restart_nic_firmware(vha);
7002 /* NIC Core reset failed. */
7003 ql_log(ql_log_warn, vha, 0x5071,
7004 "Failed to restart nic firmware\n");
7006 ql_dbg(ql_dbg_p3p, vha, 0xb084,
7007 "Restarted NIC firmware successfully.\n");
7008 ha->flags.nic_core_reset_hdlr_active = 0;
7016 * qla2x00_quiesce_io
7017 * Description: This function will block the new I/Os
7018 * Its not aborting any I/Os as context
7019 * is not destroyed during quiescence
7020 * Arguments: scsi_qla_host_t
/*
 * Quiesce I/O: drive the base vha (and every vport, pinned via
 * vref_count while vport_slock is dropped) to LOOP_DOWN, marking all
 * devices lost, then wait for in-flight commands to drain.
 */
7024 qla2x00_quiesce_io(scsi_qla_host_t *vha)
7026 struct qla_hw_data *ha = vha->hw;
7027 struct scsi_qla_host *vp, *tvp;
7028 unsigned long flags;
7030 ql_dbg(ql_dbg_dpc, vha, 0x401d,
7031 "Quiescing I/O - ha=%p.\n", ha);
7033 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
7034 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
7035 atomic_set(&vha->loop_state, LOOP_DOWN);
7036 qla2x00_mark_all_devices_lost(vha);
7038 spin_lock_irqsave(&ha->vport_slock, flags);
7039 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7040 atomic_inc(&vp->vref_count);
7041 spin_unlock_irqrestore(&ha->vport_slock, flags);
7043 qla2x00_mark_all_devices_lost(vp);
7045 spin_lock_irqsave(&ha->vport_slock, flags);
7046 atomic_dec(&vp->vref_count);
7048 spin_unlock_irqrestore(&ha->vport_slock, flags);
7050 if (!atomic_read(&vha->loop_down_timer))
7051 atomic_set(&vha->loop_down_timer,
7054 /* Wait for pending cmds to complete */
7055 WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST)
/*
 * Tear down adapter state ahead of an ISP abort/reset:
 *  - mark host offline (except P3P/82XX, which needs commands to
 *    complete first) and reset the chip,
 *  - propagate the new chip_reset generation and zero the per-qpair
 *    command counters,
 *  - purge pending mailbox commands (purge_mbox + completing
 *    mbx_intr_comp, then spinning until all three pending-mbx stage
 *    counters drain),
 *  - force LOOP_DOWN and mark devices lost on the base port and every
 *    vport (vref_count pins each vport across the lock drop),
 *  - clear per-fcport async login state on all VPs,
 *  - for P3P parts, wait for IO DMA completion via
 *    qla82xx_chip_reset_cleanup(),
 *  - finally fail all outstanding commands with DID_RESET.
 */
7060 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
7062 struct qla_hw_data *ha = vha->hw;
7063 struct scsi_qla_host *vp, *tvp;
7064 unsigned long flags;
7068 /* For ISP82XX, driver waits for completion of the commands.
7069 * online flag should be set.
7071 if (!(IS_P3P_TYPE(ha)))
7072 vha->flags.online = 0;
7073 ha->flags.chip_reset_done = 0;
7074 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
7075 vha->qla_stats.total_isp_aborts++;
7077 ql_log(ql_log_info, vha, 0x00af,
7078 "Performing ISP error recovery - ha=%p.\n", ha);
7080 ha->flags.purge_mbox = 1;
7081 /* For ISP82XX, reset_chip is just disabling interrupts.
7082 * Driver waits for the completion of the commands.
7083 * the interrupts need to be enabled.
7085 if (!(IS_P3P_TYPE(ha)))
7086 ha->isp_ops->reset_chip(vha);
7088 ha->link_data_rate = PORT_SPEED_UNKNOWN;
7090 ha->flags.rida_fmt2 = 0;
7091 ha->flags.n2n_ae = 0;
7092 ha->flags.lip_ae = 0;
7093 ha->current_topology = 0;
7095 ha->flags.fw_init_done = 0;
7097 ha->base_qpair->chip_reset = ha->chip_reset;
7098 ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0;
7099 ha->base_qpair->prev_completion_cnt = 0;
7100 for (i = 0; i < ha->max_qpairs; i++) {
7101 if (ha->queue_pair_map[i]) {
7102 ha->queue_pair_map[i]->chip_reset =
7103 ha->base_qpair->chip_reset;
7104 ha->queue_pair_map[i]->cmd_cnt =
7105 ha->queue_pair_map[i]->cmd_completion_cnt = 0;
7106 ha->base_qpair->prev_completion_cnt = 0;
7110 /* purge MBox commands */
7111 if (atomic_read(&ha->num_pend_mbx_stage3)) {
7112 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
7113 complete(&ha->mbx_intr_comp);
7117 while (atomic_read(&ha->num_pend_mbx_stage3) ||
7118 atomic_read(&ha->num_pend_mbx_stage2) ||
7119 atomic_read(&ha->num_pend_mbx_stage1)) {
7125 ha->flags.purge_mbox = 0;
7127 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
7128 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
7129 atomic_set(&vha->loop_state, LOOP_DOWN);
7130 qla2x00_mark_all_devices_lost(vha);
7132 spin_lock_irqsave(&ha->vport_slock, flags);
7133 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7134 atomic_inc(&vp->vref_count);
7135 spin_unlock_irqrestore(&ha->vport_slock, flags);
7137 qla2x00_mark_all_devices_lost(vp);
7139 spin_lock_irqsave(&ha->vport_slock, flags);
7140 atomic_dec(&vp->vref_count);
7142 spin_unlock_irqrestore(&ha->vport_slock, flags);
7144 if (!atomic_read(&vha->loop_down_timer))
7145 atomic_set(&vha->loop_down_timer,
7149 /* Clear all async request states across all VPs. */
7150 list_for_each_entry(fcport, &vha->vp_fcports, list) {
7151 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
7152 fcport->scan_state = 0;
7154 spin_lock_irqsave(&ha->vport_slock, flags);
7155 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7156 atomic_inc(&vp->vref_count);
7157 spin_unlock_irqrestore(&ha->vport_slock, flags);
7159 list_for_each_entry(fcport, &vp->vp_fcports, list)
7160 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
7162 spin_lock_irqsave(&ha->vport_slock, flags);
7163 atomic_dec(&vp->vref_count);
7165 spin_unlock_irqrestore(&ha->vport_slock, flags);
7167 /* Make sure for ISP 82XX IO DMA is complete */
7168 if (IS_P3P_TYPE(ha)) {
7169 qla82xx_chip_reset_cleanup(vha);
7170 ql_log(ql_log_info, vha, 0x00b4,
7171 "Done chip reset cleanup.\n");
7173 /* Done waiting for pending commands. Reset online flag */
7174 vha->flags.online = 0;
7177 /* Requeue all commands in outstanding command list. */
7178 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
7179 /* memory barrier */
7185 * Resets ISP and aborts all outstanding commands.
7188 * ha = adapter block pointer.
/*
 * Full ISP abort/recovery path.  After cleanup, bails out early for
 * port-isolated hosts, register disconnects, ROM-mode aborts, cleared
 * fcoe presence (QLA8031), and permanent PCI failures.  Otherwise
 * re-reads flash version and NVRAM config, restarts the ISP, and on
 * success re-enables interrupts, re-arms FCE/EFT tracing, reconfigures
 * the HBA address and recursively aborts every vport.  On failure it
 * counts down isp_abort_cnt, disabling the board when retries are
 * exhausted.
 * NOTE(review): several branch bodies (e.g. the `return` after early
 * exits, FCE precondition checks) are elided in this view.
 */
7194 qla2x00_abort_isp(scsi_qla_host_t *vha)
7198 struct qla_hw_data *ha = vha->hw;
7199 struct scsi_qla_host *vp, *tvp;
7200 struct req_que *req = ha->req_q_map[0];
7201 unsigned long flags;
7203 if (vha->flags.online) {
7204 qla2x00_abort_isp_cleanup(vha);
7206 if (vha->hw->flags.port_isolated)
7209 if (qla2x00_isp_reg_stat(ha)) {
7210 ql_log(ql_log_info, vha, 0x803f,
7211 "ISP Abort - ISP reg disconnect, exiting.\n");
7215 if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
7216 ha->flags.chip_reset_done = 1;
7217 vha->flags.online = 1;
7219 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7223 if (IS_QLA8031(ha)) {
7224 ql_dbg(ql_dbg_p3p, vha, 0xb05c,
7225 "Clearing fcoe driver presence.\n");
7226 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
7227 ql_dbg(ql_dbg_p3p, vha, 0xb073,
7228 "Error while clearing DRV-Presence.\n");
7231 if (unlikely(pci_channel_offline(ha->pdev) &&
7232 ha->flags.pci_channel_io_perm_failure)) {
7233 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7238 switch (vha->qlini_mode) {
7239 case QLA2XXX_INI_MODE_DISABLED:
7240 if (!qla_tgt_mode_enabled(vha))
7243 case QLA2XXX_INI_MODE_DUAL:
7244 if (!qla_dual_mode_enabled(vha) &&
7245 !qla_ini_mode_enabled(vha))
7248 case QLA2XXX_INI_MODE_ENABLED:
7253 ha->isp_ops->get_flash_version(vha, req->ring);
/* Re-check register health before each expensive config step. */
7255 if (qla2x00_isp_reg_stat(ha)) {
7256 ql_log(ql_log_info, vha, 0x803f,
7257 "ISP Abort - ISP reg disconnect pre nvram config, exiting.\n");
7260 ha->isp_ops->nvram_config(vha);
7262 if (qla2x00_isp_reg_stat(ha)) {
7263 ql_log(ql_log_info, vha, 0x803f,
7264 "ISP Abort - ISP reg disconnect post nvmram config, exiting.\n");
7267 if (!qla2x00_restart_isp(vha)) {
7268 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7270 if (!atomic_read(&vha->loop_down_timer)) {
7272 * Issue marker command only when we are going
7273 * to start the I/O .
7275 vha->marker_needed = 1;
7278 vha->flags.online = 1;
7280 ha->isp_ops->enable_intrs(ha);
7282 ha->isp_abort_cnt = 0;
7283 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7285 if (IS_QLA81XX(ha) || IS_QLA8031(ha))
7286 qla2x00_get_fw_version(vha);
7288 ha->flags.fce_enabled = 1;
7290 fce_calc_size(ha->fce_bufs));
7291 rval = qla2x00_enable_fce_trace(vha,
7292 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
7295 ql_log(ql_log_warn, vha, 0x8033,
7296 "Unable to reinitialize FCE "
7298 ha->flags.fce_enabled = 0;
7303 memset(ha->eft, 0, EFT_SIZE);
7304 rval = qla2x00_enable_eft_trace(vha,
7305 ha->eft_dma, EFT_NUM_BUFFERS);
7307 ql_log(ql_log_warn, vha, 0x8034,
7308 "Unable to reinitialize EFT "
7312 } else { /* failed the ISP abort */
7313 vha->flags.online = 1;
7314 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
7315 if (ha->isp_abort_cnt == 0) {
7316 ql_log(ql_log_fatal, vha, 0x8035,
7317 "ISP error recover failed - "
7318 "board disabled.\n");
7320 * The next call disables the board
7323 qla2x00_abort_isp_cleanup(vha);
7324 vha->flags.online = 0;
7325 clear_bit(ISP_ABORT_RETRY,
7328 } else { /* schedule another ISP abort */
7329 ha->isp_abort_cnt--;
7330 ql_dbg(ql_dbg_taskm, vha, 0x8020,
7331 "ISP abort - retry remaining %d.\n",
7336 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
7337 ql_dbg(ql_dbg_taskm, vha, 0x8021,
7338 "ISP error recovery - retrying (%d) "
7339 "more times.\n", ha->isp_abort_cnt);
7340 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7347 if (vha->hw->flags.port_isolated) {
7348 qla2x00_abort_isp_cleanup(vha);
7353 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
7354 qla2x00_configure_hba(vha);
7355 spin_lock_irqsave(&ha->vport_slock, flags);
7356 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7358 atomic_inc(&vp->vref_count);
7359 spin_unlock_irqrestore(&ha->vport_slock, flags);
7361 qla2x00_vp_abort_isp(vp);
7363 spin_lock_irqsave(&ha->vport_slock, flags);
7364 atomic_dec(&vp->vref_count);
7367 spin_unlock_irqrestore(&ha->vport_slock, flags);
7369 if (IS_QLA8031(ha)) {
7370 ql_dbg(ql_dbg_p3p, vha, 0xb05d,
7371 "Setting back fcoe driver presence.\n");
7372 if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
7373 ql_dbg(ql_dbg_p3p, vha, 0xb074,
7374 "Error while setting DRV-Presence.\n");
7377 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
7385 * qla2x00_restart_isp
7386 * restarts the ISP after a reset
7389 * ha = adapter block pointer.
/*
 * Restart sequence: (re)load firmware if needed (chip diag + setup),
 * re-init the rings, mark chip reset done, re-init the extra
 * queues, then wait for firmware ready.  A missing cable
 * (DFLG_NO_CABLE) is treated as success; otherwise a marker is issued
 * and a loop resync is scheduled.
 */
7395 qla2x00_restart_isp(scsi_qla_host_t *vha)
7398 struct qla_hw_data *ha = vha->hw;
7400 /* If firmware needs to be loaded */
7401 if (qla2x00_isp_firmware(vha)) {
7402 vha->flags.online = 0;
7403 status = ha->isp_ops->chip_diag(vha);
7406 status = qla2x00_setup_chip(vha);
7411 status = qla2x00_init_rings(vha);
7415 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7416 ha->flags.chip_reset_done = 1;
7418 /* Initialize the queues in use */
7419 qla25xx_init_queues(ha);
7421 status = qla2x00_fw_ready(vha);
7423 /* if no cable then assume it's good */
7424 return vha->device_flags & DFLG_NO_CABLE ? 0 : status;
7427 /* Issue a marker after FW becomes ready. */
7428 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
7429 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/*
 * Re-initialize all secondary (index >= 1) response and request
 * queues that are mapped and marked in-use, clearing each queue's
 * BIT_0 option before issuing the init mailbox command.  Init
 * failures are logged but do not stop the loop.
 */
7435 qla25xx_init_queues(struct qla_hw_data *ha)
7437 struct rsp_que *rsp = NULL;
7438 struct req_que *req = NULL;
7439 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
7443 for (i = 1; i < ha->max_rsp_queues; i++) {
7444 rsp = ha->rsp_q_map[i];
7445 if (rsp && test_bit(i, ha->rsp_qid_map)) {
7446 rsp->options &= ~BIT_0;
7447 ret = qla25xx_init_rsp_que(base_vha, rsp);
7448 if (ret != QLA_SUCCESS)
7449 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
7450 "%s Rsp que: %d init failed.\n",
7453 ql_dbg(ql_dbg_init, base_vha, 0x0100,
7454 "%s Rsp que: %d inited.\n",
7458 for (i = 1; i < ha->max_req_queues; i++) {
7459 req = ha->req_q_map[i];
7460 if (req && test_bit(i, ha->req_qid_map)) {
7461 /* Clear outstanding commands array. */
7462 req->options &= ~BIT_0;
7463 ret = qla25xx_init_req_que(base_vha, req);
7464 if (ret != QLA_SUCCESS)
7465 ql_dbg(ql_dbg_init, base_vha, 0x0101,
7466 "%s Req que: %d init failed.\n",
7469 ql_dbg(ql_dbg_init, base_vha, 0x0102,
7470 "%s Req que: %d inited.\n",
7478 * qla2x00_reset_adapter
7482 * ha = adapter block pointer.
/*
 * Hard-reset the RISC on ISP2x00-family hardware: mark the host
 * offline, disable interrupts, then pulse HCCR reset/release under
 * hardware_lock with read-backs to force PCI posting.
 */
7485 qla2x00_reset_adapter(scsi_qla_host_t *vha)
7487 unsigned long flags = 0;
7488 struct qla_hw_data *ha = vha->hw;
7489 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
7491 vha->flags.online = 0;
7492 ha->isp_ops->disable_intrs(ha);
7494 spin_lock_irqsave(&ha->hardware_lock, flags);
7495 wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
7496 rd_reg_word(&reg->hccr); /* PCI Posting. */
7497 wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
7498 rd_reg_word(&reg->hccr); /* PCI Posting. */
7499 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/*
 * ISP24xx variant of the adapter reset: no-op on P3P parts; otherwise
 * set/release RISC reset through the 24xx HCCR (dword accessors) and
 * re-enable interrupts on chips that cannot be polled.
 */
7505 qla24xx_reset_adapter(scsi_qla_host_t *vha)
7507 unsigned long flags = 0;
7508 struct qla_hw_data *ha = vha->hw;
7509 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
7511 if (IS_P3P_TYPE(ha))
7514 vha->flags.online = 0;
7515 ha->isp_ops->disable_intrs(ha);
7517 spin_lock_irqsave(&ha->hardware_lock, flags);
7518 wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
7519 rd_reg_dword(&reg->hccr);
7520 wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
7521 rd_reg_dword(&reg->hccr);
7522 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7524 if (IS_NOPOLLING_TYPE(ha))
7525 ha->isp_ops->enable_intrs(ha);
7530 /* On sparc systems, obtain port and node WWN from firmware
/*
 * Populate nv->port_name / nv->node_name from the OpenFirmware
 * "port-wwn" / "node-wwn" device-tree properties of the PCI device,
 * copying WWN_SIZE bytes only when the property exists and is large
 * enough.
 */
7533 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
7534 struct nvram_24xx *nv)
7537 struct qla_hw_data *ha = vha->hw;
7538 struct pci_dev *pdev = ha->pdev;
7539 struct device_node *dp = pci_device_to_OF_node(pdev);
7543 val = of_get_property(dp, "port-wwn", &len);
7544 if (val && len >= WWN_SIZE)
7545 memcpy(nv->port_name, val, WWN_SIZE);
7547 val = of_get_property(dp, "node-wwn", &len);
7548 if (val && len >= WWN_SIZE)
7549 memcpy(nv->node_name, val, WWN_SIZE);
7554 qla24xx_nvram_config(scsi_qla_host_t *vha)
7557 struct init_cb_24xx *icb;
7558 struct nvram_24xx *nv;
7560 uint8_t *dptr1, *dptr2;
7563 struct qla_hw_data *ha = vha->hw;
7566 icb = (struct init_cb_24xx *)ha->init_cb;
7569 /* Determine NVRAM starting address. */
7570 if (ha->port_no == 0) {
7571 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
7572 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
7574 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
7575 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
7578 ha->nvram_size = sizeof(*nv);
7579 ha->vpd_size = FA_NVRAM_VPD_SIZE;
7581 /* Get VPD data into cache */
7582 ha->vpd = ha->nvram + VPD_OFFSET;
7583 ha->isp_ops->read_nvram(vha, ha->vpd,
7584 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
7586 /* Get NVRAM data into cache and calculate checksum. */
7587 dptr = (__force __le32 *)nv;
7588 ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
7589 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
7590 chksum += le32_to_cpu(*dptr);
7592 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
7593 "Contents of NVRAM\n");
7594 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
7595 nv, ha->nvram_size);
7597 /* Bad NVRAM data, set defaults parameters. */
7598 if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
7599 le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
7600 /* Reset NVRAM data. */
7601 ql_log(ql_log_warn, vha, 0x006b,
7602 "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
7603 chksum, nv->id, nv->nvram_version);
7604 ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv));
7605 ql_log(ql_log_warn, vha, 0x006c,
7606 "Falling back to functioning (yet invalid -- WWPN) "
7610 * Set default initialization control block.
7612 memset(nv, 0, ha->nvram_size);
7613 nv->nvram_version = cpu_to_le16(ICB_VERSION);
7614 nv->version = cpu_to_le16(ICB_VERSION);
7615 nv->frame_payload_size = cpu_to_le16(2048);
7616 nv->execution_throttle = cpu_to_le16(0xFFFF);
7617 nv->exchange_count = cpu_to_le16(0);
7618 nv->hard_address = cpu_to_le16(124);
7619 nv->port_name[0] = 0x21;
7620 nv->port_name[1] = 0x00 + ha->port_no + 1;
7621 nv->port_name[2] = 0x00;
7622 nv->port_name[3] = 0xe0;
7623 nv->port_name[4] = 0x8b;
7624 nv->port_name[5] = 0x1c;
7625 nv->port_name[6] = 0x55;
7626 nv->port_name[7] = 0x86;
7627 nv->node_name[0] = 0x20;
7628 nv->node_name[1] = 0x00;
7629 nv->node_name[2] = 0x00;
7630 nv->node_name[3] = 0xe0;
7631 nv->node_name[4] = 0x8b;
7632 nv->node_name[5] = 0x1c;
7633 nv->node_name[6] = 0x55;
7634 nv->node_name[7] = 0x86;
7635 qla24xx_nvram_wwn_from_ofw(vha, nv);
7636 nv->login_retry_count = cpu_to_le16(8);
7637 nv->interrupt_delay_timer = cpu_to_le16(0);
7638 nv->login_timeout = cpu_to_le16(0);
7639 nv->firmware_options_1 =
7640 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
7641 nv->firmware_options_2 = cpu_to_le32(2 << 4);
7642 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
7643 nv->firmware_options_3 = cpu_to_le32(2 << 13);
7644 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
7645 nv->efi_parameters = cpu_to_le32(0);
7646 nv->reset_delay = 5;
7647 nv->max_luns_per_target = cpu_to_le16(128);
7648 nv->port_down_retry_count = cpu_to_le16(30);
7649 nv->link_down_timeout = cpu_to_le16(30);
7654 if (qla_tgt_mode_enabled(vha)) {
7655 /* Don't enable full login after initial LIP */
7656 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
7657 /* Don't enable LIP full login for initiator */
7658 nv->host_p &= cpu_to_le32(~BIT_10);
7661 qlt_24xx_config_nvram_stage1(vha, nv);
7663 /* Reset Initialization control block */
7664 memset(icb, 0, ha->init_cb_size);
7666 /* Copy 1st segment. */
7667 dptr1 = (uint8_t *)icb;
7668 dptr2 = (uint8_t *)&nv->version;
7669 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
7671 *dptr1++ = *dptr2++;
7673 icb->login_retry_count = nv->login_retry_count;
7674 icb->link_down_on_nos = nv->link_down_on_nos;
7676 /* Copy 2nd segment. */
7677 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
7678 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
7679 cnt = (uint8_t *)&icb->reserved_3 -
7680 (uint8_t *)&icb->interrupt_delay_timer;
7682 *dptr1++ = *dptr2++;
7683 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
7685 * Setup driver NVRAM options.
7687 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
7690 qlt_24xx_config_nvram_stage2(vha, icb);
7692 if (nv->host_p & cpu_to_le32(BIT_15)) {
7693 /* Use alternate WWN? */
7694 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
7695 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
7698 /* Prepare nodename */
7699 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
7701 * Firmware will apply the following mask if the nodename was
7704 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
7705 icb->node_name[0] &= 0xF0;
7708 /* Set host adapter parameters. */
7709 ha->flags.disable_risc_code_load = 0;
7710 ha->flags.enable_lip_reset = 0;
7711 ha->flags.enable_lip_full_login =
7712 le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
7713 ha->flags.enable_target_reset =
7714 le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
7715 ha->flags.enable_led_scheme = 0;
7716 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
7718 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
7719 (BIT_6 | BIT_5 | BIT_4)) >> 4;
7721 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
7722 sizeof(ha->fw_seriallink_options24));
7724 /* save HBA serial number */
7725 ha->serial0 = icb->port_name[5];
7726 ha->serial1 = icb->port_name[6];
7727 ha->serial2 = icb->port_name[7];
7728 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
7729 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
7731 icb->execution_throttle = cpu_to_le16(0xFFFF);
7733 ha->retry_count = le16_to_cpu(nv->login_retry_count);
7735 /* Set minimum login_timeout to 4 seconds. */
7736 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
7737 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
7738 if (le16_to_cpu(nv->login_timeout) < 4)
7739 nv->login_timeout = cpu_to_le16(4);
7740 ha->login_timeout = le16_to_cpu(nv->login_timeout);
7742 /* Set minimum RATOV to 100 tenths of a second. */
7745 ha->loop_reset_delay = nv->reset_delay;
7747 /* Link Down Timeout = 0:
7749 * When Port Down timer expires we will start returning
7750 * I/O's to OS with "DID_NO_CONNECT".
7752 * Link Down Timeout != 0:
7754 * The driver waits for the link to come up after link down
7755 * before returning I/Os to OS with "DID_NO_CONNECT".
7757 if (le16_to_cpu(nv->link_down_timeout) == 0) {
7758 ha->loop_down_abort_time =
7759 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
7761 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
7762 ha->loop_down_abort_time =
7763 (LOOP_DOWN_TIME - ha->link_down_timeout);
7766 /* Need enough time to try and get the port back. */
7767 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
7768 if (qlport_down_retry)
7769 ha->port_down_retry_count = qlport_down_retry;
7771 /* Set login_retry_count */
7772 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
7773 if (ha->port_down_retry_count ==
7774 le16_to_cpu(nv->port_down_retry_count) &&
7775 ha->port_down_retry_count > 3)
7776 ha->login_retry_count = ha->port_down_retry_count;
7777 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
7778 ha->login_retry_count = ha->port_down_retry_count;
7779 if (ql2xloginretrycount)
7780 ha->login_retry_count = ql2xloginretrycount;
7782 /* N2N: driver will initiate Login instead of FW */
7783 icb->firmware_options_3 |= cpu_to_le32(BIT_8);
7786 if (!vha->flags.init_done) {
7787 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
7788 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
7789 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
7790 le16_to_cpu(icb->interrupt_delay_timer) : 2;
7792 icb->firmware_options_2 &= cpu_to_le32(
7793 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
7794 if (ha->zio_mode != QLA_ZIO_DISABLED) {
7795 ha->zio_mode = QLA_ZIO_MODE_6;
7797 ql_log(ql_log_info, vha, 0x006f,
7798 "ZIO mode %d enabled; timer delay (%d us).\n",
7799 ha->zio_mode, ha->zio_timer * 100);
7801 icb->firmware_options_2 |= cpu_to_le32(
7802 (uint32_t)ha->zio_mode);
7803 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
7807 ql_log(ql_log_warn, vha, 0x0070,
7808 "NVRAM configuration failed.\n");
/*
 * qla27xx_print_image - emit a debug dump of one flash image-status record.
 * Logs the status mask, generation counter, version, component bitmap,
 * checksum and signature under the caller-supplied @name label
 * (e.g. "Primary image", "Secondary aux image").
 */
7814 qla27xx_print_image(struct scsi_qla_host *vha, char *name,
7815 struct qla27xx_image_status *image_status)
7817 ql_dbg(ql_dbg_init, vha, 0x018b,
7818 "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
7820 image_status->image_status_mask,
7821 le16_to_cpu(image_status->generation),
7822 image_status->ver_major,
7823 image_status->ver_minor,
7824 image_status->bitmap,
7825 le32_to_cpu(image_status->checksum),
7826 le32_to_cpu(image_status->signature));
/*
 * Return nonzero (true) when the AUX image-status record does NOT carry
 * the ISP28xx AUX signature, i.e. the record is invalid.
 */
7830 qla28xx_check_aux_image_status_signature(
7831 struct qla27xx_image_status *image_status)
7833 ulong signature = le32_to_cpu(image_status->signature);
7835 return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
/*
 * Return nonzero (true) when the image-status record carries neither the
 * ISP27xx nor the ISP28xx signature, i.e. the record is invalid.
 */
7839 qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
7841 ulong signature = le32_to_cpu(image_status->signature);
7844 signature != QLA27XX_IMG_STATUS_SIGN &&
7845 signature != QLA28XX_IMG_STATUS_SIGN;
/*
 * Fold the whole image-status record into a sum of its little-endian
 * dwords.  Callers treat a nonzero result as a checksum failure
 * (see the "checksum failed" paths in the image-selection routines).
 * NOTE(review): the accumulation loop header and return statement are
 * not visible in this excerpt.
 */
7849 qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
7851 __le32 *p = (__force __le32 *)image_status;
7852 uint n = sizeof(*image_status) / sizeof(*p);
/* accumulate each dword of the record */
7856 sum += le32_to_cpup(p);
/*
 * Map one bit of the AUX image bitmap to the flash image it selects:
 * bit set -> secondary image, bit clear -> primary image.
 */
7862 qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
7864 return aux->bitmap & bitmask ?
7865 QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
/*
 * Decode the AUX image bitmap into per-region primary/secondary
 * selections: board config, VPD/NVRAM, and the two NPIV config regions.
 * Results are written into @active_regions->aux.
 */
7869 qla28xx_component_status(
7870 struct active_regions *active_regions, struct qla27xx_image_status *aux)
7872 active_regions->aux.board_config =
7873 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);
7875 active_regions->aux.vpd_nvram =
7876 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);
7878 active_regions->aux.npiv_config_0_1 =
7879 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);
7881 active_regions->aux.npiv_config_2_3 =
7882 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
/*
 * Compare the generation counters of two image-status records.
 * The subtraction is performed in 16 bits so that counter wrap-around is
 * handled correctly; callers treat a non-negative delta as "primary is at
 * least as new as secondary".
 * NOTE(review): the declaration of 'delta' and the return statement are
 * not visible in this excerpt.
 */
7886 qla27xx_compare_image_generation(
7887 struct qla27xx_image_status *pri_image_status,
7888 struct qla27xx_image_status *sec_image_status)
7890 /* calculate generation delta as uint16 (this accounts for wrap) */
7892 le16_to_cpu(pri_image_status->generation) -
7893 le16_to_cpu(sec_image_status->generation);
7895 ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);
/*
 * qla28xx_get_aux_images - determine which AUX flash regions (primary or
 * secondary) are active on an ISP28xx adapter.
 *
 * Reads the primary and secondary AUX image-status records from flash and
 * validates each (presence of the FLT region, signature, checksum, and
 * the active bit in image_status_mask).  When both records are valid and
 * active, the one with the newer generation counter wins; the winner's
 * bitmap is decoded into @active_regions via qla28xx_component_status().
 */
7901 qla28xx_get_aux_images(
7902 struct scsi_qla_host *vha, struct active_regions *active_regions)
7904 struct qla_hw_data *ha = vha->hw;
7905 struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
7906 bool valid_pri_image = false, valid_sec_image = false;
7907 bool active_pri_image = false, active_sec_image = false;
/* --- Validate the primary AUX image-status record. --- */
7909 if (!ha->flt_region_aux_img_status_pri) {
7910 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
7911 goto check_sec_image;
7914 qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
7915 ha->flt_region_aux_img_status_pri,
7916 sizeof(pri_aux_image_status) >> 2);
7917 qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
7919 if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
7920 ql_dbg(ql_dbg_init, vha, 0x018b,
7921 "Primary aux image signature (%#x) not valid\n",
7922 le32_to_cpu(pri_aux_image_status.signature));
7923 goto check_sec_image;
7926 if (qla27xx_image_status_checksum(&pri_aux_image_status)) {
7927 ql_dbg(ql_dbg_init, vha, 0x018c,
7928 "Primary aux image checksum failed\n");
7929 goto check_sec_image;
7932 valid_pri_image = true;
/* bit 0 of image_status_mask flags the record as active */
7934 if (pri_aux_image_status.image_status_mask & 1) {
7935 ql_dbg(ql_dbg_init, vha, 0x018d,
7936 "Primary aux image is active\n");
7937 active_pri_image = true;
/* --- Validate the secondary AUX image-status record. --- */
7941 if (!ha->flt_region_aux_img_status_sec) {
7942 ql_dbg(ql_dbg_init, vha, 0x018a,
7943 "Secondary aux image not addressed\n");
7944 goto check_valid_image;
7947 qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
7948 ha->flt_region_aux_img_status_sec,
7949 sizeof(sec_aux_image_status) >> 2);
7950 qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
7952 if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
7953 ql_dbg(ql_dbg_init, vha, 0x018b,
7954 "Secondary aux image signature (%#x) not valid\n",
7955 le32_to_cpu(sec_aux_image_status.signature));
7956 goto check_valid_image;
7959 if (qla27xx_image_status_checksum(&sec_aux_image_status)) {
7960 ql_dbg(ql_dbg_init, vha, 0x018c,
7961 "Secondary aux image checksum failed\n");
7962 goto check_valid_image;
7965 valid_sec_image = true;
7967 if (sec_aux_image_status.image_status_mask & 1) {
7968 ql_dbg(ql_dbg_init, vha, 0x018d,
7969 "Secondary aux image is active\n");
7970 active_sec_image = true;
/* --- Pick the winner: when both are valid+active, newer generation wins. --- */
7974 if (valid_pri_image && active_pri_image &&
7975 valid_sec_image && active_sec_image) {
7976 if (qla27xx_compare_image_generation(&pri_aux_image_status,
7977 &sec_aux_image_status) >= 0) {
7978 qla28xx_component_status(active_regions,
7979 &pri_aux_image_status);
7981 qla28xx_component_status(active_regions,
7982 &sec_aux_image_status);
7984 } else if (valid_pri_image && active_pri_image) {
7985 qla28xx_component_status(active_regions, &pri_aux_image_status);
7986 } else if (valid_sec_image && active_sec_image) {
7987 qla28xx_component_status(active_regions, &sec_aux_image_status);
7990 ql_dbg(ql_dbg_init, vha, 0x018f,
7991 "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n",
7992 active_regions->aux.board_config,
7993 active_regions->aux.vpd_nvram,
7994 active_regions->aux.npiv_config_0_1,
7995 active_regions->aux.npiv_config_2_3);
/*
 * qla27xx_get_active_image - select the active boot/firmware flash image.
 *
 * Reads and validates the primary and secondary image-status records
 * (FLT region present, signature, checksum, active bit).  The result is
 * recorded in @active_regions->global: primary when it is valid+active,
 * secondary when it is valid+active and either no primary was selected
 * or the secondary's generation counter is newer.
 */
7999 qla27xx_get_active_image(struct scsi_qla_host *vha,
8000 struct active_regions *active_regions)
8002 struct qla_hw_data *ha = vha->hw;
8003 struct qla27xx_image_status pri_image_status, sec_image_status;
8004 bool valid_pri_image = false, valid_sec_image = false;
8005 bool active_pri_image = false, active_sec_image = false;
/* --- Validate the primary image-status record. --- */
8007 if (!ha->flt_region_img_status_pri) {
8008 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
8009 goto check_sec_image;
/* primary read failure is non-fatal: fall through to the secondary */
8012 if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status,
8013 ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
8016 goto check_sec_image;
8018 qla27xx_print_image(vha, "Primary image", &pri_image_status);
8020 if (qla27xx_check_image_status_signature(&pri_image_status)) {
8021 ql_dbg(ql_dbg_init, vha, 0x018b,
8022 "Primary image signature (%#x) not valid\n",
8023 le32_to_cpu(pri_image_status.signature));
8024 goto check_sec_image;
8027 if (qla27xx_image_status_checksum(&pri_image_status)) {
8028 ql_dbg(ql_dbg_init, vha, 0x018c,
8029 "Primary image checksum failed\n");
8030 goto check_sec_image;
8033 valid_pri_image = true;
/* bit 0 of image_status_mask flags the record as active */
8035 if (pri_image_status.image_status_mask & 1) {
8036 ql_dbg(ql_dbg_init, vha, 0x018d,
8037 "Primary image is active\n");
8038 active_pri_image = true;
/* --- Validate the secondary image-status record. --- */
8042 if (!ha->flt_region_img_status_sec) {
8043 ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n");
8044 goto check_valid_image;
8047 qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
8048 ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
8049 qla27xx_print_image(vha, "Secondary image", &sec_image_status);
8051 if (qla27xx_check_image_status_signature(&sec_image_status)) {
8052 ql_dbg(ql_dbg_init, vha, 0x018b,
8053 "Secondary image signature (%#x) not valid\n",
8054 le32_to_cpu(sec_image_status.signature));
8055 goto check_valid_image;
8058 if (qla27xx_image_status_checksum(&sec_image_status)) {
8059 ql_dbg(ql_dbg_init, vha, 0x018c,
8060 "Secondary image checksum failed\n");
8061 goto check_valid_image;
8064 valid_sec_image = true;
8066 if (sec_image_status.image_status_mask & 1) {
8067 ql_dbg(ql_dbg_init, vha, 0x018d,
8068 "Secondary image is active\n");
8069 active_sec_image = true;
/* --- Record the selection; secondary overrides on newer generation. --- */
8073 if (valid_pri_image && active_pri_image)
8074 active_regions->global = QLA27XX_PRIMARY_IMAGE;
8076 if (valid_sec_image && active_sec_image) {
8077 if (!active_regions->global ||
8078 qla27xx_compare_image_generation(
8079 &pri_image_status, &sec_image_status) < 0) {
8080 active_regions->global = QLA27XX_SECONDARY_IMAGE;
8084 ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n",
8085 active_regions->global == QLA27XX_DEFAULT_IMAGE ?
8086 "default (boot/fw)" :
8087 active_regions->global == QLA27XX_PRIMARY_IMAGE ?
8089 active_regions->global == QLA27XX_SECONDARY_IMAGE ?
8090 "secondary" : "invalid",
8091 active_regions->global);
/*
 * Return true when the firmware header looks invalid: dwords 4-7 are
 * either all zero or all ones (blank or erased flash / bad image data).
 */
8094 bool qla24xx_risc_firmware_invalid(uint32_t *dword)
8097 !(dword[4] | dword[5] | dword[6] | dword[7]) ||
8098 !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]);
/*
 * qla24xx_load_risc_flash - load RISC firmware from flash into the
 * adapter, one segment at a time, staging each fragment through the
 * request queue ring buffer and pushing it with qla2x00_load_ram().
 * On ISP27xx/ISP28xx, additionally read the firmware-dump (fwdt)
 * template array that follows the firmware image.
 *
 * @srisc_addr: out - RISC start address taken from the first segment.
 * @faddr: flash address of the firmware image (dword units).
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
8102 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
8106 uint templates, segments, fragment;
8111 uint32_t risc_addr, risc_size, risc_attr = 0;
8112 struct qla_hw_data *ha = vha->hw;
8113 struct req_que *req = ha->req_q_map[0];
8114 struct fwdt *fwdt = ha->fwdt;
8116 ql_dbg(ql_dbg_init, vha, 0x008b,
8117 "FW: Loading firmware from flash (%x).\n", faddr);
/* Sanity-check the image header before loading anything. */
8119 dcode = (uint32_t *)req->ring;
8120 qla24xx_read_flash_data(vha, dcode, faddr, 8);
8121 if (qla24xx_risc_firmware_invalid(dcode)) {
8122 ql_log(ql_log_fatal, vha, 0x008c,
8123 "Unable to verify the integrity of flash firmware "
8125 ql_log(ql_log_fatal, vha, 0x008d,
8126 "Firmware data: %08x %08x %08x %08x.\n",
8127 dcode[0], dcode[1], dcode[2], dcode[3]);
8129 return QLA_FUNCTION_FAILED;
8132 dcode = (uint32_t *)req->ring;
8134 segments = FA_RISC_CODE_SEGMENTS;
8135 for (j = 0; j < segments; j++) {
8136 ql_dbg(ql_dbg_init, vha, 0x008d,
8137 "-> Loading segment %u...\n", j);
/* segment header: dword 2 = load address, dword 3 = size in dwords */
8138 qla24xx_read_flash_data(vha, dcode, faddr, 10);
8139 risc_addr = be32_to_cpu((__force __be32)dcode[2]);
8140 risc_size = be32_to_cpu((__force __be32)dcode[3]);
8142 *srisc_addr = risc_addr;
8143 risc_attr = be32_to_cpu((__force __be32)dcode[9]);
/* Transfer the segment in fw_transfer_size-bounded fragments. */
8146 dlen = ha->fw_transfer_size >> 2;
8147 for (fragment = 0; risc_size; fragment++) {
8148 if (dlen > risc_size)
8151 ql_dbg(ql_dbg_init, vha, 0x008e,
8152 "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
8153 fragment, risc_addr, faddr, dlen);
8154 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
/* flash data is big-endian; byte-swap before DMA to the RISC */
8155 for (i = 0; i < dlen; i++)
8156 dcode[i] = swab32(dcode[i]);
8158 rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
8160 ql_log(ql_log_fatal, vha, 0x008f,
8161 "-> Failed load firmware fragment %u.\n",
8163 return QLA_FUNCTION_FAILED;
/* fwdt dump templates only exist on ISP27xx/28xx parts */
8172 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
/* BIT_9 in the segment attributes advertises a second template */
8175 templates = (risc_attr & BIT_9) ? 2 : 1;
8176 ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
8177 for (j = 0; j < templates; j++, fwdt++) {
8178 vfree(fwdt->template);
8179 fwdt->template = NULL;
8182 dcode = (uint32_t *)req->ring;
8183 qla24xx_read_flash_data(vha, dcode, faddr, 7);
8184 risc_size = be32_to_cpu((__force __be32)dcode[2]);
8185 ql_dbg(ql_dbg_init, vha, 0x0161,
8186 "-> fwdt%u template array at %#x (%#x dwords)\n",
8187 j, faddr, risc_size);
/* 0 or 0xffffffff means blank/erased flash -- no template there */
8188 if (!risc_size || !~risc_size) {
8189 ql_dbg(ql_dbg_init, vha, 0x0162,
8190 "-> fwdt%u failed to read array\n", j);
8194 /* skip header and ignore checksum */
8198 ql_dbg(ql_dbg_init, vha, 0x0163,
8199 "-> fwdt%u template allocate template %#x words...\n",
8201 fwdt->template = vmalloc(risc_size * sizeof(*dcode));
8202 if (!fwdt->template) {
8203 ql_log(ql_log_warn, vha, 0x0164,
8204 "-> fwdt%u failed allocate template.\n", j);
8208 dcode = fwdt->template;
8209 qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
8211 if (!qla27xx_fwdt_template_valid(dcode)) {
8212 ql_log(ql_log_warn, vha, 0x0165,
8213 "-> fwdt%u failed template validate\n", j);
8217 dlen = qla27xx_fwdt_template_size(dcode);
8218 ql_dbg(ql_dbg_init, vha, 0x0166,
8219 "-> fwdt%u template size %#lx bytes (%#lx words)\n",
8220 j, dlen, dlen / sizeof(*dcode));
/* self-declared size must fit inside the array we just read */
8221 if (dlen > risc_size * sizeof(*dcode)) {
8222 ql_log(ql_log_warn, vha, 0x0167,
8223 "-> fwdt%u template exceeds array (%-lu bytes)\n",
8224 j, dlen - risc_size * sizeof(*dcode));
8228 fwdt->length = dlen;
8229 ql_dbg(ql_dbg_init, vha, 0x0168,
8230 "-> fwdt%u loaded template ok\n", j);
/* advance past this template (+1 for its trailing checksum word) */
8232 faddr += risc_size + 1;
/* failure path: discard any partially-loaded template */
8238 vfree(fwdt->template);
8239 fwdt->template = NULL;
/* Legacy vendor URL printed when no firmware image can be found. */
8245 #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
/*
 * qla2x00_load_risc - load firmware for older (pre-FWI2) ISPs from a
 * request_firmware() blob.  The image is organized as 16-bit big-endian
 * words; each segment is byte-swapped through the request queue ring and
 * pushed to the RISC with qla2x00_load_ram().
 *
 * @srisc_addr: out - RISC start address (taken from the first segment).
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
8248 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8254 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
8255 struct fw_blob *blob;
8256 struct qla_hw_data *ha = vha->hw;
8257 struct req_que *req = ha->req_q_map[0];
8259 /* Load firmware blob. */
8260 blob = qla2x00_request_firmware(vha);
8262 ql_log(ql_log_info, vha, 0x0083,
8263 "Firmware image unavailable.\n");
8264 ql_log(ql_log_info, vha, 0x0084,
8265 "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
8266 return QLA_FUNCTION_FAILED;
8271 wcode = (uint16_t *)req->ring;
8273 fwcode = (__force __be16 *)blob->fw->data;
8276 /* Validate firmware image by checking version. */
8277 if (blob->fw->size < 8 * sizeof(uint16_t)) {
8278 ql_log(ql_log_fatal, vha, 0x0085,
8279 "Unable to verify integrity of firmware image (%zd).\n",
8281 goto fail_fw_integrity;
/* version words at offset 4..7: all-0 or all-1 means a bad image */
8283 for (i = 0; i < 4; i++)
8284 wcode[i] = be16_to_cpu(fwcode[i + 4]);
8285 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
8286 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
8287 wcode[2] == 0 && wcode[3] == 0)) {
8288 ql_log(ql_log_fatal, vha, 0x0086,
8289 "Unable to verify integrity of firmware image.\n");
8290 ql_log(ql_log_fatal, vha, 0x0087,
8291 "Firmware data: %04x %04x %04x %04x.\n",
8292 wcode[0], wcode[1], wcode[2], wcode[3]);
8293 goto fail_fw_integrity;
/* Walk segments until a zero load-address terminator. */
8297 while (*seg && rval == QLA_SUCCESS) {
/* first non-zero segment address becomes the RISC start address */
8299 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
8300 risc_size = be16_to_cpu(fwcode[3]);
8302 /* Validate firmware image size. */
8303 fwclen += risc_size * sizeof(uint16_t);
8304 if (blob->fw->size < fwclen) {
8305 ql_log(ql_log_fatal, vha, 0x0088,
8306 "Unable to verify integrity of firmware image "
8307 "(%zd).\n", blob->fw->size);
8308 goto fail_fw_integrity;
/* Transfer the segment in fw_transfer_size-bounded word chunks. */
8312 while (risc_size > 0 && rval == QLA_SUCCESS) {
8313 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
8314 if (wlen > risc_size)
8316 ql_dbg(ql_dbg_init, vha, 0x0089,
8317 "Loading risc segment@ risc addr %x number of "
8318 "words 0x%x.\n", risc_addr, wlen);
/* blob data is big-endian; byte-swap before DMA to the RISC */
8320 for (i = 0; i < wlen; i++)
8321 wcode[i] = swab16((__force u32)fwcode[i]);
8323 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
8326 ql_log(ql_log_fatal, vha, 0x008a,
8327 "Failed to load segment %d of firmware.\n",
8344 return QLA_FUNCTION_FAILED;
/*
 * qla24xx_load_risc_blob - load RISC firmware from a request_firmware()
 * blob (.bin file) instead of flash.  Mirrors qla24xx_load_risc_flash():
 * validates the header, loads each segment in fragments via the request
 * queue ring and qla2x00_load_ram(), then on ISP27xx/ISP28xx copies the
 * fwdt dump template(s) that follow the firmware image.
 *
 * @srisc_addr: out - RISC start address taken from the first segment.
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
8348 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8351 uint templates, segments, fragment;
8354 uint32_t risc_addr, risc_size, risc_attr = 0;
8357 struct fw_blob *blob;
8359 struct qla_hw_data *ha = vha->hw;
8360 struct req_que *req = ha->req_q_map[0];
8361 struct fwdt *fwdt = ha->fwdt;
8363 ql_dbg(ql_dbg_init, vha, 0x0090,
8364 "-> FW: Loading via request-firmware.\n");
8366 blob = qla2x00_request_firmware(vha);
8368 ql_log(ql_log_warn, vha, 0x0092,
8369 "-> Firmware file not found.\n");
8371 return QLA_FUNCTION_FAILED;
/* Sanity-check the image header before loading anything. */
8374 fwcode = (__force __be32 *)blob->fw->data;
8375 dcode = (__force uint32_t *)fwcode;
8376 if (qla24xx_risc_firmware_invalid(dcode)) {
8377 ql_log(ql_log_fatal, vha, 0x0093,
8378 "Unable to verify integrity of firmware image (%zd).\n",
8380 ql_log(ql_log_fatal, vha, 0x0095,
8381 "Firmware data: %08x %08x %08x %08x.\n",
8382 dcode[0], dcode[1], dcode[2], dcode[3]);
8383 return QLA_FUNCTION_FAILED;
8386 dcode = (uint32_t *)req->ring;
8388 segments = FA_RISC_CODE_SEGMENTS;
8389 for (j = 0; j < segments; j++) {
8390 ql_dbg(ql_dbg_init, vha, 0x0096,
8391 "-> Loading segment %u...\n", j);
/* segment header: dword 2 = load address, dword 3 = size in dwords */
8392 risc_addr = be32_to_cpu(fwcode[2]);
8393 risc_size = be32_to_cpu(fwcode[3]);
8396 *srisc_addr = risc_addr;
8397 risc_attr = be32_to_cpu(fwcode[9]);
/* Transfer the segment in fw_transfer_size-bounded fragments. */
8400 dlen = ha->fw_transfer_size >> 2;
8401 for (fragment = 0; risc_size; fragment++) {
8402 if (dlen > risc_size)
8405 ql_dbg(ql_dbg_init, vha, 0x0097,
8406 "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n",
8407 fragment, risc_addr,
8408 (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data),
/* blob data is big-endian; byte-swap into the ring before DMA */
8411 for (i = 0; i < dlen; i++)
8412 dcode[i] = swab32((__force u32)fwcode[i]);
8414 rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
8416 ql_log(ql_log_fatal, vha, 0x0098,
8417 "-> Failed load firmware fragment %u.\n",
8419 return QLA_FUNCTION_FAILED;
/* fwdt dump templates only exist on ISP27xx/28xx parts */
8428 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
/* BIT_9 in the segment attributes advertises a second template */
8431 templates = (risc_attr & BIT_9) ? 2 : 1;
8432 ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
8433 for (j = 0; j < templates; j++, fwdt++) {
8434 vfree(fwdt->template);
8435 fwdt->template = NULL;
8438 risc_size = be32_to_cpu(fwcode[2]);
8439 ql_dbg(ql_dbg_init, vha, 0x0171,
8440 "-> fwdt%u template array at %#x (%#x dwords)\n",
8441 j, (uint32_t)((void *)fwcode - (void *)blob->fw->data),
/* 0 or 0xffffffff means there is no template in the blob */
8443 if (!risc_size || !~risc_size) {
8444 ql_dbg(ql_dbg_init, vha, 0x0172,
8445 "-> fwdt%u failed to read array\n", j);
8449 /* skip header and ignore checksum */
8453 ql_dbg(ql_dbg_init, vha, 0x0173,
8454 "-> fwdt%u template allocate template %#x words...\n",
8456 fwdt->template = vmalloc(risc_size * sizeof(*dcode));
8457 if (!fwdt->template) {
8458 ql_log(ql_log_warn, vha, 0x0174,
8459 "-> fwdt%u failed allocate template.\n", j);
8463 dcode = fwdt->template;
8464 for (i = 0; i < risc_size; i++)
8465 dcode[i] = (__force u32)fwcode[i];
8467 if (!qla27xx_fwdt_template_valid(dcode)) {
8468 ql_log(ql_log_warn, vha, 0x0175,
8469 "-> fwdt%u failed template validate\n", j);
8473 dlen = qla27xx_fwdt_template_size(dcode);
8474 ql_dbg(ql_dbg_init, vha, 0x0176,
8475 "-> fwdt%u template size %#lx bytes (%#lx words)\n",
8476 j, dlen, dlen / sizeof(*dcode));
/* self-declared size must fit inside the array we just copied */
8477 if (dlen > risc_size * sizeof(*dcode)) {
8478 ql_log(ql_log_warn, vha, 0x0177,
8479 "-> fwdt%u template exceeds array (%-lu bytes)\n",
8480 j, dlen - risc_size * sizeof(*dcode));
8484 fwdt->length = dlen;
8485 ql_dbg(ql_dbg_init, vha, 0x0178,
8486 "-> fwdt%u loaded template ok\n", j);
/* advance past this template (+1 for its trailing checksum word) */
8488 fwcode += risc_size + 1;
/* failure path: discard any partially-loaded template */
8494 vfree(fwdt->template);
8495 fwdt->template = NULL;
/*
 * qla24xx_load_risc - top-level firmware load for ISP24xx-class parts.
 * ql2xfwloadbin == 1 forces the flash-first (81xx-style) load path;
 * otherwise try the request-firmware blob first and fall back to flash.
 */
8502 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8506 if (ql2xfwloadbin == 1)
8507 return qla81xx_load_risc(vha, srisc_addr);
8511 * 1) Firmware via request-firmware interface (.bin file).
8512 * 2) Firmware residing in flash.
8514 rval = qla24xx_load_risc_blob(vha, srisc_addr);
8515 if (rval == QLA_SUCCESS)
8518 return qla24xx_load_risc_flash(vha, srisc_addr,
8519 vha->hw->flt_region_fw);
/*
 * qla81xx_load_risc - flash-first firmware load.  On ISP27xx/28xx the
 * active flash image (primary or secondary) is selected first; if the
 * flash load fails, fall back to the request-firmware blob and finally
 * to the limited-operation golden firmware region.
 * NOTE(review): the body of the ql2xfwloadbin == 2 branch is not visible
 * in this excerpt.
 */
8523 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8526 struct qla_hw_data *ha = vha->hw;
8527 struct active_regions active_regions = { };
8529 if (ql2xfwloadbin == 2)
8532 /* FW Load priority:
8533 * 1) Firmware residing in flash.
8534 * 2) Firmware via request-firmware interface (.bin file).
8535 * 3) Golden-Firmware residing in flash -- (limited operation).
/* only ISP27xx/28xx have primary/secondary image selection */
8538 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
8539 goto try_primary_fw;
8541 qla27xx_get_active_image(vha, &active_regions);
8543 if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
8544 goto try_primary_fw;
8546 ql_dbg(ql_dbg_init, vha, 0x008b,
8547 "Loading secondary firmware image.\n");
8548 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec);
8553 ql_dbg(ql_dbg_init, vha, 0x008b,
8554 "Loading primary firmware image.\n");
8555 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
8560 rval = qla24xx_load_risc_blob(vha, srisc_addr);
8561 if (!rval || !ha->flt_region_gold_fw)
8564 ql_log(ql_log_info, vha, 0x0099,
8565 "Attempting to fallback to golden firmware.\n");
8566 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
/* running on golden fw: flag it so the user is told to reflash */
8570 ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
8571 ha->flags.running_gold_fw = 1;
/*
 * qla2x00_try_to_stop_firmware - best-effort firmware stop.
 * Skipped entirely when the PCI channel is permanently failed, the part
 * is not FWI2-capable, or firmware never started.  On a retryable
 * failure (anything other than success/timeout/invalid-command) the
 * chip is reset, re-diagnosed and re-setup, then the stop is retried,
 * up to 5 times.  Clears fw_init_done afterwards.
 */
8576 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
8579 struct qla_hw_data *ha = vha->hw;
8581 if (ha->flags.pci_channel_io_perm_failure)
8583 if (!IS_FWI2_CAPABLE(ha))
8585 if (!ha->fw_major_version)
8587 if (!ha->flags.fw_started)
8590 ret = qla2x00_stop_firmware(vha);
8591 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
8592 ret != QLA_INVALID_COMMAND && retries ; retries--) {
8593 ha->isp_ops->reset_chip(vha);
8594 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
8596 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
8598 ql_log(ql_log_info, vha, 0x8015,
8599 "Attempting retry of stop-firmware command.\n");
8600 ret = qla2x00_stop_firmware(vha);
8604 ha->flags.fw_init_done = 0;
/*
 * qla24xx_configure_vhba - bring up a virtual host (NPIV vport).
 * Waits for firmware readiness on the physical (base) host, sends a
 * sync marker, logs the vport into the fabric SNS (well-known address
 * 0xFFFFFC) and, on success, marks the loop up and schedules a loop
 * resync on the base host.
 */
8608 qla24xx_configure_vhba(scsi_qla_host_t *vha)
8610 int rval = QLA_SUCCESS;
8612 uint16_t mb[MAILBOX_REGISTER_COUNT];
8613 struct qla_hw_data *ha = vha->hw;
/* the physical port owns firmware state; vports piggy-back on it */
8614 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
8619 rval = qla2x00_fw_ready(base_vha);
8621 if (rval == QLA_SUCCESS) {
8622 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
8623 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
8626 vha->flags.management_server_logged_in = 0;
8628 /* Login to SNS first */
8629 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
8631 if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
8632 if (rval2 == QLA_MEMORY_ALLOC_FAILED)
8633 ql_dbg(ql_dbg_init, vha, 0x0120,
8634 "Failed SNS login: loop_id=%x, rval2=%d\n",
8637 ql_dbg(ql_dbg_init, vha, 0x0103,
8638 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
8639 "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
8640 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
8641 return (QLA_FUNCTION_FAILED);
8644 atomic_set(&vha->loop_down_timer, 0);
8645 atomic_set(&vha->loop_state, LOOP_UP);
8646 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
8647 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
8648 rval = qla2x00_loop_resync(base_vha);
8653 /* 84XX Support **************************************************************/
/* Global registry of per-PCI-bus shared 84xx chip state, guarded by the mutex. */
8655 static LIST_HEAD(qla_cs84xx_list);
8656 static DEFINE_MUTEX(qla_cs84xx_mutex);
/*
 * qla84xx_get_chip - look up (or allocate) the shared 84xx chip state
 * for this host's PCI bus.  Multiple functions on the same bus share one
 * qla_chip_state_84xx; an existing entry gets its refcount bumped, a new
 * one is initialized and added to the global list.  All list access is
 * serialized by qla_cs84xx_mutex.
 */
8658 static struct qla_chip_state_84xx *
8659 qla84xx_get_chip(struct scsi_qla_host *vha)
8661 struct qla_chip_state_84xx *cs84xx;
8662 struct qla_hw_data *ha = vha->hw;
8664 mutex_lock(&qla_cs84xx_mutex);
8666 /* Find any shared 84xx chip. */
8667 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
8668 if (cs84xx->bus == ha->pdev->bus) {
8669 kref_get(&cs84xx->kref);
/* no existing entry for this bus: create and register one */
8674 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
8678 kref_init(&cs84xx->kref);
8679 spin_lock_init(&cs84xx->access_lock);
8680 mutex_init(&cs84xx->fw_update_mutex);
8681 cs84xx->bus = ha->pdev->bus;
8683 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
8685 mutex_unlock(&qla_cs84xx_mutex);
/*
 * kref release callback for the shared 84xx chip state: unlink it from
 * the global list under qla_cs84xx_mutex.  (The matching free of the
 * object is not visible in this excerpt.)
 */
8690 __qla84xx_chip_release(struct kref *kref)
8692 struct qla_chip_state_84xx *cs84xx =
8693 container_of(kref, struct qla_chip_state_84xx, kref);
8695 mutex_lock(&qla_cs84xx_mutex);
8696 list_del(&cs84xx->list);
8697 mutex_unlock(&qla_cs84xx_mutex);
/*
 * Drop this host's reference on the shared 84xx chip state; the last
 * put triggers __qla84xx_chip_release().
 */
8702 qla84xx_put_chip(struct scsi_qla_host *vha)
8704 struct qla_hw_data *ha = vha->hw;
8707 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
/*
 * qla84xx_init_chip - verify the CNA/84xx chip under the shared
 * fw_update_mutex.  Returns QLA_FUNCTION_FAILED when the verify mailbox
 * command fails or reports a nonzero status word, QLA_SUCCESS otherwise.
 */
8711 qla84xx_init_chip(scsi_qla_host_t *vha)
8715 struct qla_hw_data *ha = vha->hw;
8717 mutex_lock(&ha->cs84xx->fw_update_mutex);
8719 rval = qla84xx_verify_chip(vha, status);
8721 mutex_unlock(&ha->cs84xx->fw_update_mutex);
8723 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
8727 /* 81XX Support **************************************************************/
8730 qla81xx_nvram_config(scsi_qla_host_t *vha)
8733 struct init_cb_81xx *icb;
8734 struct nvram_81xx *nv;
8736 uint8_t *dptr1, *dptr2;
8739 struct qla_hw_data *ha = vha->hw;
8741 struct active_regions active_regions = { };
8744 icb = (struct init_cb_81xx *)ha->init_cb;
8747 /* Determine NVRAM starting address. */
8748 ha->nvram_size = sizeof(*nv);
8749 ha->vpd_size = FA_NVRAM_VPD_SIZE;
8750 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
8751 ha->vpd_size = FA_VPD_SIZE_82XX;
8753 if (IS_QLA28XX(ha) || IS_QLA27XX(ha))
8754 qla28xx_get_aux_images(vha, &active_regions);
8756 /* Get VPD data into cache */
8757 ha->vpd = ha->nvram + VPD_OFFSET;
8759 faddr = ha->flt_region_vpd;
8760 if (IS_QLA28XX(ha)) {
8761 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
8762 faddr = ha->flt_region_vpd_sec;
8763 ql_dbg(ql_dbg_init, vha, 0x0110,
8764 "Loading %s nvram image.\n",
8765 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
8766 "primary" : "secondary");
8768 ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size);
8770 /* Get NVRAM data into cache and calculate checksum. */
8771 faddr = ha->flt_region_nvram;
8772 if (IS_QLA28XX(ha)) {
8773 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
8774 faddr = ha->flt_region_nvram_sec;
8776 ql_dbg(ql_dbg_init, vha, 0x0110,
8777 "Loading %s nvram image.\n",
8778 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
8779 "primary" : "secondary");
8780 ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
8782 dptr = (__force __le32 *)nv;
8783 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
8784 chksum += le32_to_cpu(*dptr);
8786 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
8787 "Contents of NVRAM:\n");
8788 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
8789 nv, ha->nvram_size);
8791 /* Bad NVRAM data, set defaults parameters. */
8792 if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
8793 le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
8794 /* Reset NVRAM data. */
8795 ql_log(ql_log_info, vha, 0x0073,
8796 "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
8797 chksum, nv->id, le16_to_cpu(nv->nvram_version));
8798 ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv));
8799 ql_log(ql_log_info, vha, 0x0074,
8800 "Falling back to functioning (yet invalid -- WWPN) "
8804 * Set default initialization control block.
8806 memset(nv, 0, ha->nvram_size);
8807 nv->nvram_version = cpu_to_le16(ICB_VERSION);
8808 nv->version = cpu_to_le16(ICB_VERSION);
8809 nv->frame_payload_size = cpu_to_le16(2048);
8810 nv->execution_throttle = cpu_to_le16(0xFFFF);
8811 nv->exchange_count = cpu_to_le16(0);
8812 nv->port_name[0] = 0x21;
8813 nv->port_name[1] = 0x00 + ha->port_no + 1;
8814 nv->port_name[2] = 0x00;
8815 nv->port_name[3] = 0xe0;
8816 nv->port_name[4] = 0x8b;
8817 nv->port_name[5] = 0x1c;
8818 nv->port_name[6] = 0x55;
8819 nv->port_name[7] = 0x86;
8820 nv->node_name[0] = 0x20;
8821 nv->node_name[1] = 0x00;
8822 nv->node_name[2] = 0x00;
8823 nv->node_name[3] = 0xe0;
8824 nv->node_name[4] = 0x8b;
8825 nv->node_name[5] = 0x1c;
8826 nv->node_name[6] = 0x55;
8827 nv->node_name[7] = 0x86;
8828 nv->login_retry_count = cpu_to_le16(8);
8829 nv->interrupt_delay_timer = cpu_to_le16(0);
8830 nv->login_timeout = cpu_to_le16(0);
8831 nv->firmware_options_1 =
8832 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
8833 nv->firmware_options_2 = cpu_to_le32(2 << 4);
8834 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
8835 nv->firmware_options_3 = cpu_to_le32(2 << 13);
8836 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
8837 nv->efi_parameters = cpu_to_le32(0);
8838 nv->reset_delay = 5;
8839 nv->max_luns_per_target = cpu_to_le16(128);
8840 nv->port_down_retry_count = cpu_to_le16(30);
8841 nv->link_down_timeout = cpu_to_le16(180);
8842 nv->enode_mac[0] = 0x00;
8843 nv->enode_mac[1] = 0xC0;
8844 nv->enode_mac[2] = 0xDD;
8845 nv->enode_mac[3] = 0x04;
8846 nv->enode_mac[4] = 0x05;
8847 nv->enode_mac[5] = 0x06 + ha->port_no + 1;
8852 if (IS_T10_PI_CAPABLE(ha))
8853 nv->frame_payload_size &= cpu_to_le16(~7);
8855 qlt_81xx_config_nvram_stage1(vha, nv);
8857 /* Reset Initialization control block */
8858 memset(icb, 0, ha->init_cb_size);
8860 /* Copy 1st segment. */
8861 dptr1 = (uint8_t *)icb;
8862 dptr2 = (uint8_t *)&nv->version;
8863 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
8865 *dptr1++ = *dptr2++;
8867 icb->login_retry_count = nv->login_retry_count;
8869 /* Copy 2nd segment. */
8870 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
8871 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
8872 cnt = (uint8_t *)&icb->reserved_5 -
8873 (uint8_t *)&icb->interrupt_delay_timer;
8875 *dptr1++ = *dptr2++;
8877 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
8878 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
8879 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
8880 icb->enode_mac[0] = 0x00;
8881 icb->enode_mac[1] = 0xC0;
8882 icb->enode_mac[2] = 0xDD;
8883 icb->enode_mac[3] = 0x04;
8884 icb->enode_mac[4] = 0x05;
8885 icb->enode_mac[5] = 0x06 + ha->port_no + 1;
8888 /* Use extended-initialization control block. */
8889 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
8890 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
8892 * Setup driver NVRAM options.
8894 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
8897 qlt_81xx_config_nvram_stage2(vha, icb);
8899 /* Use alternate WWN? */
8900 if (nv->host_p & cpu_to_le32(BIT_15)) {
8901 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
8902 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
8905 /* Prepare nodename */
8906 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
8908 * Firmware will apply the following mask if the nodename was
8911 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
8912 icb->node_name[0] &= 0xF0;
8915 if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
8916 if ((nv->enhanced_features & BIT_7) == 0)
8917 ha->flags.scm_supported_a = 1;
8920 /* Set host adapter parameters. */
8921 ha->flags.disable_risc_code_load = 0;
8922 ha->flags.enable_lip_reset = 0;
8923 ha->flags.enable_lip_full_login =
8924 le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
8925 ha->flags.enable_target_reset =
8926 le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
8927 ha->flags.enable_led_scheme = 0;
8928 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
8930 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
8931 (BIT_6 | BIT_5 | BIT_4)) >> 4;
8933 /* save HBA serial number */
8934 ha->serial0 = icb->port_name[5];
8935 ha->serial1 = icb->port_name[6];
8936 ha->serial2 = icb->port_name[7];
8937 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
8938 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
8940 icb->execution_throttle = cpu_to_le16(0xFFFF);
8942 ha->retry_count = le16_to_cpu(nv->login_retry_count);
8944 /* Set minimum login_timeout to 4 seconds. */
8945 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
8946 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
8947 if (le16_to_cpu(nv->login_timeout) < 4)
8948 nv->login_timeout = cpu_to_le16(4);
8949 ha->login_timeout = le16_to_cpu(nv->login_timeout);
8951 /* Set minimum RATOV to 100 tenths of a second. */
8954 ha->loop_reset_delay = nv->reset_delay;
8956 /* Link Down Timeout = 0:
8958 * When Port Down timer expires we will start returning
8959 * I/O's to OS with "DID_NO_CONNECT".
8961 * Link Down Timeout != 0:
8963 * The driver waits for the link to come up after link down
8964 * before returning I/Os to OS with "DID_NO_CONNECT".
8966 if (le16_to_cpu(nv->link_down_timeout) == 0) {
8967 ha->loop_down_abort_time =
8968 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
8970 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
8971 ha->loop_down_abort_time =
8972 (LOOP_DOWN_TIME - ha->link_down_timeout);
8975 /* Need enough time to try and get the port back. */
8976 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
8977 if (qlport_down_retry)
8978 ha->port_down_retry_count = qlport_down_retry;
8980 /* Set login_retry_count */
8981 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
8982 if (ha->port_down_retry_count ==
8983 le16_to_cpu(nv->port_down_retry_count) &&
8984 ha->port_down_retry_count > 3)
8985 ha->login_retry_count = ha->port_down_retry_count;
8986 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
8987 ha->login_retry_count = ha->port_down_retry_count;
8988 if (ql2xloginretrycount)
8989 ha->login_retry_count = ql2xloginretrycount;
8991 /* if not running MSI-X we need handshaking on interrupts */
8992 if (!vha->hw->flags.msix_enabled &&
8993 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)))
8994 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
8997 if (!vha->flags.init_done) {
8998 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
8999 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
9000 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
9001 le16_to_cpu(icb->interrupt_delay_timer) : 2;
9003 icb->firmware_options_2 &= cpu_to_le32(
9004 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
9005 vha->flags.process_response_queue = 0;
9006 if (ha->zio_mode != QLA_ZIO_DISABLED) {
9007 ha->zio_mode = QLA_ZIO_MODE_6;
9009 ql_log(ql_log_info, vha, 0x0075,
9010 "ZIO mode %d enabled; timer delay (%d us).\n",
9012 ha->zio_timer * 100);
9014 icb->firmware_options_2 |= cpu_to_le32(
9015 (uint32_t)ha->zio_mode);
9016 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
9017 vha->flags.process_response_queue = 1;
9020 /* enable RIDA Format2 */
9021 icb->firmware_options_3 |= cpu_to_le32(BIT_0);
9023 /* N2N: driver will initiate Login instead of FW */
9024 icb->firmware_options_3 |= cpu_to_le32(BIT_8);
9026 /* Determine NVMe/FCP priority for target ports */
9027 ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);
9030 ql_log(ql_log_warn, vha, 0x0076,
9031 "NVRAM configuration failed.\n");
/*
 * qla82xx_restart_isp - re-initialize an ISP82xx adapter after a reset.
 *
 * Re-initializes the rings, waits for firmware readiness, re-arms the
 * loop-resync machinery, re-enables FCE/EFT tracing, and finally walks the
 * vport list to abort/restart each vport's ISP state.
 *
 * NOTE(review): several statements of this function are not visible in this
 * view (braces/returns elided); comments describe only the code shown.
 */
9037 qla82xx_restart_isp(scsi_qla_host_t *vha)
9040 struct qla_hw_data *ha = vha->hw;
9041 struct scsi_qla_host *vp, *tvp;
9042 unsigned long flags;
/* Bring the request/response rings back up on the freshly reset chip. */
9044 status = qla2x00_init_rings(vha);
9046 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
9047 ha->flags.chip_reset_done = 1;
/* Wait for the firmware to report READY before issuing any IOCBs. */
9049 status = qla2x00_fw_ready(vha);
9051 /* Issue a marker after FW becomes ready. */
9052 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
9053 vha->flags.online = 1;
9054 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
9057 /* if no cable then assume it's good */
9058 if ((vha->device_flags & DFLG_NO_CABLE))
9063 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
9065 if (!atomic_read(&vha->loop_down_timer)) {
9067 * Issue marker command only when we are going
9068 * to start the I/O .
9070 vha->marker_needed = 1;
9073 ha->isp_ops->enable_intrs(ha);
9075 ha->isp_abort_cnt = 0;
9076 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
9078 /* Update the firmware version */
9079 status = qla82xx_check_md_needed(vha);
/* Re-enable the FCE trace buffer; failure is non-fatal (FCE disabled). */
9082 ha->flags.fce_enabled = 1;
9084 fce_calc_size(ha->fce_bufs));
9085 rval = qla2x00_enable_fce_trace(vha,
9086 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
9089 ql_log(ql_log_warn, vha, 0x8001,
9090 "Unable to reinitialize FCE (%d).\n",
9092 ha->flags.fce_enabled = 0;
/* Likewise re-arm the extended firmware trace (EFT) buffer. */
9097 memset(ha->eft, 0, EFT_SIZE);
9098 rval = qla2x00_enable_eft_trace(vha,
9099 ha->eft_dma, EFT_NUM_BUFFERS);
9101 ql_log(ql_log_warn, vha, 0x8010,
9102 "Unable to reinitialize EFT (%d).\n",
9109 ql_dbg(ql_dbg_taskm, vha, 0x8011,
9110 "qla82xx_restart_isp succeeded.\n");
/*
 * Walk the vport list under vport_slock; take a vref and drop the lock
 * around the per-vport abort so the vport cannot be freed while unlocked.
 */
9112 spin_lock_irqsave(&ha->vport_slock, flags);
9113 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
9115 atomic_inc(&vp->vref_count);
9116 spin_unlock_irqrestore(&ha->vport_slock, flags);
9118 qla2x00_vp_abort_isp(vp);
9120 spin_lock_irqsave(&ha->vport_slock, flags);
9121 atomic_dec(&vp->vref_count);
9124 spin_unlock_irqrestore(&ha->vport_slock, flags);
9127 ql_log(ql_log_warn, vha, 0x8016,
9128 "qla82xx_restart_isp **** FAILED ****.\n");
9135 * qla24xx_get_fcp_prio
9136 * Gets the fcp cmd priority value for the logged in port.
9137 * Looks for a match of the port descriptors within
9138 * each of the fcp prio config entries. If a match is found,
9139 * the tag (priority) value is returned.
9142 * vha = scsi host structure pointer.
9143 * fcport = port structure pointer.
9146 * non-zero (if found)
/*
 * qla24xx_get_fcp_prio - look up the FCP command priority for a port.
 *
 * Scans the fcp_prio_cfg entries, matching this host's d_id/WWPN as the
 * source and the fcport's d_id/WWPN as the destination.  A full match on
 * either both port IDs or both WWNs (pid_match == 2 || wwn_match == 2)
 * selects the entry; its tag is used when FCP_PRIO_ENTRY_TAG_VALID is set.
 * Returns 0 early when no config is loaded or priority is disabled.
 */
9153 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
9156 uint8_t pid_match, wwn_match;
9158 uint32_t pid1, pid2;
9159 uint64_t wwn1, wwn2;
9160 struct qla_fcp_prio_entry *pri_entry;
9161 struct qla_hw_data *ha = vha->hw;
/* No priority table or feature disabled: nothing to look up. */
9163 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
9167 entries = ha->fcp_prio_cfg->num_entries;
9168 pri_entry = &ha->fcp_prio_cfg->entry[0];
9170 for (i = 0; i < entries; i++) {
9171 pid_match = wwn_match = 0;
/* Skip entries not marked valid. */
9173 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
9178 /* check source pid for a match */
9179 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
9180 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
9181 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
/* INVALID_PORT_ID in the entry acts as a wildcard. */
9182 if (pid1 == INVALID_PORT_ID)
9184 else if (pid1 == pid2)
9188 /* check destination pid for a match */
9189 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
9190 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
9191 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
9192 if (pid1 == INVALID_PORT_ID)
9194 else if (pid1 == pid2)
9198 /* check source WWN for a match */
9199 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
9200 wwn1 = wwn_to_u64(vha->port_name);
9201 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
/* All-ones WWN in the entry acts as a wildcard. */
9202 if (wwn2 == (uint64_t)-1)
9204 else if (wwn1 == wwn2)
9208 /* check destination WWN for a match */
9209 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
9210 wwn1 = wwn_to_u64(fcport->port_name);
9211 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
9212 if (wwn2 == (uint64_t)-1)
9214 else if (wwn1 == wwn2)
9218 if (pid_match == 2 || wwn_match == 2) {
9219 /* Found a matching entry */
9220 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
9221 priority = pri_entry->tag;
9232 * qla24xx_update_fcport_fcp_prio
9233 * Activates fcp priority for the logged in fc port
9236 * vha = scsi host structure pointer.
9237 * fcp = port structure pointer.
9240 * QLA_SUCCESS or QLA_FUNCTION_FAILED
/*
 * qla24xx_update_fcport_fcp_prio - apply the FCP priority to a logged-in
 * target port.
 *
 * Only valid for FCT_TARGET ports with a real loop_id.  Looks up the
 * priority via qla24xx_get_fcp_prio(); on P3P-type hardware the value is
 * cached locally, otherwise it is pushed to firmware through
 * qla24xx_set_fcp_prio() and cached on success.
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
9246 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
9252 if (fcport->port_type != FCT_TARGET ||
9253 fcport->loop_id == FC_NO_LOOP_ID)
9254 return QLA_FUNCTION_FAILED;
9256 priority = qla24xx_get_fcp_prio(vha, fcport);
9258 return QLA_FUNCTION_FAILED;
/* P3P (82xx) parts: no mailbox update, just cache the low nibble. */
9260 if (IS_P3P_TYPE(vha->hw)) {
9261 fcport->fcp_prio = priority & 0xf;
9265 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
9266 if (ret == QLA_SUCCESS) {
/* Log only when the effective priority actually changed. */
9267 if (fcport->fcp_prio != priority)
9268 ql_dbg(ql_dbg_user, vha, 0x709e,
9269 "Updated FCP_CMND priority - value=%d loop_id=%d "
9270 "port_id=%02x%02x%02x.\n", priority,
9271 fcport->loop_id, fcport->d_id.b.domain,
9272 fcport->d_id.b.area, fcport->d_id.b.al_pa);
9273 fcport->fcp_prio = priority & 0xf;
9275 ql_dbg(ql_dbg_user, vha, 0x704f,
9276 "Unable to update FCP_CMND priority - ret=0x%x for "
9277 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
9278 fcport->d_id.b.domain, fcport->d_id.b.area,
9279 fcport->d_id.b.al_pa);
9284 * qla24xx_update_all_fcp_prio
9285 * Activates fcp priority for all the logged in ports
9288 * ha = adapter block pointer.
9291 * QLA_SUCCESS or QLA_FUNCTION_FAILED
/*
 * qla24xx_update_all_fcp_prio - apply FCP priority to every fcport on
 * this host.  Note: ret reflects only the LAST port updated.
 */
9297 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
9302 ret = QLA_FUNCTION_FAILED;
9303 /* We need to set priority for all logged in ports */
9304 list_for_each_entry(fcport, &vha->vp_fcports, list)
9305 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
/*
 * qla2xxx_create_qpair - create a request/response queue pair.
 * @vha: host
 * @qos: quality-of-service value passed to request-queue creation
 * @vp_idx: virtual port index the qpair belongs to
 * @startqp: whether to start the queues immediately
 *
 * Requires a multi-queue capable firmware (fw_attributes BIT_6) and MSI-X.
 * Allocates the qpair, claims a free qpair id under mq_lock, binds an
 * MSI-X vector, then creates the response queue followed by the request
 * queue and the per-qpair SRB mempool.  Returns the qpair, or NULL on
 * failure (error paths unwind via the labels at the bottom).
 *
 * NOTE(review): some statements (returns, brace lines, goto targets) are
 * not visible in this view; comments describe only the code shown.
 */
9310 struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
9311 int vp_idx, bool startqp)
9316 struct qla_hw_data *ha = vha->hw;
9317 uint16_t qpair_id = 0;
9318 struct qla_qpair *qpair = NULL;
9319 struct qla_msix_entry *msix;
9321 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
9322 ql_log(ql_log_warn, vha, 0x00181,
9323 "FW/Driver is not multi-queue capable.\n");
9327 if (ql2xmqsupport || ql2xnvmeenable) {
9328 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
9329 if (qpair == NULL) {
9330 ql_log(ql_log_warn, vha, 0x0182,
9331 "Failed to allocate memory for queue pair.\n");
9335 qpair->hw = vha->hw;
9337 qpair->qp_lock_ptr = &qpair->qp_lock;
9338 spin_lock_init(&qpair->qp_lock);
9339 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
9341 /* Assign available que pair id */
9342 mutex_lock(&ha->mq_lock);
9343 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
9344 if (ha->num_qpairs >= ha->max_qpairs) {
9345 mutex_unlock(&ha->mq_lock);
9346 ql_log(ql_log_warn, vha, 0x0183,
9347 "No resources to create additional q pair.\n");
9351 set_bit(qpair_id, ha->qpair_qid_map);
9352 ha->queue_pair_map[qpair_id] = qpair;
9353 qpair->id = qpair_id;
9354 qpair->vp_idx = vp_idx;
9355 qpair->fw_started = ha->flags.fw_started;
9356 INIT_LIST_HEAD(&qpair->hints_list);
/* Inherit reset generation and class-2/explicit-conf from base qpair. */
9357 qpair->chip_reset = ha->base_qpair->chip_reset;
9358 qpair->enable_class_2 = ha->base_qpair->enable_class_2;
9359 qpair->enable_explicit_conf =
9360 ha->base_qpair->enable_explicit_conf;
/* Find a free MSI-X vector to service this qpair. */
9362 for (i = 0; i < ha->msix_count; i++) {
9363 msix = &ha->msix_entries[i];
9367 ql_dbg(ql_dbg_multiq, vha, 0xc00f,
9368 "Vector %x selected for qpair\n", msix->vector);
9372 ql_log(ql_log_warn, vha, 0x0184,
9373 "Out of MSI-X vectors!.\n");
9377 qpair->msix->in_use = 1;
9378 list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
9379 qpair->pdev = ha->pdev;
9380 if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
9381 qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
9383 mutex_unlock(&ha->mq_lock);
9385 /* Create response queue first */
9386 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
9388 ql_log(ql_log_warn, vha, 0x0185,
9389 "Failed to create response queue.\n");
9393 qpair->rsp = ha->rsp_q_map[rsp_id];
9395 /* Create request queue */
9396 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
9399 ql_log(ql_log_warn, vha, 0x0186,
9400 "Failed to create request queue.\n");
/* Cross-link the req/rsp queues and the qpair. */
9404 qpair->req = ha->req_q_map[req_id];
9405 qpair->rsp->req = qpair->req;
9406 qpair->rsp->qpair = qpair;
9407 /* init qpair to this cpu. Will adjust at run time. */
9408 qla_cpu_update(qpair, raw_smp_processor_id());
9410 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
9411 if (ha->fw_attributes & BIT_4)
9412 qpair->difdix_supported = 1;
9415 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
9416 if (!qpair->srb_mempool) {
9417 ql_log(ql_log_warn, vha, 0xd036,
9418 "Failed to create srb mempool for qpair %d\n",
9423 /* Mark as online */
9426 if (!vha->flags.qpairs_available)
9427 vha->flags.qpairs_available = 1;
9429 ql_dbg(ql_dbg_multiq, vha, 0xc00d,
9430 "Request/Response queue pair created, id %d\n",
9432 ql_dbg(ql_dbg_init, vha, 0x0187,
9433 "Request/Response queue pair created, id %d\n",
/*
 * Error unwind: delete the response queue, release the MSI-X vector,
 * unlink the qpair and free its id under mq_lock.
 */
9440 qla25xx_delete_rsp_que(vha, qpair->rsp);
9442 mutex_lock(&ha->mq_lock);
9443 qpair->msix->in_use = 0;
9444 list_del(&qpair->qp_list_elem);
9445 if (list_empty(&vha->qp_list))
9446 vha->flags.qpairs_available = 0;
9448 ha->queue_pair_map[qpair_id] = NULL;
9449 clear_bit(qpair_id, ha->qpair_qid_map);
9451 mutex_unlock(&ha->mq_lock);
/*
 * qla2xxx_delete_qpair - tear down a queue pair created by
 * qla2xxx_create_qpair().
 *
 * Marks the qpair as being deleted, removes its request and response
 * queues, then (under mq_lock) releases the qpair id, unlinks it from the
 * host's qp_list, clears the qpairs_* flags when the list becomes empty,
 * and destroys the SRB mempool.
 * Returns QLA_SUCCESS or the failing sub-operation's status.
 */
9457 int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
9459 int ret = QLA_FUNCTION_FAILED;
9460 struct qla_hw_data *ha = qpair->hw;
/* Prevent new work from being queued while tearing down. */
9462 qpair->delete_in_progress = 1;
9464 ret = qla25xx_delete_req_que(vha, qpair->req);
9465 if (ret != QLA_SUCCESS)
9468 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
9469 if (ret != QLA_SUCCESS)
9472 mutex_lock(&ha->mq_lock);
9473 ha->queue_pair_map[qpair->id] = NULL;
9474 clear_bit(qpair->id, ha->qpair_qid_map);
9476 list_del(&qpair->qp_list_elem);
9477 if (list_empty(&vha->qp_list)) {
9478 vha->flags.qpairs_available = 0;
9479 vha->flags.qpairs_req_created = 0;
9480 vha->flags.qpairs_rsp_created = 0;
9482 mempool_destroy(qpair->srb_mempool);
9484 mutex_unlock(&ha->mq_lock);
/*
 * qla2x00_count_set_bits - count the number of set bits in @num using
 * Kernighan's clear-lowest-set-bit loop.
 */
9492 qla2x00_count_set_bits(uint32_t num)
9494 /* Brian Kernighan's Algorithm */
/*
 * qla2x00_get_num_tgts - count fcports on @vha whose port_type is
 * FCT_TARGET (non-target entries are skipped).
 */
9505 qla2x00_get_num_tgts(scsi_qla_host_t *vha)
9513 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
9514 if (f->port_type != FCT_TARGET)
/*
 * qla2xxx_reset_stats - zero the host error/link statistics selected by
 * the QLA2XX_* bits in @flags.
 *
 * For QLA2XX_TGT_SHT_LNK_DOWN the per-target counters are cleared under
 * tgt.sess_lock.  The host link_down_time is unconditionally reset to
 * QLA2XX_MAX_LINK_DOWN_TIME at the end.
 */
9521 int qla2xxx_reset_stats(struct Scsi_Host *host, u32 flags)
9523 scsi_qla_host_t *vha = shost_priv(host);
9524 fc_port_t *fcport = NULL;
9525 unsigned long int_flags;
9527 if (flags & QLA2XX_HW_ERROR)
9528 vha->hw_err_cnt = 0;
9529 if (flags & QLA2XX_SHT_LNK_DWN)
9530 vha->short_link_down_cnt = 0;
9531 if (flags & QLA2XX_INT_ERR)
9532 vha->interface_err_cnt = 0;
9533 if (flags & QLA2XX_CMD_TIMEOUT)
9534 vha->cmd_timeout_cnt = 0;
9535 if (flags & QLA2XX_RESET_CMD_ERR)
9536 vha->reset_cmd_err_cnt = 0;
9537 if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
/* Per-target counters live on the fcports; clear under sess_lock. */
9538 spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
9539 list_for_each_entry(fcport, &vha->vp_fcports, list) {
9540 fcport->tgt_short_link_down_cnt = 0;
9541 fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
9543 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
9545 vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
/* qla2xxx_start_stats - "start" is implemented as a reset of the counters. */
9549 int qla2xxx_start_stats(struct Scsi_Host *host, u32 flags)
9551 return qla2xxx_reset_stats(host, flags);
/* qla2xxx_stop_stats - "stop" also just resets the selected counters. */
9554 int qla2xxx_stop_stats(struct Scsi_Host *host, u32 flags)
9556 return qla2xxx_reset_stats(host, flags);
/*
 * qla2xxx_get_ini_stats - fill a ql_vnd_host_stats_resp with the initiator
 * counters selected by the QLA2XX_* bits in @flags.
 *
 * BIT_17 (QLA2XX_TGT_SHT_LNK_DOWN) additionally appends one entry per
 * FCT_TARGET fcport, so entry_count = set-bit count of the remaining flags
 * plus the number of targets.  @size is the caller-provided buffer size;
 * NOTE(review): the visible code does not check @size against entry_count —
 * presumably validated by the caller; confirm.
 */
9559 int qla2xxx_get_ini_stats(struct Scsi_Host *host, u32 flags,
9560 void *data, u64 size)
9562 scsi_qla_host_t *vha = shost_priv(host);
9563 struct ql_vnd_host_stats_resp *resp = (struct ql_vnd_host_stats_resp *)data;
9564 struct ql_vnd_stats *rsp_data = &resp->stats;
9565 u64 ini_entry_count = 0;
9567 u64 entry_count = 0;
9569 u32 tmp_stat_type = 0;
9570 fc_port_t *fcport = NULL;
9571 unsigned long int_flags;
9573 /* Copy stat type to work on it */
9574 tmp_stat_type = flags;
/* BIT_17 selects per-target entries; strip it before counting bits. */
9576 if (tmp_stat_type & BIT_17) {
9577 num_tgt = qla2x00_get_num_tgts(vha);
9579 tmp_stat_type &= ~(1 << 17);
9581 ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);
9583 entry_count = ini_entry_count + num_tgt;
9585 rsp_data->entry_count = entry_count;
/* One entry per requested initiator counter, in fixed order. */
9588 if (flags & QLA2XX_HW_ERROR) {
9589 rsp_data->entry[i].stat_type = QLA2XX_HW_ERROR;
9590 rsp_data->entry[i].tgt_num = 0x0;
9591 rsp_data->entry[i].cnt = vha->hw_err_cnt;
9595 if (flags & QLA2XX_SHT_LNK_DWN) {
9596 rsp_data->entry[i].stat_type = QLA2XX_SHT_LNK_DWN;
9597 rsp_data->entry[i].tgt_num = 0x0;
9598 rsp_data->entry[i].cnt = vha->short_link_down_cnt;
9602 if (flags & QLA2XX_INT_ERR) {
9603 rsp_data->entry[i].stat_type = QLA2XX_INT_ERR;
9604 rsp_data->entry[i].tgt_num = 0x0;
9605 rsp_data->entry[i].cnt = vha->interface_err_cnt;
9609 if (flags & QLA2XX_CMD_TIMEOUT) {
9610 rsp_data->entry[i].stat_type = QLA2XX_CMD_TIMEOUT;
9611 rsp_data->entry[i].tgt_num = 0x0;
9612 rsp_data->entry[i].cnt = vha->cmd_timeout_cnt;
9616 if (flags & QLA2XX_RESET_CMD_ERR) {
9617 rsp_data->entry[i].stat_type = QLA2XX_RESET_CMD_ERR;
9618 rsp_data->entry[i].tgt_num = 0x0;
9619 rsp_data->entry[i].cnt = vha->reset_cmd_err_cnt;
9623 /* i will continue from previous loop, as target
9624 * entries are after initiator
9626 if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
9627 spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
9628 list_for_each_entry(fcport, &vha->vp_fcports, list) {
9629 if (fcport->port_type != FCT_TARGET)
9633 rsp_data->entry[i].stat_type = QLA2XX_TGT_SHT_LNK_DOWN;
9634 rsp_data->entry[i].tgt_num = fcport->rport->number;
9635 rsp_data->entry[i].cnt = fcport->tgt_short_link_down_cnt;
9638 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
9640 resp->status = EXT_STATUS_OK;
/*
 * qla2xxx_get_tgt_stats - report a single target's short-link-down count
 * for @rport into a ql_vnd_tgt_stats_resp (always exactly one entry).
 */
9645 int qla2xxx_get_tgt_stats(struct Scsi_Host *host, u32 flags,
9646 struct fc_rport *rport, void *data, u64 size)
9648 struct ql_vnd_tgt_stats_resp *tgt_data = data;
/* The fcport backing this rport is stashed in rport->dd_data. */
9649 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
9651 tgt_data->status = 0;
9652 tgt_data->stats.entry_count = 1;
9653 tgt_data->stats.entry[0].stat_type = flags;
9654 tgt_data->stats.entry[0].tgt_num = rport->number;
9655 tgt_data->stats.entry[0].cnt = fcport->tgt_short_link_down_cnt;
/*
 * qla2xxx_disable_port - isolate this port from the fabric.
 *
 * Sets the port_isolated flag; if the chip is already down there is
 * nothing more to tear down, otherwise clean up the ISP state and wait
 * for all sessions to be deleted.
 */
9660 int qla2xxx_disable_port(struct Scsi_Host *host)
9662 scsi_qla_host_t *vha = shost_priv(host);
9664 vha->hw->flags.port_isolated = 1;
9666 if (qla2x00_chip_is_down(vha))
9669 if (vha->flags.online) {
9670 qla2x00_abort_isp_cleanup(vha);
9671 qla2x00_wait_for_sess_deletion(vha);
9677 int qla2xxx_enable_port(struct Scsi_Host *host)
9679 scsi_qla_host_t *vha = shost_priv(host);
9681 vha->hw->flags.port_isolated = 0;
9682 /* Set the flag to 1, so that isp_abort can proceed */
9683 vha->flags.online = 1;
9684 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
9685 qla2xxx_wake_dpc(vha);