2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
9 #include <linux/utsname.h>
11 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
12 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
13 static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
14 static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
15 static int qla2x00_sns_rft_id(scsi_qla_host_t *);
16 static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
17 static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
18 static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
19 static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
20 static int qla_async_rsnn_nn(scsi_qla_host_t *);
23 * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
27 * Returns a pointer to the @vha's ms_iocb.
30 qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
32 struct qla_hw_data *ha = vha->hw;
33 ms_iocb_entry_t *ms_pkt;
/* Reuse the caller-supplied IOCB buffer and start from a clean slate. */
35 ms_pkt = (ms_iocb_entry_t *)arg->iocb;
36 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
38 ms_pkt->entry_type = MS_IOCB_TYPE;
39 ms_pkt->entry_count = 1;
/* SNS CT queries are addressed to the fabric's simple name server. */
40 SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
41 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
/* Timeout is 2 * R_A_TOV; r_a_tov presumably in 100ms units -- TODO confirm. */
42 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
/* One command DSD plus one response DSD, two descriptors total. */
43 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
44 ms_pkt->total_dsd_count = cpu_to_le16(2);
45 ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
46 ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
/* Request and response share the same CT buffer layout; lengths mirror
 * the byte counts set above (both already little-endian). */
48 put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address);
49 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
51 put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address);
52 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
/* Account this as a control request for the statistics counters. */
54 vha->qla_stats.control_requests++;
60 * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
64 * Returns a pointer to the @ha's ms_iocb.
67 qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
69 struct qla_hw_data *ha = vha->hw;
70 struct ct_entry_24xx *ct_pkt;
/* ISP24xx-family variant: build a CT pass-through IOCB instead of the
 * legacy MS IOCB used by qla2x00_prep_ms_iocb(). */
72 ct_pkt = (struct ct_entry_24xx *)arg->iocb;
73 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
75 ct_pkt->entry_type = CT_IOCB_TYPE;
76 ct_pkt->entry_count = 1;
/* FWI2 firmware addresses the name server by N_Port handle, not loop ID. */
77 ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
/* Timeout is 2 * R_A_TOV; r_a_tov presumably in 100ms units -- TODO confirm. */
78 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
79 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
80 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
81 ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
82 ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
/* dsd[0] describes the request buffer, dsd[1] the response buffer. */
84 put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address);
85 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
87 put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address);
88 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
/* Tag the IOCB with the issuing virtual port's index. */
89 ct_pkt->vp_index = vha->vp_idx;
91 vha->qla_stats.control_requests++;
97 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
98 * @p: CT request buffer
100 * @rsp_size: response size in bytes
102 * Returns a pointer to the initialized @ct_req.
104 static inline struct ct_sns_req *
105 qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
107 memset(p, 0, sizeof(struct ct_sns_pkt));
/* Standard FC-GS CT_IU preamble: revision 1, GS type FCh (directory
 * service), GS subtype 02h (name server). */
109 p->p.req.header.revision = 0x01;
110 p->p.req.header.gs_type = 0xFC;
111 p->p.req.header.gs_subtype = 0x02;
112 p->p.req.command = cpu_to_be16(cmd);
/* Max residual size is expressed in 4-byte words, excluding the 16-byte
 * CT_IU header. */
113 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
/*
 * Validate a completed MS/CT IOCB and its CT response.  Distinguishes
 * entry errors, CT rejects, name-server logout and timeouts, mapping each
 * to a driver status code.  NOTE(review): this listing appears truncated;
 * several switch labels/breaks are not visible here.
 */
119 qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
120 struct ct_sns_rsp *ct_rsp, const char *routine)
123 uint16_t comp_status;
124 struct qla_hw_data *ha = vha->hw;
125 bool lid_is_sns = false;
/* Assume failure until a recognized completion status proves otherwise. */
127 rval = QLA_FUNCTION_FAILED;
128 if (ms_pkt->entry_status != 0) {
129 ql_dbg(ql_dbg_disc, vha, 0x2031,
130 "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
131 routine, ms_pkt->entry_status, vha->d_id.b.domain,
132 vha->d_id.b.area, vha->d_id.b.al_pa);
/* FWI2 packets carry the completion status in the CT entry layout. */
134 if (IS_FWI2_CAPABLE(ha))
135 comp_status = le16_to_cpu(
136 ((struct ct_entry_24xx *)ms_pkt)->comp_status);
138 comp_status = le16_to_cpu(ms_pkt->status);
139 switch (comp_status) {
/* Under/overruns may still contain a valid CT accept payload. */
141 case CS_DATA_UNDERRUN:
142 case CS_DATA_OVERRUN: /* Overrun? */
143 if (ct_rsp->header.response !=
144 cpu_to_be16(CT_ACCEPT_RESPONSE)) {
145 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
146 "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n",
147 routine, vha->d_id.b.domain,
148 vha->d_id.b.area, vha->d_id.b.al_pa,
149 comp_status, ct_rsp->header.response);
150 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
152 offsetof(typeof(*ct_rsp), rsp));
153 rval = QLA_INVALID_COMMAND;
/* Name server logged us out: check whether the stale loop ID belongs
 * to the SNS and, if so, schedule a loop resync. */
157 case CS_PORT_LOGGED_OUT:
158 if (IS_FWI2_CAPABLE(ha)) {
159 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
163 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
168 ql_dbg(ql_dbg_async, vha, 0x502b,
169 "%s failed, Name server has logged out",
171 rval = QLA_NOT_LOGGED_IN;
172 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
173 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
177 rval = QLA_FUNCTION_TIMEOUT;
/* Anything else is an unexpected completion status -- log and fail. */
180 ql_dbg(ql_dbg_disc, vha, 0x2033,
181 "%s failed, completion status (%x) on port_id: "
182 "%02x%02x%02x.\n", routine, comp_status,
183 vha->d_id.b.domain, vha->d_id.b.area,
192 * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
194 * @fcport: fcport entry to be updated
196 * Returns 0 on success.
199 qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
203 ms_iocb_entry_t *ms_pkt;
204 struct ct_sns_req *ct_req;
205 struct ct_sns_rsp *ct_rsp;
206 struct qla_hw_data *ha = vha->hw;
/* ISP2100/2200 lack MS IOCB support -- use the legacy SNS mailbox path. */
209 if (IS_QLA2100(ha) || IS_QLA2200(ha))
210 return qla2x00_sns_ga_nxt(vha, fcport);
/* Request and response share the pre-allocated ct_sns DMA buffer. */
212 arg.iocb = ha->ms_iocb;
213 arg.req_dma = ha->ct_sns_dma;
214 arg.rsp_dma = ha->ct_sns_dma;
215 arg.req_size = GA_NXT_REQ_SIZE;
216 arg.rsp_size = GA_NXT_RSP_SIZE;
217 arg.nport_handle = NPH_SNS;
220 /* Prepare common MS IOCB */
221 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
223 /* Prepare CT request */
224 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD,
226 ct_rsp = &ha->ct_sns->p.rsp;
228 /* Prepare CT arguments -- port_id */
229 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
231 /* Execute MS IOCB */
232 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
233 sizeof(ms_iocb_entry_t));
234 if (rval != QLA_SUCCESS) {
236 ql_dbg(ql_dbg_disc, vha, 0x2062,
237 "GA_NXT issue IOCB failed (%d).\n", rval);
238 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
240 rval = QLA_FUNCTION_FAILED;
242 /* Populate fc_port_t entry. */
243 fcport->d_id = be_to_port_id(ct_rsp->rsp.ga_nxt.port_id);
245 memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
247 memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
/* fc4_types word 2, bit 0 flags FCP support (FC-GS FC-4 TYPEs bitmap). */
250 fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
251 FS_FC4TYPE_FCP : FC4_TYPE_OTHER;
/* Non-Nx_Port entries are flagged with an out-of-range domain so the
 * caller skips them. */
253 if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
254 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
255 fcport->d_id.b.domain = 0xf0;
257 ql_dbg(ql_dbg_disc, vha, 0x2063,
258 "GA_NXT entry - nn %8phN pn %8phN "
259 "port_id=%02x%02x%02x.\n",
260 fcport->node_name, fcport->port_name,
261 fcport->d_id.b.domain, fcport->d_id.b.area,
262 fcport->d_id.b.al_pa);
/*
 * Size of a GID_PT response: a 4-byte port-ID entry per supported fabric
 * device plus the 16-byte CT_IU header.
 */
269 qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
271 return vha->hw->max_fibre_devices * 4 + 16;
275 * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
277 * @list: switch info entries to populate
279 * NOTE: Non-Nx_Ports are not requested.
281 * Returns 0 on success.
284 qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
289 ms_iocb_entry_t *ms_pkt;
290 struct ct_sns_req *ct_req;
291 struct ct_sns_rsp *ct_rsp;
293 struct ct_sns_gid_pt_data *gid_data;
294 struct qla_hw_data *ha = vha->hw;
295 uint16_t gid_pt_rsp_size;
/* ISP2100/2200 lack MS IOCB support -- use the legacy SNS mailbox path. */
298 if (IS_QLA2100(ha) || IS_QLA2200(ha))
299 return qla2x00_sns_gid_pt(vha, list);
/* Response size scales with the maximum supported fabric device count. */
302 gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
304 arg.iocb = ha->ms_iocb;
305 arg.req_dma = ha->ct_sns_dma;
306 arg.rsp_dma = ha->ct_sns_dma;
307 arg.req_size = GID_PT_REQ_SIZE;
308 arg.rsp_size = gid_pt_rsp_size;
309 arg.nport_handle = NPH_SNS;
312 /* Prepare common MS IOCB */
313 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
315 /* Prepare CT request */
316 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size);
317 ct_rsp = &ha->ct_sns->p.rsp;
319 /* Prepare CT arguments -- port_type */
320 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
322 /* Execute MS IOCB */
323 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
324 sizeof(ms_iocb_entry_t));
325 if (rval != QLA_SUCCESS) {
327 ql_dbg(ql_dbg_disc, vha, 0x2055,
328 "GID_PT issue IOCB failed (%d).\n", rval);
329 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
331 rval = QLA_FUNCTION_FAILED;
333 /* Set port IDs in switch info list. */
334 for (i = 0; i < ha->max_fibre_devices; i++) {
335 gid_data = &ct_rsp->rsp.gid_pt.entries[i];
336 list[i].d_id = be_to_port_id(gid_data->port_id);
337 memset(list[i].fabric_port_name, 0, WWN_SIZE);
338 list[i].fp_speed = PORT_SPEED_UNKNOWN;
/* Control byte BIT_7 marks the last entry; stash it in rsvd_1 so
 * later per-device loops know where the list ends. */
341 if (gid_data->control_byte & BIT_7) {
342 list[i].d_id.b.rsvd_1 = gid_data->control_byte;
348 * If we've used all available slots, then the switch is
349 * reporting back more devices than we can handle with this
350 * single call. Return a failed status, and let GA_NXT handle
353 if (i == ha->max_fibre_devices)
354 rval = QLA_FUNCTION_FAILED;
361 * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
363 * @list: switch info entries to populate
365 * Returns 0 on success.
368 qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
370 int rval = QLA_SUCCESS;
373 ms_iocb_entry_t *ms_pkt;
374 struct ct_sns_req *ct_req;
375 struct ct_sns_rsp *ct_rsp;
376 struct qla_hw_data *ha = vha->hw;
/* ISP2100/2200 lack MS IOCB support -- use the legacy SNS mailbox path. */
379 if (IS_QLA2100(ha) || IS_QLA2200(ha))
380 return qla2x00_sns_gpn_id(vha, list);
382 arg.iocb = ha->ms_iocb;
383 arg.req_dma = ha->ct_sns_dma;
384 arg.rsp_dma = ha->ct_sns_dma;
385 arg.req_size = GPN_ID_REQ_SIZE;
386 arg.rsp_size = GPN_ID_RSP_SIZE;
387 arg.nport_handle = NPH_SNS;
/* One GPN_ID round trip per device discovered by the earlier GID_PT. */
389 for (i = 0; i < ha->max_fibre_devices; i++) {
391 /* Prepare common MS IOCB */
392 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
394 /* Prepare CT request */
395 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD,
397 ct_rsp = &ha->ct_sns->p.rsp;
399 /* Prepare CT arguments -- port_id */
400 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
402 /* Execute MS IOCB */
403 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
404 sizeof(ms_iocb_entry_t));
405 if (rval != QLA_SUCCESS) {
407 ql_dbg(ql_dbg_disc, vha, 0x2056,
408 "GPN_ID issue IOCB failed (%d).\n", rval);
410 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
411 "GPN_ID") != QLA_SUCCESS) {
412 rval = QLA_FUNCTION_FAILED;
/* Save the returned WWPN into the switch-info entry. */
416 memcpy(list[i].port_name,
417 ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
/* rsvd_1 was set by GID_PT on the final entry -- stop there. */
420 /* Last device exit. */
421 if (list[i].d_id.b.rsvd_1 != 0)
429 * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
431 * @list: switch info entries to populate
433 * Returns 0 on success.
436 qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
438 int rval = QLA_SUCCESS;
440 struct qla_hw_data *ha = vha->hw;
441 ms_iocb_entry_t *ms_pkt;
442 struct ct_sns_req *ct_req;
443 struct ct_sns_rsp *ct_rsp;
/* ISP2100/2200 lack MS IOCB support -- use the legacy SNS mailbox path. */
446 if (IS_QLA2100(ha) || IS_QLA2200(ha))
447 return qla2x00_sns_gnn_id(vha, list);
449 arg.iocb = ha->ms_iocb;
450 arg.req_dma = ha->ct_sns_dma;
451 arg.rsp_dma = ha->ct_sns_dma;
452 arg.req_size = GNN_ID_REQ_SIZE;
453 arg.rsp_size = GNN_ID_RSP_SIZE;
454 arg.nport_handle = NPH_SNS;
/* One GNN_ID round trip per device discovered by the earlier GID_PT. */
456 for (i = 0; i < ha->max_fibre_devices; i++) {
458 /* Prepare common MS IOCB */
459 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
461 /* Prepare CT request */
462 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD,
464 ct_rsp = &ha->ct_sns->p.rsp;
466 /* Prepare CT arguments -- port_id */
467 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
469 /* Execute MS IOCB */
470 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
471 sizeof(ms_iocb_entry_t));
472 if (rval != QLA_SUCCESS) {
474 ql_dbg(ql_dbg_disc, vha, 0x2057,
475 "GNN_ID issue IOCB failed (%d).\n", rval);
477 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
478 "GNN_ID") != QLA_SUCCESS) {
479 rval = QLA_FUNCTION_FAILED;
/* Save the returned WWNN into the switch-info entry. */
483 memcpy(list[i].node_name,
484 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
486 ql_dbg(ql_dbg_disc, vha, 0x2058,
487 "GID_PT entry - nn %8phN pn %8phN "
488 "portid=%02x%02x%02x.\n",
489 list[i].node_name, list[i].port_name,
490 list[i].d_id.b.domain, list[i].d_id.b.area,
491 list[i].d_id.b.al_pa);
/* rsvd_1 was set by GID_PT on the final entry -- stop there. */
494 /* Last device exit. */
495 if (list[i].d_id.b.rsvd_1 != 0)
/*
 * Completion handler for async SNS SRBs.  On timeout the response buffer
 * is cleared and the command is re-queued (up to a retry limit) via a
 * QLA_EVT_SP_RETRY work item; otherwise the DMA buffers are released and
 * a QLA_EVT_UNMAP work item retires the SRB.
 */
502 static void qla2x00_async_sns_sp_done(srb_t *sp, int rc)
504 struct scsi_qla_host *vha = sp->vha;
505 struct ct_sns_pkt *ct_sns;
506 struct qla_work_evt *e;
509 if (rc == QLA_SUCCESS) {
510 ql_dbg(ql_dbg_disc, vha, 0x204f,
511 "Async done-%s exiting normally.\n",
513 } else if (rc == QLA_FUNCTION_TIMEOUT) {
514 ql_dbg(ql_dbg_disc, vha, 0x204f,
515 "Async done-%s timeout\n", sp->name);
/* Scrub any partial response before the retry reuses the buffer. */
517 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
518 memset(ct_sns, 0, sizeof(*ct_sns));
/* Give up after a bounded number of retries. */
520 if (sp->retry_count > 3)
523 ql_dbg(ql_dbg_disc, vha, 0x204f,
524 "Async done-%s fail rc %x. Retry count %d\n",
525 sp->name, rc, sp->retry_count);
527 e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
/* Stop the IOCB timer before handing the SRB to the retry worker. */
531 del_timer(&sp->u.iocb_cmd.timer);
533 qla2x00_post_work(vha, e);
538 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
541 /* please ignore kernel warning. otherwise, we have mem leak. */
542 if (sp->u.iocb_cmd.u.ctarg.req) {
543 dma_free_coherent(&vha->hw->pdev->dev,
544 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
545 sp->u.iocb_cmd.u.ctarg.req,
546 sp->u.iocb_cmd.u.ctarg.req_dma);
547 sp->u.iocb_cmd.u.ctarg.req = NULL;
550 if (sp->u.iocb_cmd.u.ctarg.rsp) {
551 dma_free_coherent(&vha->hw->pdev->dev,
552 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
553 sp->u.iocb_cmd.u.ctarg.rsp,
554 sp->u.iocb_cmd.u.ctarg.rsp_dma);
555 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
564 qla2x00_post_work(vha, e);
568 * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
571 * Returns 0 on success.
574 qla2x00_rft_id(scsi_qla_host_t *vha)
576 struct qla_hw_data *ha = vha->hw;
/* Legacy ISPs register via the SNS mailbox; everything else goes async. */
578 if (IS_QLA2100(ha) || IS_QLA2200(ha))
579 return qla2x00_sns_rft_id(vha);
581 return qla_async_rftid(vha, &vha->d_id);
/*
 * Issue an asynchronous RFT_ID (register FC-4 types) to the fabric name
 * server.  Allocates an SRB plus coherent request/response buffers; both
 * are released by qla2x00_async_sns_sp_done() on completion.
 */
584 static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
586 int rval = QLA_MEMORY_ALLOC_FAILED;
587 struct ct_sns_req *ct_req;
589 struct ct_sns_pkt *ct_sns;
/* Registration only makes sense while the port is up. */
591 if (!vha->flags.online)
594 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
598 sp->type = SRB_CT_PTHRU_CMD;
600 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2)
602 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
603 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
605 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
606 if (!sp->u.iocb_cmd.u.ctarg.req) {
607 ql_log(ql_log_warn, vha, 0xd041,
608 "%s: Failed to allocate ct_sns request.\n",
613 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
614 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
616 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
617 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
618 ql_log(ql_log_warn, vha, 0xd042,
619 "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then build the request in its own buffer. */
623 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
624 memset(ct_sns, 0, sizeof(*ct_sns));
625 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
627 /* Prepare CT request */
628 ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
630 /* Prepare CT arguments -- port_id, FC-4 types */
631 ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id);
632 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
634 if (vha->flags.nvme_enabled)
635 ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
637 sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
638 sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
639 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
640 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
641 sp->done = qla2x00_async_sns_sp_done;
643 ql_dbg(ql_dbg_disc, vha, 0xffff,
644 "Async-%s - hdl=%x portid %06x.\n",
645 sp->name, sp->handle, d_id->b24);
647 rval = qla2x00_start_sp(sp);
648 if (rval != QLA_SUCCESS) {
649 ql_dbg(ql_dbg_disc, vha, 0x2043,
650 "RFT_ID issue IOCB failed (%d).\n", rval);
661 * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
665 * Returns 0 on success.
668 qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
670 struct qla_hw_data *ha = vha->hw;
/* RFF_ID is not available on the legacy ISPs; report success so the
 * registration sequence continues. */
672 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
673 ql_dbg(ql_dbg_disc, vha, 0x2046,
674 "RFF_ID call not supported on ISP2100/ISP2200.\n");
675 return (QLA_SUCCESS);
/* qlt_rff_id() supplies the FC-4 feature bits (target/initiator mode). */
678 return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), type);
/*
 * Issue an asynchronous RFF_ID (register FC-4 features) for @fc4type with
 * feature bits @fc4feature.  Buffer ownership mirrors qla_async_rftid():
 * the completion handler frees the coherent request/response buffers.
 */
681 static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
682 u8 fc4feature, u8 fc4type)
684 int rval = QLA_MEMORY_ALLOC_FAILED;
685 struct ct_sns_req *ct_req;
687 struct ct_sns_pkt *ct_sns;
689 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
693 sp->type = SRB_CT_PTHRU_CMD;
695 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
697 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
698 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
700 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
701 if (!sp->u.iocb_cmd.u.ctarg.req) {
702 ql_log(ql_log_warn, vha, 0xd041,
703 "%s: Failed to allocate ct_sns request.\n",
708 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
709 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
711 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
712 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
713 ql_log(ql_log_warn, vha, 0xd042,
714 "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then build the request in its own buffer. */
718 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
719 memset(ct_sns, 0, sizeof(*ct_sns));
720 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
722 /* Prepare CT request */
723 ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
725 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
726 ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id);
727 ct_req->req.rff_id.fc4_feature = fc4feature;
728 ct_req->req.rff_id.fc4_type = fc4type; /* SCSI-FCP or FC-NVMe */
730 sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
731 sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
732 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
733 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
734 sp->done = qla2x00_async_sns_sp_done;
736 ql_dbg(ql_dbg_disc, vha, 0xffff,
737 "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
738 sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
740 rval = qla2x00_start_sp(sp);
741 if (rval != QLA_SUCCESS) {
742 ql_dbg(ql_dbg_disc, vha, 0x2047,
743 "RFF_ID issue IOCB failed (%d).\n", rval);
756 * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
759 * Returns 0 on success.
762 qla2x00_rnn_id(scsi_qla_host_t *vha)
764 struct qla_hw_data *ha = vha->hw;
/* Legacy ISPs register via the SNS mailbox; everything else goes async. */
766 if (IS_QLA2100(ha) || IS_QLA2200(ha))
767 return qla2x00_sns_rnn_id(vha);
769 return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
/*
 * Issue an asynchronous RNN_ID (register node name) to the fabric name
 * server.  Buffer ownership mirrors qla_async_rftid(): the completion
 * handler frees the coherent request/response buffers.
 */
772 static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
775 int rval = QLA_MEMORY_ALLOC_FAILED;
776 struct ct_sns_req *ct_req;
778 struct ct_sns_pkt *ct_sns;
780 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
784 sp->type = SRB_CT_PTHRU_CMD;
786 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
788 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
789 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
791 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
792 if (!sp->u.iocb_cmd.u.ctarg.req) {
793 ql_log(ql_log_warn, vha, 0xd041,
794 "%s: Failed to allocate ct_sns request.\n",
799 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
800 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
802 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
803 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
804 ql_log(ql_log_warn, vha, 0xd042,
805 "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then build the request in its own buffer. */
809 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
810 memset(ct_sns, 0, sizeof(*ct_sns));
811 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
813 /* Prepare CT request */
814 ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
816 /* Prepare CT arguments -- port_id, node_name */
817 ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id);
818 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
820 sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
821 sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
822 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
824 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
825 sp->done = qla2x00_async_sns_sp_done;
827 ql_dbg(ql_dbg_disc, vha, 0xffff,
828 "Async-%s - hdl=%x portid %06x\n",
829 sp->name, sp->handle, d_id->b24);
831 rval = qla2x00_start_sp(sp);
832 if (rval != QLA_SUCCESS) {
833 ql_dbg(ql_dbg_disc, vha, 0x204d,
834 "RNN_ID issue IOCB failed (%d).\n", rval);
/*
 * Format the HBA's symbolic node name ("model FW:version DVR:version")
 * into @snn, at most @size bytes.  The FX00 branch (not fully visible in
 * this listing) uses the string firmware version from ha->mr.
 */
847 qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
849 struct qla_hw_data *ha = vha->hw;
852 snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number,
853 ha->mr.fw_version, qla2x00_version_str);
856 "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
857 ha->fw_major_version, ha->fw_minor_version,
858 ha->fw_subminor_version, qla2x00_version_str);
862 * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
865 * Returns 0 on success.
868 qla2x00_rsnn_nn(scsi_qla_host_t *vha)
870 struct qla_hw_data *ha = vha->hw;
/* RSNN_NN is not available on the legacy ISPs; report success so the
 * registration sequence continues. */
872 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
873 ql_dbg(ql_dbg_disc, vha, 0x2050,
874 "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
875 return (QLA_SUCCESS);
878 return qla_async_rsnn_nn(vha);
/*
 * Issue an asynchronous RSNN_NN (register symbolic node name) to the
 * fabric name server.  Buffer ownership mirrors qla_async_rftid(): the
 * completion handler frees the coherent request/response buffers.
 */
881 static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
883 int rval = QLA_MEMORY_ALLOC_FAILED;
884 struct ct_sns_req *ct_req;
886 struct ct_sns_pkt *ct_sns;
888 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
892 sp->type = SRB_CT_PTHRU_CMD;
893 sp->name = "rsnn_nn";
894 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
896 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
897 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
899 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
900 if (!sp->u.iocb_cmd.u.ctarg.req) {
901 ql_log(ql_log_warn, vha, 0xd041,
902 "%s: Failed to allocate ct_sns request.\n",
907 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
908 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
910 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
911 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
912 ql_log(ql_log_warn, vha, 0xd042,
913 "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then build the request in its own buffer. */
917 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
918 memset(ct_sns, 0, sizeof(*ct_sns));
919 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
921 /* Prepare CT request */
922 ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
924 /* Prepare CT arguments -- node_name, symbolic node_name, size */
925 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
927 /* Prepare the Symbolic Node Name */
928 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
929 sizeof(ct_req->req.rsnn_nn.sym_node_name));
930 ct_req->req.rsnn_nn.name_len =
931 (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
/* Request size = CT header/WWN prefix (24) + length byte + name bytes;
 * only the used portion of the symbolic name is sent. */
934 sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
935 sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
936 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
938 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
939 sp->done = qla2x00_async_sns_sp_done;
941 ql_dbg(ql_dbg_disc, vha, 0xffff,
942 "Async-%s - hdl=%x.\n",
943 sp->name, sp->handle);
945 rval = qla2x00_start_sp(sp);
946 if (rval != QLA_SUCCESS) {
947 ql_dbg(ql_dbg_disc, vha, 0x2043,
948 "RFT_ID issue IOCB failed (%d).\n", rval);
961 * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
964 * @scmd_len: Subcommand length
965 * @data_size: response size in bytes
967 * Returns a pointer to the @ha's sns_cmd.
969 static inline struct sns_cmd_pkt *
970 qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
974 struct sns_cmd_pkt *sns_cmd;
975 struct qla_hw_data *ha = vha->hw;
/* Reuse the pre-allocated SNS command buffer, cleared each time. */
977 sns_cmd = ha->sns_cmd;
978 memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
/* Buffer length is expressed in 16-bit words. */
979 wc = data_size / 2; /* Size in 16bit words. */
980 sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
981 put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address);
982 sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
983 sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
/* Response size field excludes the 16-byte header, in 32-bit words. */
984 wc = (data_size - 16) / 4; /* Size in 32bit words. */
985 sns_cmd->p.cmd.size = cpu_to_le16(wc);
/* Account this as a control request for the statistics counters. */
987 vha->qla_stats.control_requests++;
993 * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
995 * @fcport: fcport entry to be updated
997 * This command uses the old Execute SNS Command mailbox routine.
999 * Returns 0 on success.
1002 qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
1004 int rval = QLA_SUCCESS;
1005 struct qla_hw_data *ha = vha->hw;
1006 struct sns_cmd_pkt *sns_cmd;
1009 /* Prepare SNS command request. */
1010 sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
1011 GA_NXT_SNS_DATA_SIZE);
/* Port ID goes in little-endian byte order: al_pa, area, domain. */
1013 /* Prepare SNS command arguments -- port_id. */
1014 sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
1015 sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
1016 sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
1018 /* Execute SNS command. */
1019 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
1020 sizeof(struct sns_cmd_pkt))
/* gan_data[8..9] == 80 02 is the CT accept response code. */
1021 if (rval != QLA_SUCCESS) {
1023 ql_dbg(ql_dbg_disc, vha, 0x205f,
1024 "GA_NXT Send SNS failed (%d).\n", rval);
1025 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
1026 sns_cmd->p.gan_data[9] != 0x02) {
1027 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
1028 "GA_NXT failed, rejected request ga_nxt_rsp:\n");
1029 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
1030 sns_cmd->p.gan_data, 16);
1031 rval = QLA_FUNCTION_FAILED;
1033 /* Populate fc_port_t entry. */
1034 fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
1035 fcport->d_id.b.area = sns_cmd->p.gan_data[18];
1036 fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];
1038 memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
1039 memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);
/* Non-Nx_Port entries are flagged with an out-of-range domain so the
 * caller skips them. */
1041 if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
1042 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
1043 fcport->d_id.b.domain = 0xf0;
1045 ql_dbg(ql_dbg_disc, vha, 0x2061,
1046 "GA_NXT entry - nn %8phN pn %8phN "
1047 "port_id=%02x%02x%02x.\n",
1048 fcport->node_name, fcport->port_name,
1049 fcport->d_id.b.domain, fcport->d_id.b.area,
1050 fcport->d_id.b.al_pa);
1057 * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
1059 * @list: switch info entries to populate
1061 * This command uses the old Execute SNS Command mailbox routine.
1063 * NOTE: Non-Nx_Ports are not requested.
1065 * Returns 0 on success.
1068 qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
1071 struct qla_hw_data *ha = vha->hw;
1074 struct sns_cmd_pkt *sns_cmd;
1075 uint16_t gid_pt_sns_data_size;
/* Response size scales with the maximum supported fabric device count. */
1077 gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
1080 /* Prepare SNS command request. */
1081 sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
1082 gid_pt_sns_data_size);
1084 /* Prepare SNS command arguments -- port_type. */
1085 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
1087 /* Execute SNS command. */
1088 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
1089 sizeof(struct sns_cmd_pkt));
/* gid_data[8..9] == 80 02 is the CT accept response code. */
1090 if (rval != QLA_SUCCESS) {
1092 ql_dbg(ql_dbg_disc, vha, 0x206d,
1093 "GID_PT Send SNS failed (%d).\n", rval);
1094 } else if (sns_cmd->p.gid_data[8] != 0x80 ||
1095 sns_cmd->p.gid_data[9] != 0x02) {
1096 ql_dbg(ql_dbg_disc, vha, 0x202f,
1097 "GID_PT failed, rejected request, gid_rsp:\n");
1098 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
1099 sns_cmd->p.gid_data, 16);
1100 rval = QLA_FUNCTION_FAILED;
1102 /* Set port IDs in switch info list. */
1103 for (i = 0; i < ha->max_fibre_devices; i++) {
/* Each 4-byte entry: control byte then domain/area/al_pa. */
1104 entry = &sns_cmd->p.gid_data[(i * 4) + 16];
1105 list[i].d_id.b.domain = entry[1];
1106 list[i].d_id.b.area = entry[2];
1107 list[i].d_id.b.al_pa = entry[3];
/* Control byte BIT_7 marks the last entry; stash it in rsvd_1 so
 * later per-device loops know where the list ends. */
1109 /* Last one exit. */
1110 if (entry[0] & BIT_7) {
1111 list[i].d_id.b.rsvd_1 = entry[0];
1117 * If we've used all available slots, then the switch is
1118 * reporting back more devices than we can handle with this
1119 * single call. Return a failed status, and let GA_NXT handle
1122 if (i == ha->max_fibre_devices)
1123 rval = QLA_FUNCTION_FAILED;
1130 * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
1132 * @list: switch info entries to populate
1134 * This command uses the old Execute SNS Command mailbox routine.
1136 * Returns 0 on success.
1139 qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1141 int rval = QLA_SUCCESS;
1142 struct qla_hw_data *ha = vha->hw;
1144 struct sns_cmd_pkt *sns_cmd;
/* One GPN_ID round trip per device discovered by the earlier GID_PT. */
1146 for (i = 0; i < ha->max_fibre_devices; i++) {
1148 /* Prepare SNS command request. */
1149 sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
1150 GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
/* Port ID goes in little-endian byte order: al_pa, area, domain. */
1152 /* Prepare SNS command arguments -- port_id. */
1153 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1154 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1155 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1157 /* Execute SNS command. */
1158 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1159 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
/* gpn_data[8..9] == 80 02 is the CT accept response code. */
1160 if (rval != QLA_SUCCESS) {
1162 ql_dbg(ql_dbg_disc, vha, 0x2032,
1163 "GPN_ID Send SNS failed (%d).\n", rval);
1164 } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
1165 sns_cmd->p.gpn_data[9] != 0x02) {
1166 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
1167 "GPN_ID failed, rejected request, gpn_rsp:\n");
1168 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
1169 sns_cmd->p.gpn_data, 16);
1170 rval = QLA_FUNCTION_FAILED;
/* Save the returned WWPN into the switch-info entry. */
1173 memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
/* rsvd_1 was set by GID_PT on the final entry -- stop there. */
1177 /* Last device exit. */
1178 if (list[i].d_id.b.rsvd_1 != 0)
1186 * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
1188 * @list: switch info entries to populate
1190 * This command uses the old Execute SNS Command mailbox routine.
1192 * Returns 0 on success.
1195 qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
1197 int rval = QLA_SUCCESS;
1198 struct qla_hw_data *ha = vha->hw;
1200 struct sns_cmd_pkt *sns_cmd;
/* One GNN_ID round trip per device discovered by the earlier GID_PT. */
1202 for (i = 0; i < ha->max_fibre_devices; i++) {
1204 /* Prepare SNS command request. */
1205 sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
1206 GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
/* Port ID goes in little-endian byte order: al_pa, area, domain. */
1208 /* Prepare SNS command arguments -- port_id. */
1209 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1210 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1211 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1213 /* Execute SNS command. */
1214 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1215 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
/* gnn_data[8..9] == 80 02 is the CT accept response code. */
1216 if (rval != QLA_SUCCESS) {
1218 ql_dbg(ql_dbg_disc, vha, 0x203f,
1219 "GNN_ID Send SNS failed (%d).\n", rval);
1220 } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
1221 sns_cmd->p.gnn_data[9] != 0x02) {
1222 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
1223 "GNN_ID failed, rejected request, gnn_rsp:\n");
1224 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
1225 sns_cmd->p.gnn_data, 16);
1226 rval = QLA_FUNCTION_FAILED;
/* Save the returned WWNN into the switch-info entry. */
1229 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
1232 ql_dbg(ql_dbg_disc, vha, 0x206e,
1233 "GID_PT entry - nn %8phN pn %8phN "
1234 "port_id=%02x%02x%02x.\n",
1235 list[i].node_name, list[i].port_name,
1236 list[i].d_id.b.domain, list[i].d_id.b.area,
1237 list[i].d_id.b.al_pa);
/* rsvd_1 was set by GID_PT on the final entry -- stop there. */
1240 /* Last device exit. */
1241 if (list[i].d_id.b.rsvd_1 != 0)
1249 * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
1252 * This command uses the old Execute SNS Command mailbox routine.
1254 * Returns 0 on success.
/*
 * qla2x00_sns_rft_id() - register the HBA's supported FC-4 types (RFT_ID)
 * with the fabric name server via the legacy SNS mailbox path.
 * NOTE(review): extract is missing interior lines (rval declaration, braces).
 */
1257 qla2x00_sns_rft_id(scsi_qla_host_t *vha)
1260 struct qla_hw_data *ha = vha->hw;
1261 struct sns_cmd_pkt *sns_cmd;
1264 /* Prepare SNS command request. */
1265 sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
1266 RFT_ID_SNS_DATA_SIZE);
1268 /* Prepare SNS command arguments -- port_id, FC-4 types */
1269 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1270 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1271 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1273 sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */
1275 /* Execute SNS command. */
1276 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
1277 sizeof(struct sns_cmd_pkt));
1278 if (rval != QLA_SUCCESS) {
1280 ql_dbg(ql_dbg_disc, vha, 0x2060,
1281 "RFT_ID Send SNS failed (%d).\n", rval);
/* Bytes 8/9 must be 0x80 0x02 (CT accept); anything else is a reject. */
1282 } else if (sns_cmd->p.rft_data[8] != 0x80 ||
1283 sns_cmd->p.rft_data[9] != 0x02) {
1284 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
1285 "RFT_ID failed, rejected request rft_rsp:\n");
1286 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
1287 sns_cmd->p.rft_data, 16);
1288 rval = QLA_FUNCTION_FAILED;
1290 ql_dbg(ql_dbg_disc, vha, 0x2073,
1291 "RFT_ID exiting normally.\n");
1298 * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
1301 * This command uses the old Execute SNS Command mailbox routine.
1303 * Returns 0 on success.
/*
 * qla2x00_sns_rnn_id() - register the HBA's node name (RNN_ID) with the
 * fabric name server via the legacy SNS mailbox path.
 * NOTE(review): extract is missing interior lines (rval declaration, braces).
 */
1306 qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1309 struct qla_hw_data *ha = vha->hw;
1310 struct sns_cmd_pkt *sns_cmd;
1313 /* Prepare SNS command request. */
1314 sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
1315 RNN_ID_SNS_DATA_SIZE);
1317 /* Prepare SNS command arguments -- port_id, nodename. */
1318 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1319 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1320 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
/* Node name is copied byte-reversed into params 4..11. */
1322 sns_cmd->p.cmd.param[4] = vha->node_name[7];
1323 sns_cmd->p.cmd.param[5] = vha->node_name[6];
1324 sns_cmd->p.cmd.param[6] = vha->node_name[5];
1325 sns_cmd->p.cmd.param[7] = vha->node_name[4];
1326 sns_cmd->p.cmd.param[8] = vha->node_name[3];
1327 sns_cmd->p.cmd.param[9] = vha->node_name[2];
1328 sns_cmd->p.cmd.param[10] = vha->node_name[1];
1329 sns_cmd->p.cmd.param[11] = vha->node_name[0];
1331 /* Execute SNS command. */
1332 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
1333 sizeof(struct sns_cmd_pkt));
1334 if (rval != QLA_SUCCESS) {
1336 ql_dbg(ql_dbg_disc, vha, 0x204a,
1337 "RNN_ID Send SNS failed (%d).\n", rval);
/* Bytes 8/9 must be 0x80 0x02 (CT accept); anything else is a reject. */
1338 } else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1339 sns_cmd->p.rnn_data[9] != 0x02) {
1340 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
1341 "RNN_ID failed, rejected request, rnn_rsp:\n");
1342 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
1343 sns_cmd->p.rnn_data, 16);
1344 rval = QLA_FUNCTION_FAILED;
1346 ql_dbg(ql_dbg_disc, vha, 0x204c,
1347 "RNN_ID exiting normally.\n");
1354 * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
1357 * Returns 0 on success.
/*
 * qla2x00_mgmt_svr_login() - log in to the fabric Management Server so FDMI
 * CT commands can be issued; no-op if already logged in.
 * NOTE(review): extract is missing interior lines (ret declaration, early
 * return, braces); comments describe only what is visible.
 */
1360 qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1363 uint16_t mb[MAILBOX_REGISTER_COUNT];
1364 struct qla_hw_data *ha = vha->hw;
/* Already logged in -- nothing to do. */
1367 if (vha->flags.management_server_logged_in)
/* Management server well-known address is FF.FF.FA -- TODO confirm the
 * domain/area/al_pa arguments hidden by the elided line(s). */
1370 rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
1372 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
1373 if (rval == QLA_MEMORY_ALLOC_FAILED)
1374 ql_dbg(ql_dbg_disc, vha, 0x2085,
1375 "Failed management_server login: loopid=%x "
1376 "rval=%d\n", vha->mgmt_svr_loop_id, rval);
1378 ql_dbg(ql_dbg_disc, vha, 0x2024,
1379 "Failed management_server login: loopid=%x "
1380 "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1381 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
1383 ret = QLA_FUNCTION_FAILED;
1385 vha->flags.management_server_logged_in = 1;
1391 * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1393 * @req_size: request size in bytes
1394 * @rsp_size: response size in bytes
1396 * Returns a pointer to the @ha's ms_iocb.
/*
 * qla2x00_prep_ms_fdmi_iocb() - initialize the shared MS IOCB for an FDMI
 * request on pre-24xx (non-FWI2) adapters; both request and response DSDs
 * point at the single ha->ct_sns DMA buffer.
 */
1399 qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1402 ms_iocb_entry_t *ms_pkt;
1403 struct qla_hw_data *ha = vha->hw;
1405 ms_pkt = ha->ms_iocb;
1406 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
1408 ms_pkt->entry_type = MS_IOCB_TYPE;
1409 ms_pkt->entry_count = 1;
/* Target the management server (FDMI), not the SNS name server. */
1410 SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
1411 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
/* Timeout: 2 * R_A_TOV, converted from ms to the firmware's 100ms units. */
1412 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1413 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
1414 ms_pkt->total_dsd_count = cpu_to_le16(2);
1415 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
1416 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1418 put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address);
1419 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1421 put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address);
1422 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
1428 * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1430 * @req_size: request size in bytes
1431 * @rsp_size: response size in bytes
1433 * Returns a pointer to the @ha's ms_iocb.
/*
 * qla24xx_prep_ms_fdmi_iocb() - initialize the shared CT pass-through IOCB
 * for an FDMI request on FWI2-capable (24xx+) adapters; dsd[0] is the
 * command, dsd[1] the response, both over the single ha->ct_sns buffer.
 */
1436 qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1439 struct ct_entry_24xx *ct_pkt;
1440 struct qla_hw_data *ha = vha->hw;
1442 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1443 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1445 ct_pkt->entry_type = CT_IOCB_TYPE;
1446 ct_pkt->entry_count = 1;
1447 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
/* Timeout: 2 * R_A_TOV, converted from ms to the firmware's 100ms units. */
1448 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1449 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
1450 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
1451 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1452 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1454 put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address);
1455 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1457 put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address);
1458 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
/* Route through this host's virtual port. */
1459 ct_pkt->vp_index = vha->vp_idx;
/*
 * qla2x00_update_ms_fdmi_iocb() - patch the final request byte count into the
 * already-prepared MS/CT IOCB once the variable-length FDMI payload size is
 * known (FDMI requests are built before their size is final).
 */
1465 qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1467 struct qla_hw_data *ha = vha->hw;
/* Same DMA buffer, viewed as either the legacy MS or the 24xx CT layout. */
1468 ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1469 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1471 if (IS_FWI2_CAPABLE(ha)) {
1472 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1473 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1475 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1476 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1481 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
1482 * @p: CT request buffer
1484 * @rsp_size: response size in bytes
1486 * Returns a pointer to the initialized @ct_req.
/*
 * qla2x00_prep_ct_fdmi_req() - zero the CT packet and fill in the common
 * CT_IU preamble for an FDMI command (GS type 0xFA = management service,
 * subtype 0x10 = FDMI); max_rsp_size is encoded in 4-byte words excluding
 * the 16-byte CT header.
 */
static inline struct ct_sns_req *
qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
1492 memset(p, 0, sizeof(struct ct_sns_pkt));
1494 p->p.req.header.revision = 0x01;
1495 p->p.req.header.gs_type = 0xFA;
1496 p->p.req.header.gs_subtype = 0x10;
1497 p->p.req.command = cpu_to_be16(cmd);
1498 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
1504 * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
1507 * Returns 0 on success.
/*
 * qla2x00_fdmi_rhba() - build and issue an FDMI RHBA (Register HBA)
 * request carrying the v1 HBA attribute block (node name, manufacturer,
 * serial, model, hw/driver/option-ROM/firmware versions).
 * Each attribute entry is a TLV appended at offset @size; @size is updated
 * as entries are added and finally written back into the IOCB.
 * NOTE(review): extract is missing interior lines (declarations of size,
 * alen, sn, entries; size increments after several snprintf entries; braces).
 */
1510 qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1515 ms_iocb_entry_t *ms_pkt;
1516 struct ct_sns_req *ct_req;
1517 struct ct_sns_rsp *ct_rsp;
1519 struct ct_fdmi_hba_attr *eiter;
1520 struct qla_hw_data *ha = vha->hw;
1523 /* Prepare common MS IOCB */
1524 /* Request size adjusted after CT preparation */
1525 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1527 /* Prepare CT request */
1528 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, RHBA_RSP_SIZE);
1529 ct_rsp = &ha->ct_sns->p.rsp;
1531 /* Prepare FDMI command arguments -- attribute block, attributes. */
1532 memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
1533 ct_req->req.rhba.entry_count = cpu_to_be32(1);
1534 memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
/* Fixed prefix: identifier + port name + entry count + attr count. */
1535 size = 2 * WWN_SIZE + 4 + 4;
1538 ct_req->req.rhba.attrs.count =
1539 cpu_to_be32(FDMI_HBA_ATTR_COUNT);
1540 entries = &ct_req->req;
/* Attribute: node name. */
1543 eiter = entries + size;
1544 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1545 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1546 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1547 size += 4 + WWN_SIZE;
1549 ql_dbg(ql_dbg_disc, vha, 0x2025,
1550 "NodeName = %8phN.\n", eiter->a.node_name);
/* Attribute: manufacturer. */
1553 eiter = entries + size;
1554 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
/* NOTE(review): alen is taken from QLA2XXX_MANUFACTURER but the string
 * written is the literal "QLogic Corporation" -- confirm both match. */
1555 alen = strlen(QLA2XXX_MANUFACTURER);
1556 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1557 "%s", "QLogic Corporation");
/* Round the attribute length up to a 4-byte boundary (FDMI TLV rule). */
1558 alen += 4 - (alen & 3);
1559 eiter->len = cpu_to_be16(4 + alen);
1562 ql_dbg(ql_dbg_disc, vha, 0x2026,
1563 "Manufacturer = %s.\n", eiter->a.manufacturer);
1565 /* Serial number. */
1566 eiter = entries + size;
1567 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
/* FWI2 parts carry the serial in VPD; older parts encode it from the
 * serial0/1/2 NVRAM bytes. */
1568 if (IS_FWI2_CAPABLE(ha))
1569 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
1570 sizeof(eiter->a.serial_num));
1572 sn = ((ha->serial0 & 0x1f) << 16) |
1573 (ha->serial2 << 8) | ha->serial1;
1574 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
1575 "%c%05d", 'A' + sn / 100000, sn % 100000);
1577 alen = strlen(eiter->a.serial_num);
1578 alen += 4 - (alen & 3);
1579 eiter->len = cpu_to_be16(4 + alen);
1582 ql_dbg(ql_dbg_disc, vha, 0x2027,
1583 "Serial no. = %s.\n", eiter->a.serial_num);
/* Attribute: model name. */
1586 eiter = entries + size;
1587 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
1588 snprintf(eiter->a.model, sizeof(eiter->a.model),
1589 "%s", ha->model_number);
1590 alen = strlen(eiter->a.model);
1591 alen += 4 - (alen & 3);
1592 eiter->len = cpu_to_be16(4 + alen);
1595 ql_dbg(ql_dbg_disc, vha, 0x2028,
1596 "Model Name = %s.\n", eiter->a.model);
1598 /* Model description. */
1599 eiter = entries + size;
1600 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1601 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
1602 "%s", ha->model_desc);
1603 alen = strlen(eiter->a.model_desc);
1604 alen += 4 - (alen & 3);
1605 eiter->len = cpu_to_be16(4 + alen);
1608 ql_dbg(ql_dbg_disc, vha, 0x2029,
1609 "Model Desc = %s.\n", eiter->a.model_desc);
1611 /* Hardware version. */
1612 eiter = entries + size;
1613 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
/* Preference order: adapter_id (non-FWI2), then VPD "MN", then VPD "EC",
 * finally adapter_id again as the fallback. */
1614 if (!IS_FWI2_CAPABLE(ha)) {
1615 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1616 "HW:%s", ha->adapter_id);
1617 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
1618 sizeof(eiter->a.hw_version))) {
1620 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
1621 sizeof(eiter->a.hw_version))) {
1624 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1625 "HW:%s", ha->adapter_id);
1627 alen = strlen(eiter->a.hw_version);
1628 alen += 4 - (alen & 3);
1629 eiter->len = cpu_to_be16(4 + alen);
1632 ql_dbg(ql_dbg_disc, vha, 0x202a,
1633 "Hardware ver = %s.\n", eiter->a.hw_version);
1635 /* Driver version. */
1636 eiter = entries + size;
1637 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1638 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
1639 "%s", qla2x00_version_str);
1640 alen = strlen(eiter->a.driver_version);
1641 alen += 4 - (alen & 3);
1642 eiter->len = cpu_to_be16(4 + alen);
1645 ql_dbg(ql_dbg_disc, vha, 0x202b,
1646 "Driver ver = %s.\n", eiter->a.driver_version);
1648 /* Option ROM version. */
1649 eiter = entries + size;
1650 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1651 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
1652 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1653 alen = strlen(eiter->a.orom_version);
1654 alen += 4 - (alen & 3);
1655 eiter->len = cpu_to_be16(4 + alen);
1658 ql_dbg(ql_dbg_disc, vha , 0x202c,
1659 "Optrom vers = %s.\n", eiter->a.orom_version);
1661 /* Firmware version */
1662 eiter = entries + size;
1663 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1664 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
1665 sizeof(eiter->a.fw_version));
1666 alen = strlen(eiter->a.fw_version);
1667 alen += 4 - (alen & 3);
1668 eiter->len = cpu_to_be16(4 + alen);
1671 ql_dbg(ql_dbg_disc, vha, 0x202d,
1672 "Firmware vers = %s.\n", eiter->a.fw_version);
1674 /* Update MS request size. */
/* +16 accounts for the CT_IU header preceding the payload. */
1675 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1677 ql_dbg(ql_dbg_disc, vha, 0x202e,
1678 "RHBA identifier = %8phN size=%d.\n",
1679 ct_req->req.rhba.hba_identifier, size);
1680 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
1683 /* Execute MS IOCB */
1684 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1685 sizeof(ms_iocb_entry_t));
1686 if (rval != QLA_SUCCESS) {
1688 ql_dbg(ql_dbg_disc, vha, 0x2030,
1689 "RHBA issue IOCB failed (%d).\n", rval);
1690 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
1692 rval = QLA_FUNCTION_FAILED;
/* "Already registered" is reported distinctly so the caller can fall back
 * to RPA-only registration instead of treating it as a hard failure. */
1693 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1694 ct_rsp->header.explanation_code ==
1695 CT_EXPL_ALREADY_REGISTERED) {
1696 ql_dbg(ql_dbg_disc, vha, 0x2034,
1697 "HBA already registered.\n");
1698 rval = QLA_ALREADY_REGISTERED;
1700 ql_dbg(ql_dbg_disc, vha, 0x20ad,
1701 "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
1702 ct_rsp->header.reason_code,
1703 ct_rsp->header.explanation_code);
1706 ql_dbg(ql_dbg_disc, vha, 0x2035,
1707 "RHBA exiting normally.\n");
1714 * qla2x00_fdmi_rpa() - perform RPA registration
1717 * Returns 0 on success.
/*
 * qla2x00_fdmi_rpa() - build and issue an FDMI RPA (Register Port
 * Attributes) request carrying the v1 port attribute block (FC-4 types,
 * supported/current speed, max frame size, OS device name, host name).
 * NOTE(review): extract is missing interior lines (declarations, size
 * increments after several entries, switch breaks, braces).
 */
1720 qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1724 struct qla_hw_data *ha = vha->hw;
1725 ms_iocb_entry_t *ms_pkt;
1726 struct ct_sns_req *ct_req;
1727 struct ct_sns_rsp *ct_rsp;
1729 struct ct_fdmi_port_attr *eiter;
1730 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
1731 struct new_utsname *p_sysid = NULL;
1734 /* Prepare common MS IOCB */
1735 /* Request size adjusted after CT preparation */
1736 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
1738 /* Prepare CT request */
1739 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD,
1741 ct_rsp = &ha->ct_sns->p.rsp;
1743 /* Prepare FDMI command arguments -- attribute block, attributes. */
1744 memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
1745 size = WWN_SIZE + 4;
1748 ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
1749 entries = &ct_req->req;
/* Attribute: supported FC-4 types (bitmask; byte 2 bit 0 = FCP). */
1752 eiter = entries + size;
1753 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
1754 eiter->len = cpu_to_be16(4 + 32);
1755 eiter->a.fc4_types[2] = 0x01;
1758 ql_dbg(ql_dbg_disc, vha, 0x2039,
1759 "FC4_TYPES=%02x %02x.\n",
1760 eiter->a.fc4_types[2],
1761 eiter->a.fc4_types[1]);
1763 /* Supported speed. */
/* Speed capability mask chosen by adapter family. */
1764 eiter = entries + size;
1765 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1766 eiter->len = cpu_to_be16(4 + 4);
1767 if (IS_CNA_CAPABLE(ha))
1768 eiter->a.sup_speed = cpu_to_be32(
1769 FDMI_PORT_SPEED_10GB);
1770 else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1771 eiter->a.sup_speed = cpu_to_be32(
1772 FDMI_PORT_SPEED_32GB|
1773 FDMI_PORT_SPEED_16GB|
1774 FDMI_PORT_SPEED_8GB);
1775 else if (IS_QLA2031(ha))
1776 eiter->a.sup_speed = cpu_to_be32(
1777 FDMI_PORT_SPEED_16GB|
1778 FDMI_PORT_SPEED_8GB|
1779 FDMI_PORT_SPEED_4GB);
1780 else if (IS_QLA25XX(ha))
1781 eiter->a.sup_speed = cpu_to_be32(
1782 FDMI_PORT_SPEED_8GB|
1783 FDMI_PORT_SPEED_4GB|
1784 FDMI_PORT_SPEED_2GB|
1785 FDMI_PORT_SPEED_1GB);
1786 else if (IS_QLA24XX_TYPE(ha))
1787 eiter->a.sup_speed = cpu_to_be32(
1788 FDMI_PORT_SPEED_4GB|
1789 FDMI_PORT_SPEED_2GB|
1790 FDMI_PORT_SPEED_1GB);
1791 else if (IS_QLA23XX(ha))
1792 eiter->a.sup_speed = cpu_to_be32(
1793 FDMI_PORT_SPEED_2GB|
1794 FDMI_PORT_SPEED_1GB);
1796 eiter->a.sup_speed = cpu_to_be32(
1797 FDMI_PORT_SPEED_1GB);
1800 ql_dbg(ql_dbg_disc, vha, 0x203a,
1801 "Supported_Speed=%x.\n", eiter->a.sup_speed);
1803 /* Current speed. */
/* Map the firmware's link rate to the FDMI speed encoding. */
1804 eiter = entries + size;
1805 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1806 eiter->len = cpu_to_be16(4 + 4);
1807 switch (ha->link_data_rate) {
1808 case PORT_SPEED_1GB:
1809 eiter->a.cur_speed =
1810 cpu_to_be32(FDMI_PORT_SPEED_1GB);
1812 case PORT_SPEED_2GB:
1813 eiter->a.cur_speed =
1814 cpu_to_be32(FDMI_PORT_SPEED_2GB);
1816 case PORT_SPEED_4GB:
1817 eiter->a.cur_speed =
1818 cpu_to_be32(FDMI_PORT_SPEED_4GB);
1820 case PORT_SPEED_8GB:
1821 eiter->a.cur_speed =
1822 cpu_to_be32(FDMI_PORT_SPEED_8GB);
1824 case PORT_SPEED_10GB:
1825 eiter->a.cur_speed =
1826 cpu_to_be32(FDMI_PORT_SPEED_10GB);
1828 case PORT_SPEED_16GB:
1829 eiter->a.cur_speed =
1830 cpu_to_be32(FDMI_PORT_SPEED_16GB);
1832 case PORT_SPEED_32GB:
1833 eiter->a.cur_speed =
1834 cpu_to_be32(FDMI_PORT_SPEED_32GB);
1837 eiter->a.cur_speed =
1838 cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
1843 ql_dbg(ql_dbg_disc, vha, 0x203b,
1844 "Current_Speed=%x.\n", eiter->a.cur_speed);
1846 /* Max frame size. */
/* Payload size lives in different init-cb layouts on FWI2 vs. legacy. */
1847 eiter = entries + size;
1848 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1849 eiter->len = cpu_to_be16(4 + 4);
1850 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
1851 le16_to_cpu(icb24->frame_payload_size) :
1852 le16_to_cpu(ha->init_cb->frame_payload_size);
1853 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
1856 ql_dbg(ql_dbg_disc, vha, 0x203c,
1857 "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
1859 /* OS device name. */
1860 eiter = entries + size;
1861 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
1862 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
1863 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
1864 alen = strlen(eiter->a.os_dev_name);
1865 alen += 4 - (alen & 3);
1866 eiter->len = cpu_to_be16(4 + alen);
1869 ql_dbg(ql_dbg_disc, vha, 0x204b,
1870 "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
/* Attribute: host name -- prefer the kernel utsname node name, fall back
 * to the FC transport's configured system hostname. */
1873 eiter = entries + size;
1874 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
1875 p_sysid = utsname();
1877 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1878 "%s", p_sysid->nodename);
1880 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1881 "%s", fc_host_system_hostname(vha->host));
1883 alen = strlen(eiter->a.host_name);
1884 alen += 4 - (alen & 3);
1885 eiter->len = cpu_to_be16(4 + alen);
1888 ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name);
1890 /* Update MS request size. */
/* +16 accounts for the CT_IU header preceding the payload. */
1891 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1893 ql_dbg(ql_dbg_disc, vha, 0x203e,
1894 "RPA portname %016llx, size = %d.\n",
1895 wwn_to_u64(ct_req->req.rpa.port_name), size);
1896 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
1899 /* Execute MS IOCB */
1900 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1901 sizeof(ms_iocb_entry_t));
1902 if (rval != QLA_SUCCESS) {
1904 ql_dbg(ql_dbg_disc, vha, 0x2040,
1905 "RPA issue IOCB failed (%d).\n", rval);
1906 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
1908 rval = QLA_FUNCTION_FAILED;
/* "Already registered" is a distinct, non-fatal outcome. */
1909 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1910 ct_rsp->header.explanation_code ==
1911 CT_EXPL_ALREADY_REGISTERED) {
1912 ql_dbg(ql_dbg_disc, vha, 0x20cd,
1913 "RPA already registered.\n");
1914 rval = QLA_ALREADY_REGISTERED;
1918 ql_dbg(ql_dbg_disc, vha, 0x2041,
1919 "RPA exiting normally.\n");
1926 * qla2x00_fdmiv2_rhba() - perform RHBA FDMI v2 registration
1929 * Returns 0 on success.
/*
 * qla2x00_fdmiv2_rhba() - build and issue an FDMI-2 RHBA (Register HBA)
 * request; the v2 attribute block extends v1 with OS name/version, max CT
 * payload, symbolic node name, vendor id, port count, fabric name, BIOS
 * name, and vendor identifier.
 * NOTE(review): extract is missing interior lines (declarations, size
 * increments after several entries, braces).
 */
1932 qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
1936 ms_iocb_entry_t *ms_pkt;
1937 struct ct_sns_req *ct_req;
1938 struct ct_sns_rsp *ct_rsp;
1940 struct ct_fdmiv2_hba_attr *eiter;
1941 struct qla_hw_data *ha = vha->hw;
1942 struct new_utsname *p_sysid = NULL;
1945 /* Prepare common MS IOCB */
1946 /* Request size adjusted after CT preparation */
1947 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1949 /* Prepare CT request */
1950 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD,
1952 ct_rsp = &ha->ct_sns->p.rsp;
1954 /* Prepare FDMI command arguments -- attribute block, attributes. */
1955 memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
1956 ct_req->req.rhba2.entry_count = cpu_to_be32(1);
1957 memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
/* Fixed prefix: identifier + port name + entry count + attr count. */
1958 size = 2 * WWN_SIZE + 4 + 4;
1961 ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
1962 entries = &ct_req->req;
/* Attribute: node name. */
1965 eiter = entries + size;
1966 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1967 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1968 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1969 size += 4 + WWN_SIZE;
1971 ql_dbg(ql_dbg_disc, vha, 0x207d,
1972 "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name));
/* Attribute: manufacturer. */
1975 eiter = entries + size;
1976 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1977 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1978 "%s", "QLogic Corporation");
/* NOTE(review): redundant -- snprintf already NUL-terminates here. */
1979 eiter->a.manufacturer[strlen("QLogic Corporation")] = '\0';
1980 alen = strlen(eiter->a.manufacturer);
/* Round the attribute length up to a 4-byte boundary (FDMI TLV rule). */
1981 alen += 4 - (alen & 3);
1982 eiter->len = cpu_to_be16(4 + alen);
1985 ql_dbg(ql_dbg_disc, vha, 0x20a5,
1986 "Manufacturer = %s.\n", eiter->a.manufacturer);
1988 /* Serial number. */
/* FWI2 parts carry the serial in VPD; older parts encode it from the
 * serial0/1/2 NVRAM bytes. */
1989 eiter = entries + size;
1990 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
1991 if (IS_FWI2_CAPABLE(ha))
1992 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
1993 sizeof(eiter->a.serial_num));
1995 sn = ((ha->serial0 & 0x1f) << 16) |
1996 (ha->serial2 << 8) | ha->serial1;
1997 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
1998 "%c%05d", 'A' + sn / 100000, sn % 100000);
2000 alen = strlen(eiter->a.serial_num);
2001 alen += 4 - (alen & 3);
2002 eiter->len = cpu_to_be16(4 + alen);
2005 ql_dbg(ql_dbg_disc, vha, 0x20a6,
2006 "Serial no. = %s.\n", eiter->a.serial_num);
/* Attribute: model name. */
2009 eiter = entries + size;
2010 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
2011 snprintf(eiter->a.model, sizeof(eiter->a.model),
2012 "%s", ha->model_number);
2013 alen = strlen(eiter->a.model);
2014 alen += 4 - (alen & 3);
2015 eiter->len = cpu_to_be16(4 + alen);
2018 ql_dbg(ql_dbg_disc, vha, 0x20a7,
2019 "Model Name = %s.\n", eiter->a.model);
2021 /* Model description. */
2022 eiter = entries + size;
2023 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
2024 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
2025 "%s", ha->model_desc);
2026 alen = strlen(eiter->a.model_desc);
2027 alen += 4 - (alen & 3);
2028 eiter->len = cpu_to_be16(4 + alen);
2031 ql_dbg(ql_dbg_disc, vha, 0x20a8,
2032 "Model Desc = %s.\n", eiter->a.model_desc);
2034 /* Hardware version. */
/* Preference order: adapter_id (non-FWI2), then VPD "MN", then VPD "EC",
 * finally adapter_id again as the fallback. */
2035 eiter = entries + size;
2036 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
2037 if (!IS_FWI2_CAPABLE(ha)) {
2038 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
2039 "HW:%s", ha->adapter_id);
2040 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
2041 sizeof(eiter->a.hw_version))) {
2043 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
2044 sizeof(eiter->a.hw_version))) {
2047 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
2048 "HW:%s", ha->adapter_id);
2050 alen = strlen(eiter->a.hw_version);
2051 alen += 4 - (alen & 3);
2052 eiter->len = cpu_to_be16(4 + alen);
2055 ql_dbg(ql_dbg_disc, vha, 0x20a9,
2056 "Hardware ver = %s.\n", eiter->a.hw_version);
2058 /* Driver version. */
2059 eiter = entries + size;
2060 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
2061 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
2062 "%s", qla2x00_version_str);
2063 alen = strlen(eiter->a.driver_version);
2064 alen += 4 - (alen & 3);
2065 eiter->len = cpu_to_be16(4 + alen);
2068 ql_dbg(ql_dbg_disc, vha, 0x20aa,
2069 "Driver ver = %s.\n", eiter->a.driver_version);
2071 /* Option ROM version. */
2072 eiter = entries + size;
2073 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
2074 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
2075 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
2076 alen = strlen(eiter->a.orom_version);
2077 alen += 4 - (alen & 3);
2078 eiter->len = cpu_to_be16(4 + alen);
/* NOTE(review): this debug print indexes characters of the formatted
 * string with %d -- it prints ASCII codes, not the revision numbers.
 * Compare the v1 RHBA path, which prints the string with %s. */
2081 ql_dbg(ql_dbg_disc, vha , 0x20ab,
2082 "Optrom version = %d.%02d.\n", eiter->a.orom_version[1],
2083 eiter->a.orom_version[0]);
2085 /* Firmware version */
2086 eiter = entries + size;
2087 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
2088 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
2089 sizeof(eiter->a.fw_version));
2090 alen = strlen(eiter->a.fw_version);
2091 alen += 4 - (alen & 3);
2092 eiter->len = cpu_to_be16(4 + alen);
2095 ql_dbg(ql_dbg_disc, vha, 0x20ac,
2096 "Firmware vers = %s.\n", eiter->a.fw_version);
2098 /* OS Name and Version */
/* Prefer the kernel utsname triple; fall back to the FC transport's
 * configured system hostname. */
2099 eiter = entries + size;
2100 eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
2101 p_sysid = utsname();
2103 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
2105 p_sysid->sysname, p_sysid->release, p_sysid->version);
2107 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
2108 "%s %s", "Linux", fc_host_system_hostname(vha->host));
2110 alen = strlen(eiter->a.os_version);
2111 alen += 4 - (alen & 3);
2112 eiter->len = cpu_to_be16(4 + alen);
2115 ql_dbg(ql_dbg_disc, vha, 0x20ae,
2116 "OS Name and Version = %s.\n", eiter->a.os_version);
2118 /* MAX CT Payload Length */
2119 eiter = entries + size;
2120 eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
2121 eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size);
/* NOTE(review): second cpu_to_be32() on an already-converted value looks
 * like a double byte-swap -- verify against the upstream driver. */
2122 eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
2123 eiter->len = cpu_to_be16(4 + 4);
2126 ql_dbg(ql_dbg_disc, vha, 0x20af,
2127 "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len);
2129 /* Node Symbolic Name */
2130 eiter = entries + size;
2131 eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
2132 qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
2133 sizeof(eiter->a.sym_name));
2134 alen = strlen(eiter->a.sym_name);
2135 alen += 4 - (alen & 3);
2136 eiter->len = cpu_to_be16(4 + alen);
2139 ql_dbg(ql_dbg_disc, vha, 0x20b0,
2140 "Symbolic Name = %s.\n", eiter->a.sym_name);
/* Attribute: vendor ID (0x1077 = QLogic PCI vendor id). */
2143 eiter = entries + size;
2144 eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID);
2145 eiter->a.vendor_id = cpu_to_be32(0x1077);
2146 eiter->len = cpu_to_be16(4 + 4);
2149 ql_dbg(ql_dbg_disc, vha, 0x20b1,
2150 "Vendor Id = %x.\n", eiter->a.vendor_id);
/* Attribute: number of ports (this function registers a single port). */
2153 eiter = entries + size;
2154 eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
2155 eiter->a.num_ports = cpu_to_be32(1);
2156 eiter->len = cpu_to_be16(4 + 4);
2159 ql_dbg(ql_dbg_disc, vha, 0x20b2,
2160 "Port Num = %x.\n", eiter->a.num_ports);
/* Attribute: fabric name (fabric node WWN learned at login). */
2163 eiter = entries + size;
2164 eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
2165 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2166 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2167 size += 4 + WWN_SIZE;
2169 ql_dbg(ql_dbg_disc, vha, 0x20b3,
2170 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
/* Attribute: boot BIOS name/version. */
2173 eiter = entries + size;
2174 eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
2175 snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name),
2176 "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
2177 alen = strlen(eiter->a.bios_name);
2178 alen += 4 - (alen & 3);
2179 eiter->len = cpu_to_be16(4 + alen);
2182 ql_dbg(ql_dbg_disc, vha, 0x20b4,
2183 "BIOS Name = %s\n", eiter->a.bios_name);
2185 /* Vendor Identifier */
2186 eiter = entries + size;
2187 eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
2188 snprintf(eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
2190 alen = strlen(eiter->a.vendor_identifier);
2191 alen += 4 - (alen & 3);
2192 eiter->len = cpu_to_be16(4 + alen);
2195 ql_dbg(ql_dbg_disc, vha, 0x201b,
2196 "Vendor Identifier = %s.\n", eiter->a.vendor_identifier);
2198 /* Update MS request size. */
/* +16 accounts for the CT_IU header preceding the payload. */
2199 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2201 ql_dbg(ql_dbg_disc, vha, 0x20b5,
2202 "RHBA identifier = %016llx.\n",
2203 wwn_to_u64(ct_req->req.rhba2.hba_identifier));
2204 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6,
2207 /* Execute MS IOCB */
2208 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2209 sizeof(ms_iocb_entry_t));
2210 if (rval != QLA_SUCCESS) {
2212 ql_dbg(ql_dbg_disc, vha, 0x20b7,
2213 "RHBA issue IOCB failed (%d).\n", rval);
2214 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
2216 rval = QLA_FUNCTION_FAILED;
/* "Already registered" is a distinct, non-fatal outcome. */
2218 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2219 ct_rsp->header.explanation_code ==
2220 CT_EXPL_ALREADY_REGISTERED) {
2221 ql_dbg(ql_dbg_disc, vha, 0x20b8,
2222 "HBA already registered.\n");
2223 rval = QLA_ALREADY_REGISTERED;
2225 ql_dbg(ql_dbg_disc, vha, 0x2016,
2226 "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2227 ct_rsp->header.reason_code,
2228 ct_rsp->header.explanation_code);
2231 ql_dbg(ql_dbg_disc, vha, 0x20b9,
2232 "RHBA FDMI V2 exiting normally.\n");
2239 * qla2x00_fdmi_dhba() -
2242 * Returns 0 on success.
/*
 * qla2x00_fdmi_dhba() - issue an FDMI DHBA (Deregister HBA) request for this
 * host's port name, removing any prior RHBA registration.
 * NOTE(review): extract is missing interior lines (rval declaration, the
 * DHBA_RSP_SIZE argument continuation, braces).
 */
2245 qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
2248 struct qla_hw_data *ha = vha->hw;
2249 ms_iocb_entry_t *ms_pkt;
2250 struct ct_sns_req *ct_req;
2251 struct ct_sns_rsp *ct_rsp;
2254 /* Prepare common MS IOCB */
2255 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
2258 /* Prepare CT request */
2259 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
2260 ct_rsp = &ha->ct_sns->p.rsp;
2262 /* Prepare FDMI command arguments -- portname. */
2263 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
2265 ql_dbg(ql_dbg_disc, vha, 0x2036,
2266 "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
2268 /* Execute MS IOCB */
2269 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2270 sizeof(ms_iocb_entry_t));
2271 if (rval != QLA_SUCCESS) {
2273 ql_dbg(ql_dbg_disc, vha, 0x2037,
2274 "DHBA issue IOCB failed (%d).\n", rval);
2275 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
2277 rval = QLA_FUNCTION_FAILED;
2279 ql_dbg(ql_dbg_disc, vha, 0x2038,
2280 "DHBA exiting normally.\n");
2287 * qla2x00_fdmiv2_rpa() -
2290 * Returns 0 on success.
2293 qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
2297 struct qla_hw_data *ha = vha->hw;
2298 ms_iocb_entry_t *ms_pkt;
2299 struct ct_sns_req *ct_req;
2300 struct ct_sns_rsp *ct_rsp;
2302 struct ct_fdmiv2_port_attr *eiter;
2303 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
2304 struct new_utsname *p_sysid = NULL;
2307 /* Prepare common MS IOCB */
2308 /* Request size adjusted after CT preparation */
2309 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
2311 /* Prepare CT request */
2312 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, RPA_RSP_SIZE);
2313 ct_rsp = &ha->ct_sns->p.rsp;
2315 /* Prepare FDMI command arguments -- attribute block, attributes. */
2316 memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
2317 size = WWN_SIZE + 4;
2320 ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
2321 entries = &ct_req->req;
2324 eiter = entries + size;
2325 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
2326 eiter->len = cpu_to_be16(4 + 32);
2327 eiter->a.fc4_types[2] = 0x01;
2330 ql_dbg(ql_dbg_disc, vha, 0x20ba,
2331 "FC4_TYPES=%02x %02x.\n",
2332 eiter->a.fc4_types[2],
2333 eiter->a.fc4_types[1]);
2335 if (vha->flags.nvme_enabled) {
2336 eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
2337 ql_dbg(ql_dbg_disc, vha, 0x211f,
2338 "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
2339 eiter->a.fc4_types[6]);
2342 /* Supported speed. */
2343 eiter = entries + size;
2344 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
2345 eiter->len = cpu_to_be16(4 + 4);
2346 if (IS_CNA_CAPABLE(ha))
2347 eiter->a.sup_speed = cpu_to_be32(
2348 FDMI_PORT_SPEED_10GB);
2349 else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
2350 eiter->a.sup_speed = cpu_to_be32(
2351 FDMI_PORT_SPEED_32GB|
2352 FDMI_PORT_SPEED_16GB|
2353 FDMI_PORT_SPEED_8GB);
2354 else if (IS_QLA2031(ha))
2355 eiter->a.sup_speed = cpu_to_be32(
2356 FDMI_PORT_SPEED_16GB|
2357 FDMI_PORT_SPEED_8GB|
2358 FDMI_PORT_SPEED_4GB);
2359 else if (IS_QLA25XX(ha))
2360 eiter->a.sup_speed = cpu_to_be32(
2361 FDMI_PORT_SPEED_8GB|
2362 FDMI_PORT_SPEED_4GB|
2363 FDMI_PORT_SPEED_2GB|
2364 FDMI_PORT_SPEED_1GB);
2365 else if (IS_QLA24XX_TYPE(ha))
2366 eiter->a.sup_speed = cpu_to_be32(
2367 FDMI_PORT_SPEED_4GB|
2368 FDMI_PORT_SPEED_2GB|
2369 FDMI_PORT_SPEED_1GB);
2370 else if (IS_QLA23XX(ha))
2371 eiter->a.sup_speed = cpu_to_be32(
2372 FDMI_PORT_SPEED_2GB|
2373 FDMI_PORT_SPEED_1GB);
2375 eiter->a.sup_speed = cpu_to_be32(
2376 FDMI_PORT_SPEED_1GB);
2379 ql_dbg(ql_dbg_disc, vha, 0x20bb,
2380 "Supported Port Speed = %x.\n", eiter->a.sup_speed);
2382 /* Current speed. */
2383 eiter = entries + size;
2384 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
2385 eiter->len = cpu_to_be16(4 + 4);
2386 switch (ha->link_data_rate) {
2387 case PORT_SPEED_1GB:
2388 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB);
2390 case PORT_SPEED_2GB:
2391 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB);
2393 case PORT_SPEED_4GB:
2394 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB);
2396 case PORT_SPEED_8GB:
2397 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB);
2399 case PORT_SPEED_10GB:
2400 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB);
2402 case PORT_SPEED_16GB:
2403 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB);
2405 case PORT_SPEED_32GB:
2406 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB);
2409 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
2414 ql_dbg(ql_dbg_disc, vha, 0x2017,
2415 "Current_Speed = %x.\n", eiter->a.cur_speed);
2417 /* Max frame size. */
2418 eiter = entries + size;
2419 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
2420 eiter->len = cpu_to_be16(4 + 4);
2421 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
2422 le16_to_cpu(icb24->frame_payload_size) :
2423 le16_to_cpu(ha->init_cb->frame_payload_size);
2424 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
2427 ql_dbg(ql_dbg_disc, vha, 0x20bc,
2428 "Max_Frame_Size = %x.\n", eiter->a.max_frame_size);
2430 /* OS device name. */
2431 eiter = entries + size;
2432 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
2433 alen = strlen(QLA2XXX_DRIVER_NAME);
2434 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
2435 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
2436 alen += 4 - (alen & 3);
2437 eiter->len = cpu_to_be16(4 + alen);
2440 ql_dbg(ql_dbg_disc, vha, 0x20be,
2441 "OS_Device_Name = %s.\n", eiter->a.os_dev_name);
2444 eiter = entries + size;
2445 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
2446 p_sysid = utsname();
2448 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2449 "%s", p_sysid->nodename);
2451 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2452 "%s", fc_host_system_hostname(vha->host));
2454 alen = strlen(eiter->a.host_name);
2455 alen += 4 - (alen & 3);
2456 eiter->len = cpu_to_be16(4 + alen);
2459 ql_dbg(ql_dbg_disc, vha, 0x201a,
2460 "HostName=%s.\n", eiter->a.host_name);
2463 eiter = entries + size;
2464 eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
2465 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
2466 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2467 size += 4 + WWN_SIZE;
2469 ql_dbg(ql_dbg_disc, vha, 0x20c0,
2470 "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name));
2473 eiter = entries + size;
2474 eiter->type = cpu_to_be16(FDMI_PORT_NAME);
2475 memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE);
2476 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2477 size += 4 + WWN_SIZE;
2479 ql_dbg(ql_dbg_disc, vha, 0x20c1,
2480 "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name));
2482 /* Port Symbolic Name */
2483 eiter = entries + size;
2484 eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
2485 qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
2486 sizeof(eiter->a.port_sym_name));
2487 alen = strlen(eiter->a.port_sym_name);
2488 alen += 4 - (alen & 3);
2489 eiter->len = cpu_to_be16(4 + alen);
2492 ql_dbg(ql_dbg_disc, vha, 0x20c2,
2493 "port symbolic name = %s\n", eiter->a.port_sym_name);
2496 eiter = entries + size;
2497 eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
2498 eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
2499 eiter->len = cpu_to_be16(4 + 4);
2502 ql_dbg(ql_dbg_disc, vha, 0x20c3,
2503 "Port Type = %x.\n", eiter->a.port_type);
2505 /* Class of Service */
2506 eiter = entries + size;
2507 eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
2508 eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
2509 eiter->len = cpu_to_be16(4 + 4);
2512 ql_dbg(ql_dbg_disc, vha, 0x20c4,
2513 "Supported COS = %08x\n", eiter->a.port_supported_cos);
2515 /* Port Fabric Name */
2516 eiter = entries + size;
2517 eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
2518 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2519 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2520 size += 4 + WWN_SIZE;
2522 ql_dbg(ql_dbg_disc, vha, 0x20c5,
2523 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
2526 eiter = entries + size;
2527 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
2528 eiter->a.port_fc4_type[0] = 0;
2529 eiter->a.port_fc4_type[1] = 0;
2530 eiter->a.port_fc4_type[2] = 1;
2531 eiter->a.port_fc4_type[3] = 0;
2532 eiter->len = cpu_to_be16(4 + 32);
2535 ql_dbg(ql_dbg_disc, vha, 0x20c6,
2536 "Port Active FC4 Type = %02x %02x.\n",
2537 eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
2539 if (vha->flags.nvme_enabled) {
2540 eiter->a.port_fc4_type[4] = 0;
2541 eiter->a.port_fc4_type[5] = 0;
2542 eiter->a.port_fc4_type[6] = 1; /* NVMe type 28h */
2543 ql_dbg(ql_dbg_disc, vha, 0x2120,
2544 "NVME Port Active FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
2545 eiter->a.port_fc4_type[6]);
2549 eiter = entries + size;
2550 eiter->type = cpu_to_be16(FDMI_PORT_STATE);
2551 eiter->a.port_state = cpu_to_be32(1);
2552 eiter->len = cpu_to_be16(4 + 4);
2555 ql_dbg(ql_dbg_disc, vha, 0x20c7,
2556 "Port State = %x.\n", eiter->a.port_state);
2558 /* Number of Ports */
2559 eiter = entries + size;
2560 eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
2561 eiter->a.num_ports = cpu_to_be32(1);
2562 eiter->len = cpu_to_be16(4 + 4);
2565 ql_dbg(ql_dbg_disc, vha, 0x20c8,
2566 "Number of ports = %x.\n", eiter->a.num_ports);
2569 eiter = entries + size;
2570 eiter->type = cpu_to_be16(FDMI_PORT_ID);
2571 eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
2572 eiter->len = cpu_to_be16(4 + 4);
2575 ql_dbg(ql_dbg_disc, vha, 0x201c,
2576 "Port Id = %x.\n", eiter->a.port_id);
2578 /* Update MS request size. */
2579 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2581 ql_dbg(ql_dbg_disc, vha, 0x2018,
2582 "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
2583 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
2586 /* Execute MS IOCB */
2587 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2588 sizeof(ms_iocb_entry_t));
2589 if (rval != QLA_SUCCESS) {
2591 ql_dbg(ql_dbg_disc, vha, 0x20cb,
2592 "RPA FDMI v2 issue IOCB failed (%d).\n", rval);
2593 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
2595 rval = QLA_FUNCTION_FAILED;
2596 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2597 ct_rsp->header.explanation_code ==
2598 CT_EXPL_ALREADY_REGISTERED) {
2599 ql_dbg(ql_dbg_disc, vha, 0x20ce,
2600 "RPA FDMI v2 already registered\n");
2601 rval = QLA_ALREADY_REGISTERED;
2603 ql_dbg(ql_dbg_disc, vha, 0x2020,
2604 "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2605 ct_rsp->header.reason_code,
2606 ct_rsp->header.explanation_code);
2609 ql_dbg(ql_dbg_disc, vha, 0x20cc,
2610 "RPA FDMI V2 exiting normally.\n");
/*
 * qla2x00_fdmi_register - register the HBA with the fabric FDMI server.
 *
 * Logs in to the management server, then attempts FDMI v2 registration
 * (RHBA + RPA).  On an "already registered" reply it deregisters (DHBA)
 * and retries; on other v2 failures it falls back to the FDMI v1
 * registration path (rhba/rpa).  Early-outs on ISP2100/ISP2200 parts,
 * which do not support FDMI.
 *
 * NOTE(review): several lines (braces, else-arms) are missing from this
 * listing; the flow described above is inferred and should be confirmed.
 */
2617 * qla2x00_fdmi_register() -
2620 * Returns 0 on success.
2623 qla2x00_fdmi_register(scsi_qla_host_t *vha)
2625 int rval = QLA_FUNCTION_FAILED;
2626 struct qla_hw_data *ha = vha->hw;
2628 if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
2630 return QLA_FUNCTION_FAILED;
2632 rval = qla2x00_mgmt_svr_login(vha);
2636 rval = qla2x00_fdmiv2_rhba(vha);
2638 if (rval != QLA_ALREADY_REGISTERED)
2641 rval = qla2x00_fdmi_dhba(vha);
2645 rval = qla2x00_fdmiv2_rhba(vha);
2649 rval = qla2x00_fdmiv2_rpa(vha);
2656 rval = qla2x00_fdmi_rhba(vha);
2658 if (rval != QLA_ALREADY_REGISTERED)
2661 rval = qla2x00_fdmi_dhba(vha);
2665 rval = qla2x00_fdmi_rhba(vha);
2669 rval = qla2x00_fdmi_rpa(vha);
/*
 * qla2x00_gfpn_id - issue GFPN_ID (Get Fabric Port Name) for each entry.
 *
 * For every entry in @list (up to ha->max_fibre_devices), sends a
 * name-server GFPN_ID CT query keyed by the entry's port ID and stores
 * the returned fabric port name in list[i].fabric_port_name.  Requires
 * iIDMA-capable hardware.  The loop terminates early at the entry whose
 * d_id.b.rsvd_1 is non-zero (last-device marker).
 */
2675 * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
2677 * @list: switch info entries to populate
2679 * Returns 0 on success.
2682 qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
2684 int rval = QLA_SUCCESS;
2686 struct qla_hw_data *ha = vha->hw;
2687 ms_iocb_entry_t *ms_pkt;
2688 struct ct_sns_req *ct_req;
2689 struct ct_sns_rsp *ct_rsp;
2692 if (!IS_IIDMA_CAPABLE(ha))
2693 return QLA_FUNCTION_FAILED;
2695 arg.iocb = ha->ms_iocb;
2696 arg.req_dma = ha->ct_sns_dma;
2697 arg.rsp_dma = ha->ct_sns_dma;
2698 arg.req_size = GFPN_ID_REQ_SIZE;
2699 arg.rsp_size = GFPN_ID_RSP_SIZE;
2700 arg.nport_handle = NPH_SNS;
2702 for (i = 0; i < ha->max_fibre_devices; i++) {
2704 /* Prepare common MS IOCB */
2705 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2707 /* Prepare CT request */
2708 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD,
2710 ct_rsp = &ha->ct_sns->p.rsp;
2712 /* Prepare CT arguments -- port_id */
2713 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2715 /* Execute MS IOCB */
2716 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2717 sizeof(ms_iocb_entry_t));
2718 if (rval != QLA_SUCCESS) {
2720 ql_dbg(ql_dbg_disc, vha, 0x2023,
2721 "GFPN_ID issue IOCB failed (%d).\n", rval);
2723 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2724 "GFPN_ID") != QLA_SUCCESS) {
2725 rval = QLA_FUNCTION_FAILED;
2728 /* Save fabric portname */
2729 memcpy(list[i].fabric_port_name,
2730 ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE);
2733 /* Last device exit. */
2734 if (list[i].d_id.b.rsvd_1 != 0)
/*
 * qla24xx_prep_ct_fm_req - zero @p and fill the CT_IU preamble for a
 * Fabric Management (gs_type 0xFA) request.  max_rsp_size is encoded in
 * 4-byte words, excluding the 16-byte CT header, per the CT_IU format.
 */
static inline struct ct_sns_req *
2743 qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
2746 memset(p, 0, sizeof(struct ct_sns_pkt));
2748 p->p.req.header.revision = 0x01;
2749 p->p.req.header.gs_type = 0xFA;
2750 p->p.req.header.gs_subtype = 0x01;
2751 p->p.req.command = cpu_to_be16(cmd);
2752 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
/*
 * qla2x00_port_speed_capability - map a GPSC speed bit to a PORT_SPEED_*
 * constant.  Returns PORT_SPEED_UNKNOWN for unrecognized values.
 * NOTE(review): the case labels are missing from this listing; only the
 * return statements are visible.
 */
qla2x00_port_speed_capability(uint16_t speed)
2762 return PORT_SPEED_1GB;
2764 return PORT_SPEED_2GB;
2766 return PORT_SPEED_4GB;
2768 return PORT_SPEED_10GB;
2770 return PORT_SPEED_8GB;
2772 return PORT_SPEED_16GB;
2774 return PORT_SPEED_32GB;
2776 return PORT_SPEED_64GB;
2778 return PORT_SPEED_UNKNOWN;
/*
 * qla2x00_gpsc - query the fabric management server for each port's
 * speed capabilities (GPSC), storing the decoded speed in fp_speed.
 *
 * Requires iIDMA-capable hardware and a switch that supports GPSC; when
 * the switch rejects the command as unsupported, gpsc_supported is
 * cleared so the query is not retried.  Loop ends at the entry whose
 * d_id.b.rsvd_1 is non-zero (last-device marker).
 */
 * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
2785 * @list: switch info entries to populate
2787 * Returns 0 on success.
2790 qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
2794 struct qla_hw_data *ha = vha->hw;
2795 ms_iocb_entry_t *ms_pkt;
2796 struct ct_sns_req *ct_req;
2797 struct ct_sns_rsp *ct_rsp;
2800 if (!IS_IIDMA_CAPABLE(ha))
2801 return QLA_FUNCTION_FAILED;
2802 if (!ha->flags.gpsc_supported)
2803 return QLA_FUNCTION_FAILED;
2805 rval = qla2x00_mgmt_svr_login(vha);
2809 arg.iocb = ha->ms_iocb;
2810 arg.req_dma = ha->ct_sns_dma;
2811 arg.rsp_dma = ha->ct_sns_dma;
2812 arg.req_size = GPSC_REQ_SIZE;
2813 arg.rsp_size = GPSC_RSP_SIZE;
2814 arg.nport_handle = vha->mgmt_svr_loop_id;
2816 for (i = 0; i < ha->max_fibre_devices; i++) {
2818 /* Prepare common MS IOCB */
2819 ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
2821 /* Prepare CT request */
2822 ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
2824 ct_rsp = &ha->ct_sns->p.rsp;
2826 /* Prepare CT arguments -- port_name */
2827 memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
2830 /* Execute MS IOCB */
2831 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2832 sizeof(ms_iocb_entry_t));
2833 if (rval != QLA_SUCCESS) {
2835 ql_dbg(ql_dbg_disc, vha, 0x2059,
2836 "GPSC issue IOCB failed (%d).\n", rval);
2837 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2838 "GPSC")) != QLA_SUCCESS) {
2839 /* FM command unsupported? */
2840 if (rval == QLA_INVALID_COMMAND &&
2841 (ct_rsp->header.reason_code ==
2842 CT_REASON_INVALID_COMMAND_CODE ||
2843 ct_rsp->header.reason_code ==
2844 CT_REASON_COMMAND_UNSUPPORTED)) {
2845 ql_dbg(ql_dbg_disc, vha, 0x205a,
2846 "GPSC command unsupported, disabling "
2848 ha->flags.gpsc_supported = 0;
2849 rval = QLA_FUNCTION_FAILED;
2852 rval = QLA_FUNCTION_FAILED;
/*
 * NOTE(review): this writes list->fp_speed, i.e. element 0, on every
 * iteration even though the rest of the loop body addresses list[i]
 * (request port name, debug print).  Looks like it should be
 * list[i].fp_speed -- confirm against the rest of the driver.
 */
2854 list->fp_speed = qla2x00_port_speed_capability(
2855 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2856 ql_dbg(ql_dbg_disc, vha, 0x205b,
2857 "GPSC ext entry - fpn "
2858 "%8phN speeds=%04x speed=%04x.\n",
2859 list[i].fabric_port_name,
2860 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2861 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2864 /* Last device exit. */
2865 if (list[i].d_id.b.rsvd_1 != 0)
/*
 * qla2x00_gff_id - issue GFF_ID (Get FC-4 Features) per switch entry.
 *
 * Each entry defaults to FC4_TYPE_UNKNOWN so un-queried ports are still
 * processed.  On a successful response the FCP-SCSI and NVMe 4-bit
 * feature nibbles are extracted from the FC-4 Features array and used
 * to set fc4_type/fc4_features.  Skipped entirely on non-FWI2 parts.
 */
 * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
2876 * @list: switch info entries to populate
2880 qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
2885 ms_iocb_entry_t *ms_pkt;
2886 struct ct_sns_req *ct_req;
2887 struct ct_sns_rsp *ct_rsp;
2888 struct qla_hw_data *ha = vha->hw;
2889 uint8_t fcp_scsi_features = 0, nvme_features = 0;
2892 for (i = 0; i < ha->max_fibre_devices; i++) {
2893 /* Set default FC4 Type as UNKNOWN so the default is to
2894 * Process this port */
2895 list[i].fc4_type = FC4_TYPE_UNKNOWN;
2897 /* Do not attempt GFF_ID if we are not FWI_2 capable */
2898 if (!IS_FWI2_CAPABLE(ha))
2901 arg.iocb = ha->ms_iocb;
2902 arg.req_dma = ha->ct_sns_dma;
2903 arg.rsp_dma = ha->ct_sns_dma;
2904 arg.req_size = GFF_ID_REQ_SIZE;
2905 arg.rsp_size = GFF_ID_RSP_SIZE;
2906 arg.nport_handle = NPH_SNS;
2908 /* Prepare common MS IOCB */
2909 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2911 /* Prepare CT request */
2912 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD,
2914 ct_rsp = &ha->ct_sns->p.rsp;
2916 /* Prepare CT arguments -- port_id */
2917 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2919 /* Execute MS IOCB */
2920 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2921 sizeof(ms_iocb_entry_t));
2923 if (rval != QLA_SUCCESS) {
2924 ql_dbg(ql_dbg_disc, vha, 0x205c,
2925 "GFF_ID issue IOCB failed (%d).\n", rval);
2926 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2927 "GFF_ID") != QLA_SUCCESS) {
2928 ql_dbg(ql_dbg_disc, vha, 0x205d,
2929 "GFF_ID IOCB status had a failure status code.\n");
2932 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
2933 fcp_scsi_features &= 0x0f;
2935 if (fcp_scsi_features) {
2936 list[i].fc4_type = FS_FC4TYPE_FCP;
2937 list[i].fc4_features = fcp_scsi_features;
2941 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
2942 nvme_features &= 0xf;
2944 if (nvme_features) {
2945 list[i].fc4_type |= FS_FC4TYPE_NVME;
2946 list[i].fc4_features = nvme_features;
2950 /* Last device exit. */
2951 if (list[i].d_id.b.rsvd_1 != 0)
/*
 * qla24xx_post_gpsc_work - queue a deferred GPSC query for @fcport.
 * Marks the port FCF_ASYNC_ACTIVE before posting the QLA_EVT_GPSC work
 * item; returns QLA_FUNCTION_FAILED if the work event cannot be
 * allocated.
 */
int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
2958 struct qla_work_evt *e;
2960 e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
2962 return QLA_FUNCTION_FAILED;
2964 e->u.fcport.fcport = fcport;
2965 fcport->flags |= FCF_ASYNC_ACTIVE;
2966 return qla2x00_post_work(vha, e);
/*
 * qla24xx_handle_gpsc_event - post-process a completed GPSC query.
 * Bails out if the port is pending deletion, or if the login/RSCN
 * generation counters changed while the query was in flight (state is
 * stale); otherwise schedules iIDMA work to apply the learned speed.
 */
void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
2971 struct fc_port *fcport = ea->fcport;
2973 ql_dbg(ql_dbg_disc, vha, 0x20d8,
2974 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
2975 __func__, fcport->port_name, fcport->disc_state,
2976 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
2977 ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, fcport->loop_id);
2979 if (fcport->disc_state == DSC_DELETE_PEND)
2982 if (ea->sp->gen2 != fcport->login_gen) {
2983 /* target side must have changed it. */
2984 ql_dbg(ql_dbg_disc, vha, 0x20d3,
2985 "%s %8phC generation changed\n",
2986 __func__, fcport->port_name);
2988 } else if (ea->sp->gen1 != fcport->rscn_gen) {
2992 qla_post_iidma_work(vha, fcport);
/*
 * qla24xx_async_gpsc_sp_done - completion callback for an async GPSC.
 * Clears the port's async flags, disables further GPSC queries when the
 * switch reports the command unsupported, decodes the returned speed
 * into fcport->fp_speed, and hands the result to the GPSC event handler.
 */
static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res)
2997 struct scsi_qla_host *vha = sp->vha;
2998 struct qla_hw_data *ha = vha->hw;
2999 fc_port_t *fcport = sp->fcport;
3000 struct ct_sns_rsp *ct_rsp;
3001 struct event_arg ea;
3003 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3005 ql_dbg(ql_dbg_disc, vha, 0x2053,
3006 "Async done-%s res %x, WWPN %8phC \n",
3007 sp->name, res, fcport->port_name);
3009 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3011 if (res == QLA_FUNCTION_TIMEOUT)
3014 if (res == (DID_ERROR << 16)) {
3015 /* entry status error */
3018 if ((ct_rsp->header.reason_code ==
3019 CT_REASON_INVALID_COMMAND_CODE) ||
3020 (ct_rsp->header.reason_code ==
3021 CT_REASON_COMMAND_UNSUPPORTED)) {
3022 ql_dbg(ql_dbg_disc, vha, 0x2019,
3023 "GPSC command unsupported, disabling query.\n");
3024 ha->flags.gpsc_supported = 0;
3028 fcport->fp_speed = qla2x00_port_speed_capability(
3029 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
3031 ql_dbg(ql_dbg_disc, vha, 0x2054,
3032 "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
3033 sp->name, fcport->fabric_port_name,
3034 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
3035 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
3037 memset(&ea, 0, sizeof(ea));
3041 qla24xx_handle_gpsc_event(vha, &ea);
/*
 * qla24xx_async_gpsc - issue an asynchronous GPSC (port speed) query for
 * @fcport via a CT pass-through SRB aimed at the management server.
 *
 * Skipped when the host is offline or a query is already in flight
 * (FCF_ASYNC_SENT).  The per-port ct_desc buffer doubles as both CT
 * request and response DMA area.  RSCN/login generation counters are
 * snapshotted into the SRB so the completion can detect staleness.
 * On start failure the async flags are cleared (error-path lines are
 * partially missing from this listing).
 */
int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
3049 int rval = QLA_FUNCTION_FAILED;
3050 struct ct_sns_req *ct_req;
3053 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3056 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3060 sp->type = SRB_CT_PTHRU_CMD;
3062 sp->gen1 = fcport->rscn_gen;
3063 sp->gen2 = fcport->login_gen;
3065 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3067 /* CT_IU preamble */
3068 ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
3072 memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
3075 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3076 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3077 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3078 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3079 sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE;
3080 sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
3081 sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
3083 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3084 sp->done = qla24xx_async_gpsc_sp_done;
3086 ql_dbg(ql_dbg_disc, vha, 0x205e,
3087 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
3088 sp->name, fcport->port_name, sp->handle,
3089 fcport->loop_id, fcport->d_id.b.domain,
3090 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3092 rval = qla2x00_start_sp(sp);
3093 if (rval != QLA_SUCCESS)
3099 fcport->flags &= ~FCF_ASYNC_SENT;
3101 fcport->flags &= ~FCF_ASYNC_ACTIVE;
/*
 * qla24xx_post_gpnid_work - queue a deferred GPN_ID query for port @id.
 * Refuses to queue while the driver is unloading, or while a vport is
 * being deleted (for non-zero vp_idx).
 */
int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
3107 struct qla_work_evt *e;
3109 if (test_bit(UNLOADING, &vha->dpc_flags) ||
3110 (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)))
3113 e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
3115 return QLA_FUNCTION_FAILED;
3117 e->u.gpnid.id = *id;
3118 return qla2x00_post_work(vha, e);
/*
 * qla24xx_sp_unmap - release the DMA buffers attached to an SRB.
 * For ELS PLOGI SRBs it frees via qla2x00_els_dcmd2_free(); for CT
 * pass-through SRBs it frees the request and response coherent buffers
 * and NULLs the pointers to guard against double-free.
 */
void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
3123 struct srb_iocb *c = &sp->u.iocb_cmd;
3127 qla2x00_els_dcmd2_free(vha, &c->u.els_plogi);
3129 case SRB_CT_PTHRU_CMD:
3131 if (sp->u.iocb_cmd.u.ctarg.req) {
3132 dma_free_coherent(&vha->hw->pdev->dev,
3133 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3134 sp->u.iocb_cmd.u.ctarg.req,
3135 sp->u.iocb_cmd.u.ctarg.req_dma);
3136 sp->u.iocb_cmd.u.ctarg.req = NULL;
3139 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3140 dma_free_coherent(&vha->hw->pdev->dev,
3141 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3142 sp->u.iocb_cmd.u.ctarg.rsp,
3143 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3144 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * qla24xx_handle_gpnid_event - act on a completed GPN_ID query.
 *
 * Three outcomes:
 *  - query failed (cable pulled): mark/delete any fcport at that ID;
 *  - WWPN already known: resolve Nport-ID conflicts with other fcports,
 *    then re-login / revalidate (ADISC) per current disc_state;
 *  - WWPN unknown: delete any conflicting fcport at that ID and post
 *    new-session work to create the port.
 * Some branch/brace lines are missing from this listing.
 */
void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3154 fc_port_t *fcport, *conflict, *t;
3157 ql_dbg(ql_dbg_disc, vha, 0xffff,
3158 "%s %d port_id: %06x\n",
3159 __func__, __LINE__, ea->id.b24);
3162 /* cable is disconnected */
3163 list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
3164 if (fcport->d_id.b24 == ea->id.b24)
3165 fcport->scan_state = QLA_FCPORT_SCAN;
3167 qlt_schedule_sess_for_deletion(fcport);
3170 /* cable is connected */
3171 fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
3173 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3175 if ((conflict->d_id.b24 == ea->id.b24) &&
3176 (fcport != conflict))
3178 * 2 fcports with conflict Nport ID or
3179 * an existing fcport is having nport ID
3180 * conflict with new fcport.
3183 conflict->scan_state = QLA_FCPORT_SCAN;
3185 qlt_schedule_sess_for_deletion(conflict);
3188 fcport->scan_needed = 0;
3190 fcport->scan_state = QLA_FCPORT_FOUND;
3191 fcport->flags |= FCF_FABRIC_DEVICE;
3192 if (fcport->login_retry == 0) {
3193 fcport->login_retry =
3194 vha->hw->login_retry_count;
3195 ql_dbg(ql_dbg_disc, vha, 0xffff,
3196 "Port login retry %8phN, lid 0x%04x cnt=%d.\n",
3197 fcport->port_name, fcport->loop_id,
3198 fcport->login_retry);
3200 switch (fcport->disc_state) {
3201 case DSC_LOGIN_COMPLETE:
3202 /* recheck session is still intact. */
3203 ql_dbg(ql_dbg_disc, vha, 0x210d,
3204 "%s %d %8phC revalidate session with ADISC\n",
3205 __func__, __LINE__, fcport->port_name);
3206 data[0] = data[1] = 0;
3207 qla2x00_post_async_adisc_work(vha, fcport,
3211 ql_dbg(ql_dbg_disc, vha, 0x210d,
3212 "%s %d %8phC login\n", __func__, __LINE__,
3214 fcport->d_id = ea->id;
3215 qla24xx_fcport_handle_login(vha, fcport);
3217 case DSC_DELETE_PEND:
3218 fcport->d_id = ea->id;
3221 fcport->d_id = ea->id;
3225 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3227 if (conflict->d_id.b24 == ea->id.b24) {
3228 /* 2 fcports with conflict Nport ID or
3229 * an existing fcport is having nport ID
3230 * conflict with new fcport.
3232 ql_dbg(ql_dbg_disc, vha, 0xffff,
3233 "%s %d %8phC DS %d\n",
3235 conflict->port_name,
3236 conflict->disc_state);
3238 conflict->scan_state = QLA_FCPORT_SCAN;
3239 qlt_schedule_sess_for_deletion(conflict);
3243 /* create new fcport */
3244 ql_dbg(ql_dbg_disc, vha, 0x2065,
3245 "%s %d %8phC post new sess\n",
3246 __func__, __LINE__, ea->port_name);
3247 qla24xx_post_newsess_work(vha, &ea->id,
3248 ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN);
/*
 * qla2x00_async_gpnid_sp_done - completion callback for an async GPN_ID.
 *
 * Unlinks the SRB from vha->gpnid_list (under tgt.sess_lock), re-posts
 * the query on timeout or if another RSCN arrived meanwhile (sp->gen1),
 * otherwise dispatches the result to qla24xx_handle_gpnid_event().  The
 * CT request/response buffers are freed via QLA_EVT_UNMAP work, or
 * inline here when the work event cannot be allocated.
 */
static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res)
3255 struct scsi_qla_host *vha = sp->vha;
3256 struct ct_sns_req *ct_req =
3257 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3258 struct ct_sns_rsp *ct_rsp =
3259 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3260 struct event_arg ea;
3261 struct qla_work_evt *e;
3262 unsigned long flags;
3265 ql_dbg(ql_dbg_disc, vha, 0x2066,
3266 "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
3267 sp->name, res, sp->gen1, &ct_req->req.port_id.port_id,
3268 ct_rsp->rsp.gpn_id.port_name);
3270 ql_dbg(ql_dbg_disc, vha, 0x2066,
3271 "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
3272 sp->name, sp->gen1, &ct_req->req.port_id.port_id,
3273 ct_rsp->rsp.gpn_id.port_name);
3275 memset(&ea, 0, sizeof(ea));
3276 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
3278 ea.id = be_to_port_id(ct_req->req.port_id.port_id);
3281 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3282 list_del(&sp->elem);
3283 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3286 if (res == QLA_FUNCTION_TIMEOUT) {
3287 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3291 } else if (sp->gen1) {
3292 /* There was another RSCN for this Nport ID */
3293 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3298 qla24xx_handle_gpnid_event(vha, &ea);
3300 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
3302 /* please ignore kernel warning. otherwise, we have mem leak. */
3303 dma_free_coherent(&vha->hw->pdev->dev,
3304 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3305 sp->u.iocb_cmd.u.ctarg.req,
3306 sp->u.iocb_cmd.u.ctarg.req_dma);
3307 sp->u.iocb_cmd.u.ctarg.req = NULL;
3309 dma_free_coherent(&vha->hw->pdev->dev,
3310 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3311 sp->u.iocb_cmd.u.ctarg.rsp,
3312 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3313 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3320 qla2x00_post_work(vha, e);
/* Get WWPN with Nport ID. */
/*
 * qla24xx_async_gpnid - issue an async GPN_ID query for port ID @id.
 *
 * Deduplicates against in-flight queries on vha->gpnid_list before
 * allocating fresh coherent CT request/response buffers.  On start
 * failure the SRB is unlinked and both buffers freed.
 */
int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3326 int rval = QLA_FUNCTION_FAILED;
3327 struct ct_sns_req *ct_req;
3329 struct ct_sns_pkt *ct_sns;
3330 unsigned long flags;
3332 if (!vha->flags.online)
3335 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
3339 sp->type = SRB_CT_PTHRU_CMD;
3341 sp->u.iocb_cmd.u.ctarg.id = *id;
3343 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3345 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3346 list_for_each_entry(tsp, &vha->gpnid_list, elem) {
3347 if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
3349 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3354 list_add_tail(&sp->elem, &vha->gpnid_list);
3355 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3357 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3358 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
3360 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
3361 if (!sp->u.iocb_cmd.u.ctarg.req) {
3362 ql_log(ql_log_warn, vha, 0xd041,
3363 "Failed to allocate ct_sns request.\n");
3367 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
3368 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
3370 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
3371 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
3372 ql_log(ql_log_warn, vha, 0xd042,
3373 "Failed to allocate ct_sns request.\n")
3377 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
3378 memset(ct_sns, 0, sizeof(*ct_sns));
3380 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
3381 /* CT_IU preamble */
3382 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
3385 ct_req->req.port_id.port_id = port_id_to_be_id(*id);
3387 sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
3388 sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
3389 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3391 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3392 sp->done = qla2x00_async_gpnid_sp_done;
3394 ql_dbg(ql_dbg_disc, vha, 0x2067,
3395 "Async-%s hdl=%x ID %3phC.\n", sp->name,
3396 sp->handle, &ct_req->req.port_id.port_id);
3398 rval = qla2x00_start_sp(sp);
3399 if (rval != QLA_SUCCESS)
/*
 * NOTE(review): sp was added to gpnid_list above under
 * vha->hw->tgt.sess_lock, but this error path removes it under
 * vha->hw->vport_slock.  Confirm which lock is meant to guard
 * gpnid_list -- the mismatch looks like a locking bug.
 */
3405 spin_lock_irqsave(&vha->hw->vport_slock, flags);
3406 list_del(&sp->elem);
3407 spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
3409 if (sp->u.iocb_cmd.u.ctarg.req) {
3410 dma_free_coherent(&vha->hw->pdev->dev,
3411 sizeof(struct ct_sns_pkt),
3412 sp->u.iocb_cmd.u.ctarg.req,
3413 sp->u.iocb_cmd.u.ctarg.req_dma);
3414 sp->u.iocb_cmd.u.ctarg.req = NULL;
3416 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3417 dma_free_coherent(&vha->hw->pdev->dev,
3418 sizeof(struct ct_sns_pkt),
3419 sp->u.iocb_cmd.u.ctarg.rsp,
3420 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3421 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * qla24xx_handle_gffid_event - after a GFF_ID completes, continue
 * discovery for the port by posting GNL (get name list) work.
 */
void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3431 fc_port_t *fcport = ea->fcport;
3433 qla24xx_post_gnl_work(vha, fcport);
/*
 * qla24xx_async_gffid_sp_done - completion callback for an async GFF_ID.
 * Extracts the FCP-SCSI and NVMe FC-4 feature nibbles from the response
 * into fcport->fc4_type/fc4_features, then forwards to the GFF_ID event
 * handler.
 */
void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
3438 struct scsi_qla_host *vha = sp->vha;
3439 fc_port_t *fcport = sp->fcport;
3440 struct ct_sns_rsp *ct_rsp;
3441 struct event_arg ea;
3442 uint8_t fc4_scsi_feat;
3443 uint8_t fc4_nvme_feat;
3445 ql_dbg(ql_dbg_disc, vha, 0x2133,
3446 "Async done-%s res %x ID %x. %8phC\n",
3447 sp->name, res, fcport->d_id.b24, fcport->port_name);
3449 fcport->flags &= ~FCF_ASYNC_SENT;
3450 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3451 fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
3452 fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
3455 * FC-GS-7, 5.2.3.12 FC-4 Features - format
3456 * The format of the FC-4 Features object, as defined by the FC-4,
3457 * Shall be an array of 4-bit values, one for each type code value
3460 if (fc4_scsi_feat & 0xf) {
3462 fcport->fc4_type = FS_FC4TYPE_FCP;
3463 fcport->fc4_features = fc4_scsi_feat & 0xf;
3466 if (fc4_nvme_feat & 0xf) {
3467 /* w5 [00:03]/28h */
3468 fcport->fc4_type |= FS_FC4TYPE_NVME;
3469 fcport->fc4_features = fc4_nvme_feat & 0xf;
3473 memset(&ea, 0, sizeof(ea));
3475 ea.fcport = sp->fcport;
3478 qla24xx_handle_gffid_event(vha, &ea);
/* Get FC4 Feature with Nport ID. */
/*
 * qla24xx_async_gffid - issue an async GFF_ID query for @fcport.
 * Uses the per-port ct_desc buffer for both request and response,
 * targeting the name server (NPH_SNS).  Skipped when offline or a
 * query is already outstanding; FCF_ASYNC_SENT is cleared on start
 * failure.
 */
int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
3485 int rval = QLA_FUNCTION_FAILED;
3486 struct ct_sns_req *ct_req;
3489 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3492 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3496 fcport->flags |= FCF_ASYNC_SENT;
3497 sp->type = SRB_CT_PTHRU_CMD;
3499 sp->gen1 = fcport->rscn_gen;
3500 sp->gen2 = fcport->login_gen;
3502 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3503 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3505 /* CT_IU preamble */
3506 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
3509 ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
3510 ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
3511 ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
3513 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3514 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3515 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3516 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3517 sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
3518 sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
3519 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3521 sp->done = qla24xx_async_gffid_sp_done;
3523 ql_dbg(ql_dbg_disc, vha, 0x2132,
3524 "Async-%s hdl=%x %8phC.\n", sp->name,
3525 sp->handle, fcport->port_name);
3527 rval = qla2x00_start_sp(sp);
3528 if (rval != QLA_SUCCESS)
3534 fcport->flags &= ~FCF_ASYNC_SENT;
/* GPN_FT + GNN_FT*/
/*
 * qla2x00_is_a_vp - return whether @wwn matches the port name of any
 * virtual port on this HBA.  Walks ha->vp_list under vport_slock;
 * short-circuits when no vhosts exist.
 */
static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
3541 struct qla_hw_data *ha = vha->hw;
3542 scsi_qla_host_t *vp;
3543 unsigned long flags;
3547 if (!ha->num_vhosts)
3550 spin_lock_irqsave(&ha->vport_slock, flags);
3551 list_for_each_entry(vp, &ha->vp_list, list) {
3552 twwn = wwn_to_u64(vp->port_name);
3558 spin_unlock_irqrestore(&ha->vport_slock, flags);
3563 void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3568 struct fab_scan_rp *rp, *trp;
3569 unsigned long flags;
3571 u16 dup = 0, dup_cnt = 0;
3573 ql_dbg(ql_dbg_disc, vha, 0xffff,
3574 "%s enter\n", __func__);
3576 if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
3577 ql_dbg(ql_dbg_disc, vha, 0xffff,
3578 "%s scan stop due to chip reset %x/%x\n",
3579 sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
3585 vha->scan.scan_retry++;
3586 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3587 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3588 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3590 ql_dbg(ql_dbg_disc, vha, 0xffff,
3591 "Fabric scan failed on all retries.\n");
3595 vha->scan.scan_retry = 0;
3597 list_for_each_entry(fcport, &vha->vp_fcports, list)
3598 fcport->scan_state = QLA_FCPORT_SCAN;
3600 for (i = 0; i < vha->hw->max_fibre_devices; i++) {
3604 rp = &vha->scan.l[i];
3607 wwn = wwn_to_u64(rp->port_name);
3611 /* Remove duplicate NPORT ID entries from switch data base */
3612 for (k = i + 1; k < vha->hw->max_fibre_devices; k++) {
3613 trp = &vha->scan.l[k];
3614 if (rp->id.b24 == trp->id.b24) {
3617 ql_dbg(ql_dbg_disc + ql_dbg_verbose,
3619 "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n",
3620 rp->id.b24, rp->port_name, trp->port_name);
3621 memset(trp, 0, sizeof(*trp));
3625 if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
3628 /* Bypass reserved domain fields. */
3629 if ((rp->id.b.domain & 0xf0) == 0xf0)
3632 /* Bypass virtual ports of the same host. */
3633 if (qla2x00_is_a_vp(vha, wwn))
3636 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3637 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
3639 fcport->scan_state = QLA_FCPORT_FOUND;
3642 * If device was not a fabric device before.
3644 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3645 qla2x00_clear_loop_id(fcport);
3646 fcport->flags |= FCF_FABRIC_DEVICE;
3647 } else if (fcport->d_id.b24 != rp->id.b24 ||
3648 (fcport->scan_needed &&
3649 fcport->port_type != FCT_INITIATOR &&
3650 fcport->port_type != FCT_NVME_INITIATOR)) {
3651 qlt_schedule_sess_for_deletion(fcport);
3653 fcport->d_id.b24 = rp->id.b24;
3654 fcport->scan_needed = 0;
3659 ql_dbg(ql_dbg_disc, vha, 0xffff,
3660 "%s %d %8phC post new sess\n",
3661 __func__, __LINE__, rp->port_name);
3662 qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
3663 rp->node_name, NULL, rp->fc4type);
3668 ql_log(ql_log_warn, vha, 0xffff,
3669 "Detected %d duplicate NPORT ID(s) from switch data base\n",
3674 * Logout all previous fabric dev marked lost, except FCP2 devices.
3676 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3677 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3678 fcport->scan_needed = 0;
3682 if (fcport->scan_state != QLA_FCPORT_FOUND) {
3683 bool do_delete = false;
3685 if (fcport->scan_needed &&
3686 fcport->disc_state == DSC_LOGIN_PEND) {
3687 /* Cable got disconnected after we sent
3688 * a login. Do delete to prevent timeout.
3690 fcport->logout_on_delete = 1;
3694 fcport->scan_needed = 0;
3695 if (((qla_dual_mode_enabled(vha) ||
3696 qla_ini_mode_enabled(vha)) &&
3697 atomic_read(&fcport->state) == FCS_ONLINE) ||
3699 if (fcport->loop_id != FC_NO_LOOP_ID) {
3700 if (fcport->flags & FCF_FCP2_DEVICE)
3701 fcport->logout_on_delete = 0;
3703 ql_dbg(ql_dbg_disc, vha, 0x20f0,
3704 "%s %d %8phC post del sess\n",
3708 qlt_schedule_sess_for_deletion(fcport);
3713 if (fcport->scan_needed ||
3714 fcport->disc_state != DSC_LOGIN_COMPLETE) {
3715 if (fcport->login_retry == 0) {
3716 fcport->login_retry =
3717 vha->hw->login_retry_count;
3718 ql_dbg(ql_dbg_disc, vha, 0x20a3,
3719 "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
3720 fcport->port_name, fcport->loop_id,
3721 fcport->login_retry);
3723 fcport->scan_needed = 0;
3724 qla24xx_fcport_handle_login(vha, fcport);
3731 qla24xx_sp_unmap(vha, sp);
3732 spin_lock_irqsave(&vha->work_lock, flags);
3733 vha->scan.scan_flags &= ~SF_SCANNING;
3734 spin_unlock_irqrestore(&vha->work_lock, flags);
3737 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3738 if (fcport->scan_needed) {
3739 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3740 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/*
 * qla2x00_post_gnnft_gpnft_done_work() - Queue GPNFT/GNNFT completion
 * processing onto the driver work queue (caller may be in IRQ context).
 *
 * Only QLA_EVT_GPNFT_DONE / QLA_EVT_GNNFT_DONE events are accepted;
 * anything else is rejected with QLA_PARAMETER_ERROR.
 */
3747 static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
3750 struct qla_work_evt *e;
3752 if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
3753 return QLA_PARAMETER_ERROR;
3755 e = qla2x00_alloc_work(vha, cmd);
/* Work-element allocation failed. */
3757 return QLA_FUNCTION_FAILED;
3761 return qla2x00_post_work(vha, e);
/*
 * qla2x00_post_nvme_gpnft_work() - Queue a follow-up GPN_FT scan for the
 * FC-NVMe FC4 type, reusing the same srb from the just-finished FCP scan.
 *
 * Only QLA_EVT_GPNFT is a valid event code here.
 */
3764 static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha,
3767 struct qla_work_evt *e;
3769 if (cmd != QLA_EVT_GPNFT)
3770 return QLA_PARAMETER_ERROR;
3772 e = qla2x00_alloc_work(vha, cmd);
/* Work-element allocation failed. */
3774 return QLA_FUNCTION_FAILED;
/* Second scan pass targets NVMe-capable ports. */
3776 e->u.gpnft.fc4_type = FC4_TYPE_NVME;
3779 return qla2x00_post_work(vha, e);
/*
 * qla2x00_find_free_fcp_nvme_slot() - Fold one CT response (GPN_FT or
 * GNN_FT, for FCP or NVMe) into the persistent scan list vha->scan.l[].
 *
 * GPN_FT responses create/merge entries keyed by port WWN and set the
 * FS_FC4TYPE_FCP / FS_FC4TYPE_NVME capability bits; GNN_FT responses fill
 * in the node name of an already-known N_Port ID.  sp->gen2 carries the
 * FC4 type of the scan that produced this response.
 */
3782 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
3785 struct qla_hw_data *ha = vha->hw;
3786 int num_fibre_dev = ha->max_fibre_devices;
3787 struct ct_sns_req *ct_req =
3788 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3789 struct ct_sns_gpnft_rsp *ct_rsp =
3790 (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3791 struct ct_sns_gpn_ft_data *d;
3792 struct fab_scan_rp *rp;
3793 u16 cmd = be16_to_cpu(ct_req->command);
3794 u8 fc4_type = sp->gen2;
3801 for (i = 0; i < num_fibre_dev; i++) {
3802 d = &ct_rsp->entries[i];
/* Decode the 3-byte big-endian N_Port ID from the CT entry. */
3805 id.b.domain = d->port_id[0];
3806 id.b.area = d->port_id[1];
3807 id.b.al_pa = d->port_id[2];
3808 wwn = wwn_to_u64(d->port_name);
/* Empty/terminator entry - nothing to record. */
3810 if (id.b24 == 0 || wwn == 0)
3813 if (fc4_type == FC4_TYPE_FCP_SCSI) {
3814 if (cmd == GPN_FT_CMD) {
/* FCP GPN_FT: populate the next free slot with this port. */
3815 rp = &vha->scan.l[j];
3817 memcpy(rp->port_name, d->port_name, 8);
3819 rp->fc4type = FS_FC4TYPE_FCP;
/* FCP GNN_FT: attach the node name to the matching N_Port ID. */
3821 for (k = 0; k < num_fibre_dev; k++) {
3822 rp = &vha->scan.l[k];
3823 if (id.b24 == rp->id.b24) {
3824 memcpy(rp->node_name,
3831 /* Search if the fibre device supports FC4_TYPE_NVME */
3832 if (cmd == GPN_FT_CMD) {
3835 for (k = 0; k < num_fibre_dev; k++) {
3836 rp = &vha->scan.l[k];
3837 if (!memcmp(rp->port_name,
3840 * Supports FC-NVMe & FCP
3842 rp->fc4type |= FS_FC4TYPE_NVME;
3848 /* We found new FC-NVMe only port */
3850 for (k = 0; k < num_fibre_dev; k++) {
3851 rp = &vha->scan.l[k];
/* Skip occupied slots until a free one is found. */
3852 if (wwn_to_u64(rp->port_name)) {
3856 memcpy(rp->port_name,
/* NVMe GNN_FT: attach the node name to the matching N_Port ID. */
3865 for (k = 0; k < num_fibre_dev; k++) {
3866 rp = &vha->scan.l[k];
3867 if (id.b24 == rp->id.b24) {
3868 memcpy(rp->node_name,
/*
 * qla2x00_async_gpnft_gnnft_sp_done() - Shared srb completion callback for
 * both the GPN_FT and GNN_FT CT pass-through commands.
 *
 * Runs in interrupt context, so all heavy post-processing is deferred to
 * the driver work queue.  On error it either queues GNNFT_DONE work (so
 * buffers are freed in process context) or retries the scan via the DPC
 * thread; on success it records the response in the scan list and chains
 * the next scan stage (FCP GNN_FT -> NVMe GPN_FT when NVMe is enabled,
 * otherwise the matching *_DONE event).
 */
3878 static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
3880 struct scsi_qla_host *vha = sp->vha;
3881 struct ct_sns_req *ct_req =
3882 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3883 u16 cmd = be16_to_cpu(ct_req->command);
3884 u8 fc4_type = sp->gen2;
3885 unsigned long flags;
3888 /* gen2 field is holding the fc4type */
3889 ql_dbg(ql_dbg_disc, vha, 0xffff,
3890 "Async done-%s res %x FC4Type %x\n",
3891 sp->name, res, sp->gen2);
3893 del_timer(&sp->u.iocb_cmd.timer);
/* Error path (NOTE: the guarding condition is elided in this view). */
3896 unsigned long flags;
3897 const char *name = sp->name;
3900 * We are in an Interrupt context, queue up this
3901 * sp for GNNFT_DONE work. This will allow all
3902 * the resource to get freed up.
3904 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3905 QLA_EVT_GNNFT_DONE);
3907 /* Cleanup here to prevent memory leak */
3908 qla24xx_sp_unmap(vha, sp);
3910 spin_lock_irqsave(&vha->work_lock, flags);
3911 vha->scan.scan_flags &= ~SF_SCANNING;
3912 vha->scan.scan_retry++;
3913 spin_unlock_irqrestore(&vha->work_lock, flags);
3915 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3916 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3917 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3918 qla2xxx_wake_dpc(vha);
3920 ql_dbg(ql_dbg_disc, vha, 0xffff,
3921 "Async done-%s rescan failed on all retries.\n",
/* Success: merge this response into vha->scan.l[]. */
3928 qla2x00_find_free_fcp_nvme_slot(vha, sp);
/* FCP GNN_FT finished and NVMe is enabled: chain an NVMe GPN_FT pass. */
3930 if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
3931 cmd == GNN_FT_CMD) {
3932 spin_lock_irqsave(&vha->work_lock, flags);
3933 vha->scan.scan_flags &= ~SF_SCANNING;
3934 spin_unlock_irqrestore(&vha->work_lock, flags);
3937 rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
/* Posting failed: free buffers and fall back to a DPC-driven resync. */
3939 qla24xx_sp_unmap(vha, sp);
3940 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3941 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/* Otherwise queue the matching completion event for this command. */
3946 if (cmd == GPN_FT_CMD) {
3947 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3948 QLA_EVT_GPNFT_DONE);
3950 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3951 QLA_EVT_GNNFT_DONE);
3955 qla24xx_sp_unmap(vha, sp);
3956 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3957 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3963 * Get WWNN list for fc4_type
3965 * It is assumed the same SRB is re-used from GPNFT to avoid
3966 * mem free & re-alloc
3968 static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
3971 int rval = QLA_FUNCTION_FAILED;
3972 struct ct_sns_req *ct_req;
3973 struct ct_sns_pkt *ct_sns;
3974 unsigned long flags;
/* Adapter offline: abandon the scan and clear the in-progress flag. */
3976 if (!vha->flags.online) {
3977 spin_lock_irqsave(&vha->work_lock, flags);
3978 vha->scan.scan_flags &= ~SF_SCANNING;
3979 spin_unlock_irqrestore(&vha->work_lock, flags);
/* The req/rsp DMA buffers must have been carried over from GPN_FT. */
3983 if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
3984 ql_log(ql_log_warn, vha, 0xffff,
3985 "%s: req %p rsp %p are not setup\n",
3986 __func__, sp->u.iocb_cmd.u.ctarg.req,
3987 sp->u.iocb_cmd.u.ctarg.rsp);
3988 spin_lock_irqsave(&vha->work_lock, flags);
3989 vha->scan.scan_flags &= ~SF_SCANNING;
3990 spin_unlock_irqrestore(&vha->work_lock, flags);
3992 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3993 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3997 ql_dbg(ql_dbg_disc, vha, 0xfffff,
3998 "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
3999 __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
4000 sp->u.iocb_cmd.u.ctarg.req_size);
4002 sp->type = SRB_CT_PTHRU_CMD;
/* gen1 snapshots chip_reset; gen2 carries the FC4 type for the callback. */
4004 sp->gen1 = vha->hw->base_qpair->chip_reset;
4005 sp->gen2 = fc4_type;
4007 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4008 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* Reuse the GPN_FT buffers in place: wipe both before building GNN_FT. */
4010 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
4011 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
4013 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4014 /* CT_IU preamble */
4015 ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
4016 sp->u.iocb_cmd.u.ctarg.rsp_size);
/* GNN_FT shares the gpn_ft request layout; only the FC4 type differs. */
4019 ct_req->req.gpn_ft.port_type = fc4_type;
4021 sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
4022 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4024 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4026 ql_dbg(ql_dbg_disc, vha, 0xffff,
4027 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4028 sp->handle, ct_req->req.gpn_ft.port_type);
4030 rval = qla2x00_start_sp(sp);
4031 if (rval != QLA_SUCCESS) {
/* Error unwind: release the coherent DMA buffers owned by this sp. */
4038 if (sp->u.iocb_cmd.u.ctarg.req) {
4039 dma_free_coherent(&vha->hw->pdev->dev,
4040 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4041 sp->u.iocb_cmd.u.ctarg.req,
4042 sp->u.iocb_cmd.u.ctarg.req_dma);
4043 sp->u.iocb_cmd.u.ctarg.req = NULL;
4045 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4046 dma_free_coherent(&vha->hw->pdev->dev,
4047 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4048 sp->u.iocb_cmd.u.ctarg.rsp,
4049 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4050 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/* Scan could not be issued: schedule a delayed retry via scan_work. */
4055 spin_lock_irqsave(&vha->work_lock, flags);
4056 vha->scan.scan_flags &= ~SF_SCANNING;
4057 if (vha->scan.scan_flags == 0) {
4058 ql_dbg(ql_dbg_disc, vha, 0xffff,
4059 "%s: schedule\n", __func__);
4060 vha->scan.scan_flags |= SF_QUEUED;
4061 schedule_delayed_work(&vha->scan.scan_work, 5);
4063 spin_unlock_irqrestore(&vha->work_lock, flags);
/*
 * qla24xx_async_gpnft_done() - GPN_FT stage finished; kick off the GNN_FT
 * stage for the same FC4 type (carried in sp->gen2), reusing the same srb
 * and its DMA buffers.
 */
4069 void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
4071 ql_dbg(ql_dbg_disc, vha, 0xffff,
4072 "%s enter\n", __func__);
4073 qla24xx_async_gnnft(vha, sp, sp->gen2);
4076 /* Get WWPN list for certain fc4_type */
/*
 * qla24xx_async_gpnft() - Start a fabric scan by issuing GPN_FT to the
 * fabric name server for @fc4_type (FCP first; NVMe pass reuses @sp).
 *
 * For the FCP pass a fresh srb plus CT request/response DMA buffers are
 * allocated here; for the NVMe pass the caller supplies the srb already
 * carrying those buffers.  SF_SCANNING in vha->scan.scan_flags serializes
 * concurrent scans.  Returns a QLA_* status.
 */
4077 int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
4079 int rval = QLA_FUNCTION_FAILED;
4080 struct ct_sns_req *ct_req;
4081 struct ct_sns_pkt *ct_sns;
4083 unsigned long flags;
4085 ql_dbg(ql_dbg_disc, vha, 0xffff,
4086 "%s enter\n", __func__);
4088 if (!vha->flags.online)
/* Only one fabric scan may be in flight at a time. */
4091 spin_lock_irqsave(&vha->work_lock, flags);
4092 if (vha->scan.scan_flags & SF_SCANNING) {
4093 spin_unlock_irqrestore(&vha->work_lock, flags);
4094 ql_dbg(ql_dbg_disc, vha, 0xffff, "scan active\n");
4097 vha->scan.scan_flags |= SF_SCANNING;
4098 spin_unlock_irqrestore(&vha->work_lock, flags);
/* FCP pass: allocate srb + CT DMA buffers from scratch. */
4100 if (fc4_type == FC4_TYPE_FCP_SCSI) {
4101 ql_dbg(ql_dbg_disc, vha, 0xffff,
4102 "%s: Performing FCP Scan\n", __func__);
4105 sp->free(sp); /* should not happen */
4107 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
/* srb allocation failed: clear SF_SCANNING and bail. */
4109 spin_lock_irqsave(&vha->work_lock, flags);
4110 vha->scan.scan_flags &= ~SF_SCANNING;
4111 spin_unlock_irqrestore(&vha->work_lock, flags);
4115 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
4116 sizeof(struct ct_sns_pkt),
4117 &sp->u.iocb_cmd.u.ctarg.req_dma,
4119 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
4120 if (!sp->u.iocb_cmd.u.ctarg.req) {
4121 ql_log(ql_log_warn, vha, 0xffff,
4122 "Failed to allocate ct_sns request.\n");
4123 spin_lock_irqsave(&vha->work_lock, flags);
4124 vha->scan.scan_flags &= ~SF_SCANNING;
4125 spin_unlock_irqrestore(&vha->work_lock, flags);
4129 sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
/* Response must hold one gpn_ft entry per supported fabric device. */
4131 rspsz = sizeof(struct ct_sns_gpnft_rsp) +
4132 ((vha->hw->max_fibre_devices - 1) *
4133 sizeof(struct ct_sns_gpn_ft_data));
4135 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
4137 &sp->u.iocb_cmd.u.ctarg.rsp_dma,
4139 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz;
4140 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
4141 ql_log(ql_log_warn, vha, 0xffff,
4142 "Failed to allocate ct_sns request.\n");
4143 spin_lock_irqsave(&vha->work_lock, flags);
4144 vha->scan.scan_flags &= ~SF_SCANNING;
4145 spin_unlock_irqrestore(&vha->work_lock, flags);
/* Roll back the request buffer allocated above. */
4146 dma_free_coherent(&vha->hw->pdev->dev,
4147 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4148 sp->u.iocb_cmd.u.ctarg.req,
4149 sp->u.iocb_cmd.u.ctarg.req_dma);
4150 sp->u.iocb_cmd.u.ctarg.req = NULL;
4154 sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
/* Fresh scan: clear the persistent scan list before repopulating it. */
4156 ql_dbg(ql_dbg_disc, vha, 0xffff,
4157 "%s scan list size %d\n", __func__, vha->scan.size);
4159 memset(vha->scan.l, 0, vha->scan.size);
/* NVMe pass with no srb handed over - cannot proceed. */
4161 ql_dbg(ql_dbg_disc, vha, 0xffff,
4162 "NVME scan did not provide SP\n");
4166 sp->type = SRB_CT_PTHRU_CMD;
/* gen1 snapshots chip_reset; gen2 carries the FC4 type for the callback. */
4168 sp->gen1 = vha->hw->base_qpair->chip_reset;
4169 sp->gen2 = fc4_type;
4171 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4172 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4174 rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
4175 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
4176 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
4178 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4179 /* CT_IU preamble */
4180 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
4183 ct_req->req.gpn_ft.port_type = fc4_type;
4185 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4187 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4189 ql_dbg(ql_dbg_disc, vha, 0xffff,
4190 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4191 sp->handle, ct_req->req.gpn_ft.port_type);
4193 rval = qla2x00_start_sp(sp);
4194 if (rval != QLA_SUCCESS) {
/* Error unwind: free both CT DMA buffers. */
4201 if (sp->u.iocb_cmd.u.ctarg.req) {
4202 dma_free_coherent(&vha->hw->pdev->dev,
4203 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4204 sp->u.iocb_cmd.u.ctarg.req,
4205 sp->u.iocb_cmd.u.ctarg.req_dma);
4206 sp->u.iocb_cmd.u.ctarg.req = NULL;
4208 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4209 dma_free_coherent(&vha->hw->pdev->dev,
4210 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4211 sp->u.iocb_cmd.u.ctarg.rsp,
4212 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4213 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/* Scan could not be issued: schedule a delayed retry via scan_work. */
4218 spin_lock_irqsave(&vha->work_lock, flags);
4219 vha->scan.scan_flags &= ~SF_SCANNING;
4220 if (vha->scan.scan_flags == 0) {
4221 ql_dbg(ql_dbg_disc, vha, 0xffff,
4222 "%s: schedule\n", __func__);
4223 vha->scan.scan_flags |= SF_QUEUED;
4224 schedule_delayed_work(&vha->scan.scan_work, 5);
4226 spin_unlock_irqrestore(&vha->work_lock, flags);
/*
 * qla_scan_work_fn() - Delayed-work handler for a deferred fabric rescan.
 *
 * Scheduled via schedule_delayed_work() when an earlier scan attempt
 * could not be issued.  Flags a loop resync, wakes the DPC thread, and
 * clears SF_QUEUED under work_lock.
 */
4232 void qla_scan_work_fn(struct work_struct *work)
4234 struct fab_scan *s = container_of(to_delayed_work(work),
4235 struct fab_scan, scan_work);
4236 struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
4238 unsigned long flags;
4240 ql_dbg(ql_dbg_disc, vha, 0xffff,
4241 "%s: schedule loop resync\n", __func__);
4242 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4243 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4244 qla2xxx_wake_dpc(vha);
4245 spin_lock_irqsave(&vha->work_lock, flags);
4246 vha->scan.scan_flags &= ~SF_QUEUED;
4247 spin_unlock_irqrestore(&vha->work_lock, flags);
/*
 * qla24xx_handle_gnnid_event() - GNN_ID completed; continue discovery for
 * this fcport by posting get-name-list (GNL) work.
 */
4251 void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4253 qla24xx_post_gnl_work(vha, ea->fcport);
/*
 * qla2x00_async_gnnid_sp_done() - Completion callback for the GNN_ID
 * (get node name by N_Port ID) CT command.
 *
 * Copies the returned node name into the fcport, clears FCF_ASYNC_SENT,
 * and forwards an event_arg to qla24xx_handle_gnnid_event().
 */
4256 static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res)
4258 struct scsi_qla_host *vha = sp->vha;
4259 fc_port_t *fcport = sp->fcport;
4260 u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
4261 struct event_arg ea;
4264 fcport->flags &= ~FCF_ASYNC_SENT;
4265 wwnn = wwn_to_u64(node_name);
4267 memcpy(fcport->node_name, node_name, WWN_SIZE);
4269 memset(&ea, 0, sizeof(ea));
4274 ql_dbg(ql_dbg_disc, vha, 0x204f,
4275 "Async done-%s res %x, WWPN %8phC %8phC\n",
4276 sp->name, res, fcport->port_name, fcport->node_name);
4278 qla24xx_handle_gnnid_event(vha, &ea);
/*
 * qla24xx_async_gnnid() - Issue an async GNN_ID CT query for @fcport's
 * node name, keyed by its current N_Port ID.
 *
 * Uses the fcport's preallocated ct_sns descriptor as both request and
 * response buffer.  FCF_ASYNC_SENT guards against a second in-flight
 * query for the same fcport.  Returns a QLA_* status.
 */
4283 int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4285 int rval = QLA_FUNCTION_FAILED;
4286 struct ct_sns_req *ct_req;
4289 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4292 qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID);
4293 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4297 fcport->flags |= FCF_ASYNC_SENT;
4298 sp->type = SRB_CT_PTHRU_CMD;
/* Snapshot generation counters so stale completions can be detected. */
4300 sp->gen1 = fcport->rscn_gen;
4301 sp->gen2 = fcport->login_gen;
4303 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4304 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4306 /* CT_IU preamble */
4307 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
4311 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
4314 /* req & rsp use the same buffer */
4315 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4316 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4317 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4318 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4319 sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
4320 sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
4321 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4323 sp->done = qla2x00_async_gnnid_sp_done;
4325 ql_dbg(ql_dbg_disc, vha, 0xffff,
4326 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4327 sp->name, fcport->port_name,
4328 sp->handle, fcport->loop_id, fcport->d_id.b24);
4330 rval = qla2x00_start_sp(sp);
4331 if (rval != QLA_SUCCESS)
/* Error path: drop the async-sent flag so discovery can retry. */
4337 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * qla24xx_post_gnnid_work() - Queue a QLA_EVT_GNNID work item for @fcport.
 *
 * Rejected when the loop is neither READY nor UP, or the driver is
 * unloading.  Returns a QLA_* status.
 */
4342 int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4344 struct qla_work_evt *e;
4347 ls = atomic_read(&vha->loop_state);
4348 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4349 test_bit(UNLOADING, &vha->dpc_flags))
4352 e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
/* Work-element allocation failed. */
4354 return QLA_FUNCTION_FAILED;
4356 e->u.fcport.fcport = fcport;
4357 return qla2x00_post_work(vha, e);
/*
 * qla24xx_handle_gfpnid_event() - Post-process a completed GFPN_ID query.
 *
 * Bails out if the fcport is being deleted or if its login/RSCN
 * generation changed while the query was in flight (the stored ea->sp
 * gen1/gen2 snapshots are compared against the live counters); otherwise
 * proceeds to a GPSC (port speed) query.
 */
4361 void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4363 fc_port_t *fcport = ea->fcport;
4365 ql_dbg(ql_dbg_disc, vha, 0xffff,
4366 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
4367 __func__, fcport->port_name, fcport->disc_state,
4368 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
4369 fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);
4371 if (fcport->disc_state == DSC_DELETE_PEND)
4374 if (ea->sp->gen2 != fcport->login_gen) {
4375 /* target side must have changed it. */
4376 ql_dbg(ql_dbg_disc, vha, 0x20d3,
4377 "%s %8phC generation changed\n",
4378 __func__, fcport->port_name);
4380 } else if (ea->sp->gen1 != fcport->rscn_gen) {
4384 qla24xx_post_gpsc_work(vha, fcport);
/*
 * qla2x00_async_gfpnid_sp_done() - Completion callback for the GFPN_ID
 * (get fabric port name by N_Port ID) CT command.
 *
 * Copies the returned fabric port name into the fcport and forwards an
 * event_arg to qla24xx_handle_gfpnid_event().
 */
4387 static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res)
4389 struct scsi_qla_host *vha = sp->vha;
4390 fc_port_t *fcport = sp->fcport;
4391 u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
4392 struct event_arg ea;
4395 wwn = wwn_to_u64(fpn);
4397 memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
4399 memset(&ea, 0, sizeof(ea));
4404 ql_dbg(ql_dbg_disc, vha, 0x204f,
4405 "Async done-%s res %x, WWPN %8phC %8phC\n",
4406 sp->name, res, fcport->port_name, fcport->fabric_port_name);
4408 qla24xx_handle_gfpnid_event(vha, &ea);
/*
 * qla24xx_async_gfpnid() - Issue an async GFPN_ID CT query for @fcport's
 * fabric port name, keyed by its current N_Port ID.
 *
 * Mirrors qla24xx_async_gnnid(): reuses the fcport's ct_sns descriptor
 * for both request and response, and is gated by FCF_ASYNC_SENT.
 * Returns a QLA_* status.
 */
4413 int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4415 int rval = QLA_FUNCTION_FAILED;
4416 struct ct_sns_req *ct_req;
4419 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4422 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4426 sp->type = SRB_CT_PTHRU_CMD;
4427 sp->name = "gfpnid";
/* Snapshot generation counters so stale completions can be detected. */
4428 sp->gen1 = fcport->rscn_gen;
4429 sp->gen2 = fcport->login_gen;
4431 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4432 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4434 /* CT_IU preamble */
4435 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
4439 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
4442 /* req & rsp use the same buffer */
4443 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4444 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4445 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4446 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4447 sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
4448 sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
4449 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4451 sp->done = qla2x00_async_gfpnid_sp_done;
4453 ql_dbg(ql_dbg_disc, vha, 0xffff,
4454 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4455 sp->name, fcport->port_name,
4456 sp->handle, fcport->loop_id, fcport->d_id.b24);
4458 rval = qla2x00_start_sp(sp);
4459 if (rval != QLA_SUCCESS)
/* Error path: drop the async-sent flag so discovery can retry. */
4466 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * qla24xx_post_gfpnid_work() - Queue a QLA_EVT_GFPNID work item for
 * @fcport.  Same gating as qla24xx_post_gnnid_work(): loop must be READY
 * or UP and the driver must not be unloading.  Returns a QLA_* status.
 */
4471 int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4473 struct qla_work_evt *e;
4476 ls = atomic_read(&vha->loop_state);
4477 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4478 test_bit(UNLOADING, &vha->dpc_flags))
4481 e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
/* Work-element allocation failed. */
4483 return QLA_FUNCTION_FAILED;
4485 e->u.fcport.fcport = fcport;
4486 return qla2x00_post_work(vha, e);