1 // SPDX-License-Identifier: GPL-2.0-only
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <linux/bsg-lib.h>
/*
 * Deferred-work handler: frees an fc_port whose release was queued from a
 * context that could not free it directly (see qla2x00_bsg_sp_free below).
 * container_of() recovers the fcport from its embedded work_struct.
 * NOTE(review): the source here is elided (line numbers skip); the visible
 * lines only show the recovery and the final free.
 */
14 static void qla2xxx_free_fcport_work(struct work_struct *work)
16 struct fc_port *fcport = container_of(work, typeof(*fcport),
19 qla2x00_free_fcport(fcport);
22 /* BSG support for ELS/CT pass through */
22 /* BSG support for ELS/CT pass through */
/*
 * Completion callback for a BSG-backed SRB: logs the result, drops the
 * SRB reference (which may trigger sp->free), then completes the bsg_job
 * with the stored result and received payload length.
 */
23 void qla2x00_bsg_job_done(srb_t *sp, int res)
25 struct bsg_job *bsg_job = sp->u.bsg_job;
26 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
28 ql_dbg(ql_dbg_user, sp->vha, 0x7009,
29 "%s: sp hdl %x, result=%x bsg ptr %p\n",
30 __func__, sp->handle, res, bsg_job);
/* Drop the cmd_kref before completing the job; qla2x00_sp_release runs
 * when this was the last reference. */
33 kref_put(&sp->cmd_kref, qla2x00_sp_release);
35 bsg_reply->result = res;
36 bsg_job_done(bsg_job, bsg_reply->result,
37 bsg_reply->reply_payload_rcv_len);
/*
 * Release routine for a BSG SRB: undoes the DMA mappings that were set up
 * for the request/reply scatter-gather lists (or frees the remapped DMA
 * pool buffers), then schedules the dummy fcport for deferred freeing for
 * the SRB types that allocated one.
 */
40 void qla2x00_bsg_sp_free(srb_t *sp)
42 struct qla_hw_data *ha = sp->vha->hw;
43 struct bsg_job *bsg_job = sp->u.bsg_job;
44 struct fc_bsg_request *bsg_request = bsg_job->request;
45 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
/* FX00 pass-through: the IOCB request flags record which directions
 * were actually DMA-mapped, so only those are unmapped. */
47 if (sp->type == SRB_FXIOCB_BCMD) {
48 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
49 &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
51 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
52 dma_unmap_sg(&ha->pdev->dev,
53 bsg_job->request_payload.sg_list,
54 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
56 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
57 dma_unmap_sg(&ha->pdev->dev,
58 bsg_job->reply_payload.sg_list,
59 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
/* Remapped requests used purex_dma_pool bounce buffers instead of
 * mapping the caller's SG lists directly. */
62 if (sp->remap.remapped) {
63 dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
65 dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
68 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
69 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
71 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
72 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
/* These SRB types allocated a dummy fcport; free it from workqueue
 * context rather than here (this path may not be allowed to sleep). */
76 if (sp->type == SRB_CT_CMD ||
77 sp->type == SRB_FXIOCB_BCMD ||
78 sp->type == SRB_ELS_CMD_HST) {
79 INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
80 queue_work(ha->wq, &sp->fcport->free_work);
/*
 * Validate an FCP priority configuration block read from flash (or supplied
 * by the user): checks for the all-ones "no data" pattern, the "HQOS"
 * signature at the start of the buffer, and that at least one entry has the
 * TAG_VALID flag set. Debug messages report the failure reason.
 */
87 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
88 struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
90 int i, ret, num_valid;
92 struct qla_fcp_prio_entry *pri_entry;
93 uint32_t *bcode_val_ptr, bcode_val;
/* The first 32 bits double as both the byte-wise "HQOS" signature and
 * the erased-flash sentinel check. */
97 bcode = (uint8_t *)pri_cfg;
98 bcode_val_ptr = (uint32_t *)pri_cfg;
99 bcode_val = (uint32_t)(*bcode_val_ptr);
101 if (bcode_val == 0xFFFFFFFF) {
102 /* No FCP Priority config data in flash (erased region). */
103 ql_dbg(ql_dbg_user, vha, 0x7051,
104 "No FCP Priority config data.\n");
108 if (memcmp(bcode, "HQOS", 4)) {
109 /* Invalid FCP priority data header. */
110 ql_dbg(ql_dbg_user, vha, 0x7052,
111 "Invalid FCP Priority data header. bcode=0x%x.\n",
/* Count entries carrying FCP_PRIO_ENTRY_TAG_VALID. */
118 pri_entry = &pri_cfg->entry[0];
119 for (i = 0; i < pri_cfg->num_entries; i++) {
120 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
125 if (num_valid == 0) {
126 /* No valid FCP priority data entries */
127 ql_dbg(ql_dbg_user, vha, 0x7053,
128 "No valid FCP Priority data entries.\n");
131 /* FCP priority data is valid */
132 ql_dbg(ql_dbg_user, vha, 0x7054,
133 "Valid FCP priority data. num entries = %d.\n",
/*
 * BSG vendor command dispatcher for FCP priority configuration:
 * enable/disable the feature, read the current config out to the reply
 * payload, or install a new config from the request payload (validating it
 * with qla24xx_fcp_prio_cfg_valid before accepting it). Only supported on
 * ISP24xx/25xx/P3P parts.
 */
141 qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
143 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
144 struct fc_bsg_request *bsg_request = bsg_job->request;
145 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
146 scsi_qla_host_t *vha = shost_priv(host);
147 struct qla_hw_data *ha = vha->hw;
152 if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
154 goto exit_fcp_prio_cfg;
157 /* Get the sub command */
158 oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
160 /* Only set config is allowed if config memory is not allocated */
161 if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
163 goto exit_fcp_prio_cfg;
166 case QLFC_FCP_PRIO_DISABLE:
167 if (ha->flags.fcp_prio_enabled) {
168 ha->flags.fcp_prio_enabled = 0;
169 ha->fcp_prio_cfg->attributes &=
170 ~FCP_PRIO_ATTR_ENABLE;
/* Propagate the new priority state to all logged-in ports. */
171 qla24xx_update_all_fcp_prio(vha);
172 bsg_reply->result = DID_OK;
175 bsg_reply->result = (DID_ERROR << 16);
176 goto exit_fcp_prio_cfg;
180 case QLFC_FCP_PRIO_ENABLE:
/* Enabling requires that a config has already been loaded. */
181 if (!ha->flags.fcp_prio_enabled) {
182 if (ha->fcp_prio_cfg) {
183 ha->flags.fcp_prio_enabled = 1;
184 ha->fcp_prio_cfg->attributes |=
185 FCP_PRIO_ATTR_ENABLE;
186 qla24xx_update_all_fcp_prio(vha);
187 bsg_reply->result = DID_OK;
190 bsg_reply->result = (DID_ERROR << 16);
191 goto exit_fcp_prio_cfg;
196 case QLFC_FCP_PRIO_GET_CONFIG:
/* Copy the in-memory config out to the caller's reply SG list,
 * bounded by FCP_PRIO_CFG_SIZE. */
197 len = bsg_job->reply_payload.payload_len;
198 if (!len || len > FCP_PRIO_CFG_SIZE) {
200 bsg_reply->result = (DID_ERROR << 16);
201 goto exit_fcp_prio_cfg;
204 bsg_reply->result = DID_OK;
205 bsg_reply->reply_payload_rcv_len =
207 bsg_job->reply_payload.sg_list,
208 bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
213 case QLFC_FCP_PRIO_SET_CONFIG:
214 len = bsg_job->request_payload.payload_len;
215 if (!len || len > FCP_PRIO_CFG_SIZE) {
216 bsg_reply->result = (DID_ERROR << 16);
218 goto exit_fcp_prio_cfg;
/* Lazily allocate the config buffer on first SET. */
221 if (!ha->fcp_prio_cfg) {
222 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
223 if (!ha->fcp_prio_cfg) {
224 ql_log(ql_log_warn, vha, 0x7050,
225 "Unable to allocate memory for fcp prio "
226 "config data (%x).\n", FCP_PRIO_CFG_SIZE);
227 bsg_reply->result = (DID_ERROR << 16);
229 goto exit_fcp_prio_cfg;
233 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
234 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
235 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
238 /* validate fcp priority data */
240 if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
241 bsg_reply->result = (DID_ERROR << 16);
243 /* If the buffer was invalid, the
244 * fcp_prio_cfg data is of no use
246 vfree(ha->fcp_prio_cfg);
247 ha->fcp_prio_cfg = NULL;
248 goto exit_fcp_prio_cfg;
/* Mirror the enable attribute from the newly-installed config
 * and push it to all ports. */
251 ha->flags.fcp_prio_enabled = 0;
252 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
253 ha->flags.fcp_prio_enabled = 1;
254 qla24xx_update_all_fcp_prio(vha);
255 bsg_reply->result = DID_OK;
263 bsg_job_done(bsg_job, bsg_reply->result,
264 bsg_reply->reply_payload_rcv_len);
/*
 * ELS pass-through handler. Two entry flavors: FC_BSG_RPT_ELS sends an ELS
 * to an existing rport (fcport taken from rport->dd_data), while the host
 * flavor builds a temporary dummy fcport from the D_ID in the request.
 * ELS_AUTH_ELS commands are diverted to the EDIF path. The payload SG lists
 * are DMA-mapped, an SRB is allocated and started; on failure all mappings
 * and the dummy fcport are torn down here.
 */
269 qla2x00_process_els(struct bsg_job *bsg_job)
271 struct fc_bsg_request *bsg_request = bsg_job->request;
272 struct fc_rport *rport;
273 fc_port_t *fcport = NULL;
274 struct Scsi_Host *host;
275 scsi_qla_host_t *vha;
276 struct qla_hw_data *ha;
279 int req_sg_cnt, rsp_sg_cnt;
280 int rval = (DID_ERROR << 16);
281 uint16_t nextlid = 0;
282 uint32_t els_cmd = 0;
284 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
285 rport = fc_bsg_to_rport(bsg_job);
286 fcport = *(fc_port_t **) rport->dd_data;
287 host = rport_to_shost(rport);
288 vha = shost_priv(host);
290 type = "FC_BSG_RPT_ELS";
292 host = fc_bsg_to_shost(bsg_job);
293 vha = shost_priv(host);
295 type = "FC_BSG_HST_ELS_NOLOGIN";
296 els_cmd = bsg_request->rqst_data.h_els.command_code;
/* Authentication ELS frames are handled by the EDIF code path. */
297 if (els_cmd == ELS_AUTH_ELS)
298 return qla_edif_process_els(vha, bsg_job);
301 if (!vha->flags.online) {
302 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
307 /* pass through is supported only for ISP 4Gb or higher */
308 if (!IS_FWI2_CAPABLE(ha)) {
309 ql_dbg(ql_dbg_user, vha, 0x7001,
310 "ELS passthru not supported for ISP23xx based adapters.\n");
315 /* Multiple SG's are not supported for ELS requests */
316 if (bsg_job->request_payload.sg_cnt > 1 ||
317 bsg_job->reply_payload.sg_cnt > 1) {
318 ql_dbg(ql_dbg_user, vha, 0x7002,
319 "Multiple SG's are not supported for ELS requests, "
320 "request_sg_cnt=%x reply_sg_cnt=%x.\n",
321 bsg_job->request_payload.sg_cnt,
322 bsg_job->reply_payload.sg_cnt);
327 /* ELS request for rport */
328 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
329 /* make sure the rport is logged in,
330 * if not perform fabric login
332 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
333 ql_dbg(ql_dbg_user, vha, 0x7003,
334 "Failed to login port %06X for ELS passthru.\n",
340 /* Allocate a dummy fcport structure, since functions
341 * preparing the IOCB and mailbox command retrieves port
342 * specific information from fcport structure. For Host based
343 * ELS commands there will be no fcport structure allocated
345 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
351 /* Initialize all required fields of fcport */
/* D_ID bytes come LSB-first from the BSG request: [0]=AL_PA,
 * [1]=area, [2]=domain. */
353 fcport->d_id.b.al_pa =
354 bsg_request->rqst_data.h_els.port_id[0];
355 fcport->d_id.b.area =
356 bsg_request->rqst_data.h_els.port_id[1];
357 fcport->d_id.b.domain =
358 bsg_request->rqst_data.h_els.port_id[2];
/* 0xFD AL_PA is the fabric controller well-known address. */
360 (fcport->d_id.b.al_pa == 0xFD) ?
361 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
365 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
366 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
368 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
369 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
371 goto done_free_fcport;
374 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
375 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
377 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
378 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
380 goto done_free_fcport;
/* dma_map_sg may coalesce entries; the firmware interface here
 * requires the mapped counts to match the originals exactly. */
383 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
384 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
385 ql_log(ql_log_warn, vha, 0x7008,
386 "dma mapping resulted in different sg counts, "
387 "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
388 "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
389 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
394 /* Alloc SRB structure */
395 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
402 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
403 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
405 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
406 "bsg_els_rpt" : "bsg_els_hst");
407 sp->u.bsg_job = bsg_job;
408 sp->free = qla2x00_bsg_sp_free;
409 sp->done = qla2x00_bsg_job_done;
411 ql_dbg(ql_dbg_user, vha, 0x700a,
412 "bsg rqst type: %s els type: %x - loop-id=%x "
413 "portid=%-2x%02x%02x.\n", type,
414 bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
415 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
417 rval = qla2x00_start_sp(sp);
418 if (rval != QLA_SUCCESS) {
419 ql_log(ql_log_warn, vha, 0x700e,
420 "qla2x00_start_sp failed = %d\n", rval);
/* Error unwind: undo both DMA mappings, then free the dummy
 * fcport (host flavor only — the rport flavor did not allocate). */
428 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
429 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
430 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
431 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
432 goto done_free_fcport;
435 if (bsg_request->msgcode != FC_BSG_RPT_ELS)
436 qla2x00_free_fcport(fcport);
/*
 * Compute how many IOCBs a CT pass-through needs for a given number of
 * data segment descriptors. Beyond the descriptors that fit in the first
 * IOCB, each continuation IOCB carries up to 5 more.
 * NOTE(review): source is elided here; the base-iocb initialization and
 * the dsds<=2 early case are not visible in this chunk.
 */
441 static inline uint16_t
442 qla24xx_calc_ct_iocbs(uint16_t dsds)
448 iocbs += (dsds - 2) / 5;
/*
 * CT (Common Transport) pass-through handler: DMA-maps the request and
 * reply SG lists, resolves the target loop id from the CT preamble,
 * builds a dummy fcport carrying the destination D_ID, allocates an
 * SRB_CT_CMD and starts it. On failure the fcport and both DMA mappings
 * are released before returning.
 */
456 qla2x00_process_ct(struct bsg_job *bsg_job)
459 struct fc_bsg_request *bsg_request = bsg_job->request;
460 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
461 scsi_qla_host_t *vha = shost_priv(host);
462 struct qla_hw_data *ha = vha->hw;
463 int rval = (DID_ERROR << 16);
464 int req_sg_cnt, rsp_sg_cnt;
466 struct fc_port *fcport;
467 char *type = "FC_BSG_HST_CT";
470 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
471 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
473 ql_log(ql_log_warn, vha, 0x700f,
474 "dma_map_sg return %d for request\n", req_sg_cnt);
479 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
480 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
482 ql_log(ql_log_warn, vha, 0x7010,
483 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
/* Mapped SG counts must match the originals (no coalescing). */
488 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
489 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
490 ql_log(ql_log_warn, vha, 0x7011,
491 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
492 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
493 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
498 if (!vha->flags.online) {
499 ql_log(ql_log_warn, vha, 0x7012,
500 "Host is not online.\n");
/* Select the destination loop id from the CT preamble GS type. */
506 (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
513 loop_id = vha->mgmt_svr_loop_id;
516 ql_dbg(ql_dbg_user, vha, 0x7013,
517 "Unknown loop id: %x.\n", loop_id);
522 /* Allocate a dummy fcport structure, since functions preparing the
523 * IOCB and mailbox command retrieves port specific information
524 * from fcport structure. For Host based ELS commands there will be
525 * no fcport structure allocated
527 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
529 ql_log(ql_log_warn, vha, 0x7014,
530 "Failed to allocate fcport.\n");
535 /* Initialize all required fields of fcport */
/* D_ID bytes LSB-first: [0]=AL_PA, [1]=area, [2]=domain. */
537 fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
538 fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
539 fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
540 fcport->loop_id = loop_id;
542 /* Alloc SRB structure */
543 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
545 ql_log(ql_log_warn, vha, 0x7015,
546 "qla2x00_get_sp failed.\n");
548 goto done_free_fcport;
551 sp->type = SRB_CT_CMD;
553 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
554 sp->u.bsg_job = bsg_job;
555 sp->free = qla2x00_bsg_sp_free;
556 sp->done = qla2x00_bsg_job_done;
558 ql_dbg(ql_dbg_user, vha, 0x7016,
/* NOTE(review): "else type" in this runtime format string looks like a
 * typo for "els type"/"ct type" — it is emitted at runtime, so it is
 * deliberately left unchanged here; confirm before fixing. */
559 "bsg rqst type: %s else type: %x - "
560 "loop-id=%x portid=%02x%02x%02x.\n", type,
561 (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
562 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
563 fcport->d_id.b.al_pa);
565 rval = qla2x00_start_sp(sp);
566 if (rval != QLA_SUCCESS) {
567 ql_log(ql_log_warn, vha, 0x7017,
568 "qla2x00_start_sp failed=%d.\n", rval);
571 goto done_free_fcport;
576 qla2x00_free_fcport(fcport);
578 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
579 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
580 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
581 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
586 /* Disable loopback mode */
/*
 * Revert the 81xx/8031/8044 port configuration out of internal/external
 * loopback. Clears the loopback bits in config word 0, issues the new
 * port config, then optionally waits for the DCBX-complete and
 * link-up completion events (controlled by 'wait' and 'wait2'). The
 * notify_* flags are always cleared again on every exit path.
 */
588 qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
593 uint16_t new_config[4];
594 struct qla_hw_data *ha = vha->hw;
/* Loopback reconfiguration only applies to these CNA parts. */
596 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
597 goto done_reset_internal;
599 memset(new_config, 0 , sizeof(new_config));
600 if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
601 ENABLE_INTERNAL_LOOPBACK ||
602 (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
603 ENABLE_EXTERNAL_LOOPBACK) {
604 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
605 ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
606 (new_config[0] & INTERNAL_LOOPBACK_MASK));
607 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
/* Arm the ISR-side notifications before issuing the command so the
 * completions below can be signalled. */
609 ha->notify_dcbx_comp = wait;
610 ha->notify_lb_portup_comp = wait2;
612 ret = qla81xx_set_port_config(vha, new_config);
613 if (ret != QLA_SUCCESS) {
614 ql_log(ql_log_warn, vha, 0x7025,
615 "Set port config failed.\n");
616 ha->notify_dcbx_comp = 0;
617 ha->notify_lb_portup_comp = 0;
619 goto done_reset_internal;
622 /* Wait for DCBX complete event */
623 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
624 (DCBX_COMP_TIMEOUT * HZ))) {
625 ql_dbg(ql_dbg_user, vha, 0x7026,
626 "DCBX completion not received.\n");
627 ha->notify_dcbx_comp = 0;
628 ha->notify_lb_portup_comp = 0;
630 goto done_reset_internal;
632 ql_dbg(ql_dbg_user, vha, 0x7027,
633 "DCBX completion received.\n");
/* Optionally also wait for the port to come back up. */
636 !wait_for_completion_timeout(&ha->lb_portup_comp,
637 (LB_PORTUP_COMP_TIMEOUT * HZ))) {
638 ql_dbg(ql_dbg_user, vha, 0x70c5,
639 "Port up completion not received.\n");
640 ha->notify_lb_portup_comp = 0;
642 goto done_reset_internal;
644 ql_dbg(ql_dbg_user, vha, 0x70c6,
645 "Port up completion received.\n");
647 ha->notify_dcbx_comp = 0;
648 ha->notify_lb_portup_comp = 0;
655 * Set the port configuration to enable the internal or external loopback
656 * depending on the loopback mode.
/*
 * Sets the loopback-enable bits in config word 0 per 'mode', issues the
 * new port config, then waits for the DCBX-complete event with support
 * for firmware-requested timeout extension (ha->idc_extend_tmo). If DCBX
 * never completes, the loopback change is rolled back via
 * qla81xx_reset_loopback_mode; if even that fails, a firmware dump is
 * taken and a chip reset is scheduled.
 */
659 qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
660 uint16_t *new_config, uint16_t mode)
664 unsigned long rem_tmo = 0, current_tmo = 0;
665 struct qla_hw_data *ha = vha->hw;
667 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
668 goto done_set_internal;
670 if (mode == INTERNAL_LOOPBACK)
671 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
672 else if (mode == EXTERNAL_LOOPBACK)
673 new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
674 ql_dbg(ql_dbg_user, vha, 0x70be,
675 "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));
677 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
/* Arm DCBX-complete notification before issuing the command. */
679 ha->notify_dcbx_comp = 1;
680 ret = qla81xx_set_port_config(vha, new_config);
681 if (ret != QLA_SUCCESS) {
682 ql_log(ql_log_warn, vha, 0x7021,
683 "set port config failed.\n");
684 ha->notify_dcbx_comp = 0;
686 goto done_set_internal;
689 /* Wait for DCBX complete event */
690 current_tmo = DCBX_COMP_TIMEOUT * HZ;
/* Loop so the firmware can extend the wait via idc_extend_tmo. */
692 rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
694 if (!ha->idc_extend_tmo || rem_tmo) {
695 ha->idc_extend_tmo = 0;
698 current_tmo = ha->idc_extend_tmo * HZ;
699 ha->idc_extend_tmo = 0;
703 ql_dbg(ql_dbg_user, vha, 0x7022,
704 "DCBX completion not received.\n");
/* Roll back the loopback configuration (no waits). */
705 ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
707 * If the reset of the loopback mode doesn't work take a FCoE
708 * dump and reset the chip.
711 qla2xxx_dump_fw(vha);
712 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
716 if (ha->flags.idc_compl_status) {
717 ql_dbg(ql_dbg_user, vha, 0x70c3,
718 "Bad status in IDC Completion AEN\n");
720 ha->flags.idc_compl_status = 0;
722 ql_dbg(ql_dbg_user, vha, 0x7023,
723 "DCBX completion received.\n");
726 ha->notify_dcbx_comp = 0;
727 ha->idc_extend_tmo = 0;
/*
 * Vendor loopback/echo diagnostic handler. Maps the request/reply SG
 * lists, bounces the request payload through a coherent buffer, then
 * either runs an ECHO test (fabric topology, or an ELS-framed external
 * loopback on 81xx-class CNAs) or a true loopback test — for the CNAs
 * this requires switching the port config into loopback first and
 * restoring it afterwards. The mailbox 'response' array plus the command
 * code are appended to the bsg reply as extra firmware status. All DMA
 * resources are released on every path via the done_* unwind labels.
 */
734 qla2x00_process_loopback(struct bsg_job *bsg_job)
736 struct fc_bsg_request *bsg_request = bsg_job->request;
737 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
738 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
739 scsi_qla_host_t *vha = shost_priv(host);
740 struct qla_hw_data *ha = vha->hw;
742 uint8_t command_sent;
744 struct msg_echo_lb elreq;
745 uint16_t response[MAILBOX_REGISTER_COUNT];
746 uint16_t config[4], new_config[4];
748 void *req_data = NULL;
749 dma_addr_t req_data_dma;
750 uint32_t req_data_len;
751 uint8_t *rsp_data = NULL;
752 dma_addr_t rsp_data_dma;
753 uint32_t rsp_data_len;
755 if (!vha->flags.online) {
756 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
760 memset(&elreq, 0, sizeof(elreq));
762 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
763 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
766 if (!elreq.req_sg_cnt) {
767 ql_log(ql_log_warn, vha, 0x701a,
768 "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
772 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
773 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
776 if (!elreq.rsp_sg_cnt) {
777 ql_log(ql_log_warn, vha, 0x701b,
778 "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
780 goto done_unmap_req_sg;
783 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
784 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
785 ql_log(ql_log_warn, vha, 0x701c,
786 "dma mapping resulted in different sg counts, "
787 "request_sg_cnt: %x dma_request_sg_cnt: %x "
788 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
789 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
790 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
/* Bounce buffers: the echo/loopback mailbox commands take single
 * coherent DMA buffers, not SG lists. */
794 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
795 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
796 &req_data_dma, GFP_KERNEL);
798 ql_log(ql_log_warn, vha, 0x701d,
799 "dma alloc failed for req_data.\n");
804 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
805 &rsp_data_dma, GFP_KERNEL);
807 ql_log(ql_log_warn, vha, 0x7004,
808 "dma alloc failed for rsp_data.\n");
810 goto done_free_dma_req;
813 /* Copy the request buffer in req_data now */
814 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
815 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
817 elreq.send_dma = req_data_dma;
818 elreq.rcv_dma = rsp_data_dma;
819 elreq.transfer_size = req_data_len;
821 elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
822 elreq.iteration_count =
823 bsg_request->rqst_data.h_vendor.vendor_cmd[2];
/* ECHO path: fabric-attached, or an external-loopback request framed
 * as a max-size ELS on 81xx/8031/8044. */
825 if (atomic_read(&vha->loop_state) == LOOP_READY &&
826 ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
827 ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
828 get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
829 req_data_len == MAX_ELS_FRAME_PAYLOAD &&
830 elreq.options == EXTERNAL_LOOPBACK))) {
831 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
832 ql_dbg(ql_dbg_user, vha, 0x701e,
833 "BSG request type: %s.\n", type);
834 command_sent = INT_DEF_LB_ECHO_CMD;
835 rval = qla2x00_echo_test(vha, &elreq, response);
/* Loopback path; CNAs need a port-config dance around the test. */
837 if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
838 memset(config, 0, sizeof(config));
839 memset(new_config, 0, sizeof(new_config));
841 if (qla81xx_get_port_config(vha, config)) {
842 ql_log(ql_log_warn, vha, 0x701f,
843 "Get port config failed.\n");
845 goto done_free_dma_rsp;
848 if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
849 ql_dbg(ql_dbg_user, vha, 0x70c4,
850 "Loopback operation already in "
853 goto done_free_dma_rsp;
856 ql_dbg(ql_dbg_user, vha, 0x70c0,
857 "elreq.options=%04x\n", elreq.options);
859 if (elreq.options == EXTERNAL_LOOPBACK)
860 if (IS_QLA8031(ha) || IS_QLA8044(ha))
861 rval = qla81xx_set_loopback_mode(vha,
862 config, new_config, elreq.options);
864 rval = qla81xx_reset_loopback_mode(vha,
867 rval = qla81xx_set_loopback_mode(vha, config,
868 new_config, elreq.options);
872 goto done_free_dma_rsp;
875 type = "FC_BSG_HST_VENDOR_LOOPBACK";
876 ql_dbg(ql_dbg_user, vha, 0x7028,
877 "BSG request type: %s.\n", type);
879 command_sent = INT_DEF_LB_LOOPBACK_CMD;
880 rval = qla2x00_loopback_test(vha, &elreq, response);
/* MBS_LB_RESET from the firmware means the link took a reset
 * during the test: abort the ISP and, on 81xx, restart the MPI. */
882 if (response[0] == MBS_COMMAND_ERROR &&
883 response[1] == MBS_LB_RESET) {
884 ql_log(ql_log_warn, vha, 0x7029,
885 "MBX command error, Aborting ISP.\n");
886 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
887 qla2xxx_wake_dpc(vha);
888 qla2x00_wait_for_chip_reset(vha);
889 /* Also reset the MPI */
890 if (IS_QLA81XX(ha)) {
891 if (qla81xx_restart_mpi_firmware(vha) !=
893 ql_log(ql_log_warn, vha, 0x702a,
894 "MPI reset failed.\n");
899 goto done_free_dma_rsp;
905 /* Revert back to original port config
906 * Also clear internal loopback
908 ret = qla81xx_reset_loopback_mode(vha,
912 * If the reset of the loopback mode
913 * doesn't work take FCoE dump and then
916 qla2xxx_dump_fw(vha);
917 set_bit(ISP_ABORT_NEEDED,
/* Non-CNA adapters run the plain loopback test directly. */
924 type = "FC_BSG_HST_VENDOR_LOOPBACK";
925 ql_dbg(ql_dbg_user, vha, 0x702b,
926 "BSG request type: %s.\n", type);
927 command_sent = INT_DEF_LB_LOOPBACK_CMD;
928 rval = qla2x00_loopback_test(vha, &elreq, response);
933 ql_log(ql_log_warn, vha, 0x702c,
934 "Vendor request %s failed.\n", type);
937 bsg_reply->result = (DID_ERROR << 16);
938 bsg_reply->reply_payload_rcv_len = 0;
940 ql_dbg(ql_dbg_user, vha, 0x702d,
941 "Vendor request %s completed.\n", type);
942 bsg_reply->result = (DID_OK << 16);
943 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
944 bsg_job->reply_payload.sg_cnt, rsp_data,
/* Append the mailbox registers and the command code after the
 * fc_bsg_reply as extra firmware status for user space. */
948 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
949 sizeof(response) + sizeof(uint8_t);
950 fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
951 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
953 fw_sts_ptr += sizeof(response);
954 *fw_sts_ptr = command_sent;
/* Unwind labels: free bounce buffers, then unmap both SG lists. */
957 dma_free_coherent(&ha->pdev->dev, rsp_data_len,
958 rsp_data, rsp_data_dma);
960 dma_free_coherent(&ha->pdev->dev, req_data_len,
961 req_data, req_data_dma);
963 dma_unmap_sg(&ha->pdev->dev,
964 bsg_job->reply_payload.sg_list,
965 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
967 dma_unmap_sg(&ha->pdev->dev,
968 bsg_job->request_payload.sg_list,
969 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
971 bsg_job_done(bsg_job, bsg_reply->result,
972 bsg_reply->reply_payload_rcv_len);
/*
 * Vendor command: reset the ISP84xx chip. The flag from vendor_cmd[1]
 * selects whether the diagnostic firmware is requested after the reset
 * (A84_ISSUE_RESET_DIAG_FW). Only valid on 84xx adapters.
 */
977 qla84xx_reset(struct bsg_job *bsg_job)
979 struct fc_bsg_request *bsg_request = bsg_job->request;
980 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
981 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
982 scsi_qla_host_t *vha = shost_priv(host);
983 struct qla_hw_data *ha = vha->hw;
987 if (!IS_QLA84XX(ha)) {
988 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
992 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
994 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
997 ql_log(ql_log_warn, vha, 0x7030,
998 "Vendor request 84xx reset failed.\n");
999 rval = (DID_ERROR << 16);
1002 ql_dbg(ql_dbg_user, vha, 0x7031,
1003 "Vendor request 84xx reset completed.\n");
1004 bsg_reply->result = DID_OK;
1005 bsg_job_done(bsg_job, bsg_reply->result,
1006 bsg_reply->reply_payload_rcv_len);
/*
 * Vendor command: download new ISP84xx firmware. Copies the firmware
 * image from the request SG list into a coherent DMA buffer, builds a
 * VERIFY_CHIP IOCB pointing at it (optionally flagged as diagnostic
 * firmware), and issues it with a 120-second timeout. All DMA resources
 * are released through the unwind labels at the end.
 */
1013 qla84xx_updatefw(struct bsg_job *bsg_job)
1015 struct fc_bsg_request *bsg_request = bsg_job->request;
1016 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1017 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1018 scsi_qla_host_t *vha = shost_priv(host);
1019 struct qla_hw_data *ha = vha->hw;
1020 struct verify_chip_entry_84xx *mn = NULL;
1021 dma_addr_t mn_dma, fw_dma;
1022 void *fw_buf = NULL;
1030 if (!IS_QLA84XX(ha)) {
1031 ql_dbg(ql_dbg_user, vha, 0x7032,
1032 "Not 84xx, exiting.\n");
1036 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1037 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1039 ql_log(ql_log_warn, vha, 0x7033,
1040 "dma_map_sg returned %d for request.\n", sg_cnt);
1044 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1045 ql_log(ql_log_warn, vha, 0x7034,
1046 "DMA mapping resulted in different sg counts, "
1047 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1048 bsg_job->request_payload.sg_cnt, sg_cnt);
/* Stage the firmware image in a single coherent buffer. */
1053 data_len = bsg_job->request_payload.payload_len;
1054 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
1055 &fw_dma, GFP_KERNEL);
1057 ql_log(ql_log_warn, vha, 0x7035,
1058 "DMA alloc failed for fw_buf.\n");
1063 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1064 bsg_job->request_payload.sg_cnt, fw_buf, data_len);
1066 mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1068 ql_log(ql_log_warn, vha, 0x7036,
1069 "DMA alloc failed for fw buffer.\n");
1071 goto done_free_fw_buf;
1074 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
/* Firmware version word lives at byte offset 8 of the image. */
1075 fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);
1077 mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
1078 mn->entry_count = 1;
1080 options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
1081 if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
1082 options |= VCO_DIAG_FW;
/* IOCB fields are little-endian on the wire. */
1084 mn->options = cpu_to_le16(options);
1085 mn->fw_ver = cpu_to_le32(fw_ver);
1086 mn->fw_size = cpu_to_le32(data_len);
1087 mn->fw_seq_size = cpu_to_le32(data_len);
1088 put_unaligned_le64(fw_dma, &mn->dsd.address);
1089 mn->dsd.length = cpu_to_le32(data_len);
1090 mn->data_seg_cnt = cpu_to_le16(1);
1092 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
1095 ql_log(ql_log_warn, vha, 0x7037,
1096 "Vendor request 84xx updatefw failed.\n");
1098 rval = (DID_ERROR << 16);
1100 ql_dbg(ql_dbg_user, vha, 0x7038,
1101 "Vendor request 84xx updatefw completed.\n");
1103 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1104 bsg_reply->result = DID_OK;
/* Unwind: IOCB pool entry, staging buffer, then the SG mapping. */
1107 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1110 dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
1113 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1114 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1117 bsg_job_done(bsg_job, bsg_reply->result,
1118 bsg_reply->reply_payload_rcv_len);
/*
 * Vendor command: ISP84xx management access (read memory, get info,
 * write memory, change config) via an ACCESS_CHIP IOCB. Read/get-info
 * commands map the reply SG list and copy results back to user space;
 * write-mem maps the request SG list and stages its payload in a
 * coherent buffer. dma_direction records which mapping was made so the
 * common unwind path can undo exactly that one.
 */
1123 qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
1125 struct fc_bsg_request *bsg_request = bsg_job->request;
1126 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1127 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1128 scsi_qla_host_t *vha = shost_priv(host);
1129 struct qla_hw_data *ha = vha->hw;
1130 struct access_chip_84xx *mn = NULL;
1131 dma_addr_t mn_dma, mgmt_dma;
1132 void *mgmt_b = NULL;
1134 struct qla_bsg_a84_mgmt *ql84_mgmt;
1136 uint32_t data_len = 0;
1137 uint32_t dma_direction = DMA_NONE;
1139 if (!IS_QLA84XX(ha)) {
1140 ql_log(ql_log_warn, vha, 0x703a,
1141 "Not 84xx, exiting.\n");
1145 mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1147 ql_log(ql_log_warn, vha, 0x703c,
1148 "DMA alloc failed for fw buffer.\n");
1152 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1153 mn->entry_count = 1;
/* Management request struct follows the fc_bsg_request in memory. */
1154 ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
1155 switch (ql84_mgmt->mgmt.cmd) {
1156 case QLA84_MGMT_READ_MEM:
1157 case QLA84_MGMT_GET_INFO:
1158 sg_cnt = dma_map_sg(&ha->pdev->dev,
1159 bsg_job->reply_payload.sg_list,
1160 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1162 ql_log(ql_log_warn, vha, 0x703d,
1163 "dma_map_sg returned %d for reply.\n", sg_cnt);
1168 dma_direction = DMA_FROM_DEVICE;
1170 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1171 ql_log(ql_log_warn, vha, 0x703e,
1172 "DMA mapping resulted in different sg counts, "
1173 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1174 bsg_job->reply_payload.sg_cnt, sg_cnt);
1179 data_len = bsg_job->reply_payload.payload_len;
1181 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1182 &mgmt_dma, GFP_KERNEL);
1184 ql_log(ql_log_warn, vha, 0x703f,
1185 "DMA alloc failed for mgmt_b.\n");
1190 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
1191 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
1194 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1196 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
1197 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
1199 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
1203 ql84_mgmt->mgmt.mgmtp.u.info.context);
1207 case QLA84_MGMT_WRITE_MEM:
1208 sg_cnt = dma_map_sg(&ha->pdev->dev,
1209 bsg_job->request_payload.sg_list,
1210 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1213 ql_log(ql_log_warn, vha, 0x7040,
1214 "dma_map_sg returned %d.\n", sg_cnt);
1219 dma_direction = DMA_TO_DEVICE;
1221 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1222 ql_log(ql_log_warn, vha, 0x7041,
1223 "DMA mapping resulted in different sg counts, "
1224 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1225 bsg_job->request_payload.sg_cnt, sg_cnt);
1230 data_len = bsg_job->request_payload.payload_len;
1231 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1232 &mgmt_dma, GFP_KERNEL);
1234 ql_log(ql_log_warn, vha, 0x7042,
1235 "DMA alloc failed for mgmt_b.\n");
1240 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1241 bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
1243 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
1245 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
/* Config changes carry their parameters inline — no data buffer. */
1248 case QLA84_MGMT_CHNG_CONFIG:
1249 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
1251 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
1254 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
1257 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
/* All data-carrying commands point the IOCB at the staging buffer. */
1265 if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1266 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1267 mn->dseg_count = cpu_to_le16(1);
1268 put_unaligned_le64(mgmt_dma, &mn->dsd.address);
1269 mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
1272 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1275 ql_log(ql_log_warn, vha, 0x7043,
1276 "Vendor request 84xx mgmt failed.\n");
1278 rval = (DID_ERROR << 16);
1281 ql_dbg(ql_dbg_user, vha, 0x7044,
1282 "Vendor request 84xx mgmt completed.\n");
1284 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1285 bsg_reply->result = DID_OK;
/* For read/get-info, copy results back out to the reply SG list. */
1287 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1288 (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
1289 bsg_reply->reply_payload_rcv_len =
1290 bsg_job->reply_payload.payload_len;
1292 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1293 bsg_job->reply_payload.sg_cnt, mgmt_b,
1300 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
/* Undo exactly the mapping recorded in dma_direction. */
1302 if (dma_direction == DMA_TO_DEVICE)
1303 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1304 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1305 else if (dma_direction == DMA_FROM_DEVICE)
1306 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1307 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1310 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1313 bsg_job_done(bsg_job, bsg_reply->result,
1314 bsg_reply->reply_payload_rcv_len);
/*
 * qla24xx_iidma() - BSG vendor command: get or set the iiDMA (port speed)
 * setting of a remote target port identified by WWPN.
 *
 * port_param->mode selects the direction: non-zero = set speed via
 * qla2x00_set_idma_speed(), zero = query speed via qla2x00_get_idma_speed()
 * and copy the updated qla_port_param back after the fc_bsg_reply.
 *
 * NOTE(review): listing is truncated (error-path returns/braces elided);
 * comments describe only the visible code.
 */
1319 qla24xx_iidma(struct bsg_job *bsg_job)
1321 	struct fc_bsg_request *bsg_request = bsg_job->request;
1322 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1323 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1324 	scsi_qla_host_t *vha = shost_priv(host);
1326 	struct qla_port_param *port_param = NULL;
1327 	fc_port_t *fcport = NULL;
1329 	uint16_t mb[MAILBOX_REGISTER_COUNT];
1330 	uint8_t *rsp_ptr = NULL;
1332 	if (!IS_IIDMA_CAPABLE(vha->hw)) {
1333 		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
	/* Vendor payload immediately follows the fc_bsg_request header. */
1337 	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
1338 	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1339 		ql_log(ql_log_warn, vha, 0x7048,
1340 		    "Invalid destination type.\n");
	/* Locate the target fcport whose port_name matches the given WWPN. */
1344 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1345 		if (fcport->port_type != FCT_TARGET)
1348 		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1349 		    fcport->port_name, sizeof(fcport->port_name)))
1357 		ql_log(ql_log_warn, vha, 0x7049,
1358 		    "Failed to find port.\n");
	/* Port must be fully logged in before issuing the mailbox command. */
1362 	if (atomic_read(&fcport->state) != FCS_ONLINE) {
1363 		ql_log(ql_log_warn, vha, 0x704a,
1364 		    "Port is not online.\n");
1368 	if (fcport->flags & FCF_LOGIN_NEEDED) {
1369 		ql_log(ql_log_warn, vha, 0x704b,
1370 		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1374 	if (port_param->mode)
1375 		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1376 			port_param->speed, mb);
1378 		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1379 			&port_param->speed, mb);
1382 		ql_log(ql_log_warn, vha, 0x704c,
1383 		    "iiDMA cmd failed for %8phN -- "
1384 		    "%04x %x %04x %04x.\n", fcport->port_name,
1385 		    rval, fcport->fp_speed, mb[0], mb[1]);
1386 		rval = (DID_ERROR << 16);
	/* Query mode: return the (possibly updated) port_param to userspace. */
1388 		if (!port_param->mode) {
1389 			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1390 				sizeof(struct qla_port_param);
1392 			rsp_ptr = ((uint8_t *)bsg_reply) +
1393 				sizeof(struct fc_bsg_reply);
1395 			memcpy(rsp_ptr, port_param,
1396 				sizeof(struct qla_port_param));
1399 		bsg_reply->result = DID_OK;
1400 		bsg_job_done(bsg_job, bsg_reply->result,
1401 			       bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_optrom_setup() - Common setup for option-ROM read/update BSG
 * requests: validates the start offset, checks that no other optrom
 * operation is in flight, computes the clamped region size, sets
 * ha->optrom_state (QLA_SREADING/QLA_SWRITING) and allocates the
 * staging buffer (vzalloc).
 *
 * @is_update: non-zero for flash update (write) path, zero for read.
 *
 * NOTE(review): caller is expected to hold ha->optrom_mutex (see the
 * read/update callers below); listing is truncated, error returns elided.
 */
1408 qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
1411 	struct fc_bsg_request *bsg_request = bsg_job->request;
1414 	struct qla_hw_data *ha = vha->hw;
1416 	if (unlikely(pci_channel_offline(ha->pdev)))
	/* Requested flash start offset comes from the vendor command word. */
1419 	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1420 	if (start > ha->optrom_size) {
1421 		ql_log(ql_log_warn, vha, 0x7055,
1422 		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
	/* Only one optrom operation may be staged at a time. */
1426 	if (ha->optrom_state != QLA_SWAITING) {
1427 		ql_log(ql_log_info, vha, 0x7056,
1428 		    "optrom_state %d.\n", ha->optrom_state);
1432 	ha->optrom_region_start = start;
1433 	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	/* Validate that updates target a permitted flash region. */
1435 	if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1437 	else if (start == (ha->flt_region_boot * 4) ||
1438 	    start == (ha->flt_region_fw * 4))
1440 	else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1441 	    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
1445 		ql_log(ql_log_warn, vha, 0x7058,
1446 		    "Invalid start region 0x%x/0x%x.\n", start,
1447 		    bsg_job->request_payload.payload_len);
	/* Clamp the region size so start + len never exceeds the flash. */
1451 	ha->optrom_region_size = start +
1452 	    bsg_job->request_payload.payload_len > ha->optrom_size ?
1453 	    ha->optrom_size - start :
1454 	    bsg_job->request_payload.payload_len;
1455 	ha->optrom_state = QLA_SWRITING;
1457 	ha->optrom_region_size = start +
1458 	    bsg_job->reply_payload.payload_len > ha->optrom_size ?
1459 	    ha->optrom_size - start :
1460 	    bsg_job->reply_payload.payload_len;
1461 	ha->optrom_state = QLA_SREADING;
1464 	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
1465 	if (!ha->optrom_buffer) {
1466 		ql_log(ql_log_warn, vha, 0x7059,
1467 		    "Read: Unable to allocate memory for optrom retrieval "
1468 		    "(%x)\n", ha->optrom_region_size);
		/* Roll back state so a later request can proceed. */
1470 		ha->optrom_state = QLA_SWAITING;
/*
 * qla2x00_read_optrom() - BSG vendor command: read a region of the
 * adapter option ROM into the reply payload.
 *
 * Serializes against other optrom operations with ha->optrom_mutex,
 * stages the data through ha->optrom_buffer via the isp_ops->read_optrom
 * hook, copies it to the bsg reply scatterlist, then frees the buffer
 * and resets optrom_state to QLA_SWAITING.
 */
1478 qla2x00_read_optrom(struct bsg_job *bsg_job)
1480 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1481 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1482 	scsi_qla_host_t *vha = shost_priv(host);
1483 	struct qla_hw_data *ha = vha->hw;
	/* Refuse while an NIC core reset handler owns the flash. */
1486 	if (ha->flags.nic_core_reset_hdlr_active)
1489 	mutex_lock(&ha->optrom_mutex);
1490 	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1492 		mutex_unlock(&ha->optrom_mutex);
1496 	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1497 	    ha->optrom_region_start, ha->optrom_region_size);
1499 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1500 	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1501 	    ha->optrom_region_size);
1503 	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
1504 	bsg_reply->result = DID_OK;
1505 	vfree(ha->optrom_buffer);
1506 	ha->optrom_buffer = NULL;
1507 	ha->optrom_state = QLA_SWAITING;
1508 	mutex_unlock(&ha->optrom_mutex);
1509 	bsg_job_done(bsg_job, bsg_reply->result,
1510 		       bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_update_optrom() - BSG vendor command: write (flash) a region
 * of the adapter option ROM from the request payload.
 *
 * Mirrors qla2x00_read_optrom(): stages the user data in
 * ha->optrom_buffer under ha->optrom_mutex, writes it via
 * isp_ops->write_optrom, then releases the buffer and state.
 */
1515 qla2x00_update_optrom(struct bsg_job *bsg_job)
1517 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1518 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1519 	scsi_qla_host_t *vha = shost_priv(host);
1520 	struct qla_hw_data *ha = vha->hw;
1523 	mutex_lock(&ha->optrom_mutex);
1524 	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1526 		mutex_unlock(&ha->optrom_mutex);
1530 	/* Set the isp82xx_no_md_cap not to capture minidump */
1531 	ha->flags.isp82xx_no_md_cap = 1;
1533 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1534 	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1535 	    ha->optrom_region_size);
1537 	rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1538 	    ha->optrom_region_start, ha->optrom_region_size);
	/* write_optrom failure reported to userspace as -EINVAL. */
1541 		bsg_reply->result = -EINVAL;
1544 		bsg_reply->result = DID_OK;
1546 	vfree(ha->optrom_buffer);
1547 	ha->optrom_buffer = NULL;
1548 	ha->optrom_state = QLA_SWAITING;
1549 	mutex_unlock(&ha->optrom_mutex);
1550 	bsg_job_done(bsg_job, bsg_reply->result,
1551 		       bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_update_fru_versions() - BSG vendor command: write a list of
 * FRU image version records to the adapter via SFP writes.
 *
 * Copies a qla_image_version_list from the request payload into a
 * stack buffer, then writes each entry's field_info through a DMA
 * bounce buffer from ha->s_dma_pool using qla2x00_write_sfp().
 * Status is returned in vendor_rsp[0] (EXT_STATUS_* codes).
 */
1556 qla2x00_update_fru_versions(struct bsg_job *bsg_job)
1558 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1559 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1560 	scsi_qla_host_t *vha = shost_priv(host);
1561 	struct qla_hw_data *ha = vha->hw;
1563 	uint8_t bsg[DMA_POOL_SIZE];
1564 	struct qla_image_version_list *list = (void *)bsg;
1565 	struct qla_image_version *image;
	/* DMA-able bounce buffer for the SFP write mailbox command. */
1568 	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1571 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1572 		    EXT_STATUS_NO_MEMORY;
1576 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1577 	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1579 	image = list->version;
1580 	count = list->count;
	/* Per-entry write loop body (loop header elided in this listing). */
1582 		memcpy(sfp, &image->field_info, sizeof(image->field_info));
1583 		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1584 		    image->field_address.device, image->field_address.offset,
1585 		    sizeof(image->field_info), image->field_address.option);
1587 			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	/* Success: vendor status 0. */
1594 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1597 	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1600 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1601 	bsg_reply->result = DID_OK << 16;
1602 	bsg_job_done(bsg_job, bsg_reply->result,
1603 		       bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_read_fru_status() - BSG vendor command: read one FRU status
 * register via qla2x00_read_sfp() and return the qla_status_reg
 * (with status_reg filled in) in the reply payload.
 */
1609 qla2x00_read_fru_status(struct bsg_job *bsg_job)
1611 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1612 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1613 	scsi_qla_host_t *vha = shost_priv(host);
1614 	struct qla_hw_data *ha = vha->hw;
1616 	uint8_t bsg[DMA_POOL_SIZE];
1617 	struct qla_status_reg *sr = (void *)bsg;
	/* One-byte DMA bounce buffer for the SFP read. */
1619 	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1622 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1623 		    EXT_STATUS_NO_MEMORY;
1627 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1628 	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1630 	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1631 	    sr->field_address.device, sr->field_address.offset,
1632 	    sizeof(sr->status_reg), sr->field_address.option);
1633 	sr->status_reg = *sfp;
1636 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	/* Success path: echo the updated qla_status_reg back to userspace. */
1641 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1642 	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1644 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1647 	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1650 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1651 	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
1652 	bsg_reply->result = DID_OK << 16;
1653 	bsg_job_done(bsg_job, bsg_reply->result,
1654 		       bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_write_fru_status() - BSG vendor command: write one FRU status
 * register. The qla_status_reg from the request payload supplies both
 * the field address and the byte value to write via qla2x00_write_sfp().
 */
1660 qla2x00_write_fru_status(struct bsg_job *bsg_job)
1662 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1663 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1664 	scsi_qla_host_t *vha = shost_priv(host);
1665 	struct qla_hw_data *ha = vha->hw;
1667 	uint8_t bsg[DMA_POOL_SIZE];
1668 	struct qla_status_reg *sr = (void *)bsg;
1670 	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1673 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1674 		    EXT_STATUS_NO_MEMORY;
1678 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1679 	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
	/* Stage the single status byte in the DMA bounce buffer. */
1681 	*sfp = sr->status_reg;
1682 	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1683 	    sr->field_address.device, sr->field_address.offset,
1684 	    sizeof(sr->status_reg), sr->field_address.option);
1687 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1692 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1695 	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1698 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1699 	bsg_reply->result = DID_OK << 16;
1700 	bsg_job_done(bsg_job, bsg_reply->result,
1701 		       bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_write_i2c() - BSG vendor command: write i2c->length bytes from
 * the qla_i2c_access request to an I2C device/offset on the adapter,
 * using a DMA bounce buffer and qla2x00_write_sfp().
 */
1707 qla2x00_write_i2c(struct bsg_job *bsg_job)
1709 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1710 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1711 	scsi_qla_host_t *vha = shost_priv(host);
1712 	struct qla_hw_data *ha = vha->hw;
1714 	uint8_t bsg[DMA_POOL_SIZE];
1715 	struct qla_i2c_access *i2c = (void *)bsg;
1717 	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1720 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1721 		    EXT_STATUS_NO_MEMORY;
1725 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1726 	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
	/* NOTE(review): i2c->length comes from userspace; bounds check
	 * against the pool buffer is not visible in this listing. */
1728 	memcpy(sfp, i2c->buffer, i2c->length);
1729 	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1730 	    i2c->device, i2c->offset, i2c->length, i2c->option);
1733 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1738 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1741 	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1744 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1745 	bsg_reply->result = DID_OK << 16;
1746 	bsg_job_done(bsg_job, bsg_reply->result,
1747 		       bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_read_i2c() - BSG vendor command: read i2c->length bytes from an
 * I2C device/offset on the adapter into the qla_i2c_access buffer and
 * return the whole structure in the reply payload.
 */
1753 qla2x00_read_i2c(struct bsg_job *bsg_job)
1755 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1756 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1757 	scsi_qla_host_t *vha = shost_priv(host);
1758 	struct qla_hw_data *ha = vha->hw;
1760 	uint8_t bsg[DMA_POOL_SIZE];
1761 	struct qla_i2c_access *i2c = (void *)bsg;
1763 	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1766 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1767 		    EXT_STATUS_NO_MEMORY;
1771 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1772 	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1774 	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1775 	    i2c->device, i2c->offset, i2c->length, i2c->option);
1778 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	/* Success: copy DMA buffer into the struct, return it to userspace.
	 * NOTE(review): i2c->length is user-supplied; bounds check not
	 * visible in this truncated listing. */
1783 	memcpy(i2c->buffer, sfp, i2c->length);
1784 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1785 	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1787 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1790 	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1793 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1794 	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
1795 	bsg_reply->result = DID_OK << 16;
1796 	bsg_job_done(bsg_job, bsg_reply->result,
1797 		       bsg_reply->reply_payload_rcv_len);
/*
 * qla24xx_process_bidir_cmd() - BSG vendor command: issue a bidirectional
 * (loopback-style) IOCB through the adapter's self-login port.
 *
 * Precondition gates: BIDI-capable adapter, no ISP abort in progress,
 * host online, cable present, switch (F-port) topology, P2P operating
 * mode. Performs a one-time self fabric login (cached in
 * vha->self_login_loop_id), DMA-maps both payloads, builds an
 * SRB_BIDI_CMD srb and fires it with qla2x00_start_bidir(). On success
 * completion happens from the interrupt handler; on any failure the
 * EXT_STATUS_* code is returned in vendor_rsp[0] with DID_OK so the
 * vendor status carries the real result.
 *
 * NOTE(review): listing is truncated — goto labels, returns and some
 * condition lines are elided.
 */
1803 qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
1805 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1806 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1807 	scsi_qla_host_t *vha = shost_priv(host);
1808 	struct qla_hw_data *ha = vha->hw;
1809 	uint32_t rval = EXT_STATUS_OK;
1810 	uint16_t req_sg_cnt = 0;
1811 	uint16_t rsp_sg_cnt = 0;
1812 	uint16_t nextlid = 0;
1815 	uint32_t req_data_len;
1816 	uint32_t rsp_data_len;
1818 	/* Check the type of the adapter */
1819 	if (!IS_BIDI_CAPABLE(ha)) {
1820 		ql_log(ql_log_warn, vha, 0x70a0,
1821 			"This adapter is not supported\n");
1822 		rval = EXT_STATUS_NOT_SUPPORTED;
	/* Busy out while an ISP abort/reset is pending or active. */
1826 	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1827 		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1828 		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1829 		rval =  EXT_STATUS_BUSY;
1833 	/* Check if host is online */
1834 	if (!vha->flags.online) {
1835 		ql_log(ql_log_warn, vha, 0x70a1,
1836 			"Host is not online\n");
1837 		rval = EXT_STATUS_DEVICE_OFFLINE;
1841 	/* Check if cable is plugged in or not */
1842 	if (vha->device_flags & DFLG_NO_CABLE) {
1843 		ql_log(ql_log_warn, vha, 0x70a2,
1844 			"Cable is unplugged...\n");
1845 		rval = EXT_STATUS_INVALID_CFG;
1849 	/* Check if the switch is connected or not */
1850 	if (ha->current_topology != ISP_CFG_F) {
1851 		ql_log(ql_log_warn, vha, 0x70a3,
1852 			"Host is not connected to the switch\n");
1853 		rval = EXT_STATUS_INVALID_CFG;
1857 	/* Check if operating mode is P2P */
1858 	if (ha->operating_mode != P2P) {
1859 		ql_log(ql_log_warn, vha, 0x70a4,
1860 			"Host operating mode is not P2p\n");
1861 		rval = EXT_STATUS_INVALID_CFG;
	/* One-time self login: selflogin_lock protects the cached loop id. */
1865 	mutex_lock(&ha->selflogin_lock);
1866 	if (vha->self_login_loop_id == 0) {
1867 		/* Initialize all required  fields of fcport */
1868 		vha->bidir_fcport.vha = vha;
1869 		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1870 		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1871 		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1872 		vha->bidir_fcport.loop_id = vha->loop_id;
1874 		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1875 			ql_log(ql_log_warn, vha, 0x70a7,
1876 			    "Failed to login port %06X for bidirectional IOCB\n",
1877 			    vha->bidir_fcport.d_id.b24);
1878 			mutex_unlock(&ha->selflogin_lock);
1879 			rval = EXT_STATUS_MAILBOX;
		/* fabric_login returns the next free loop id in nextlid. */
1882 		vha->self_login_loop_id = nextlid - 1;
1885 	/* Assign the self login loop id to fcport */
1886 	mutex_unlock(&ha->selflogin_lock);
1888 	vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1890 	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1891 	    bsg_job->request_payload.sg_list,
1892 	    bsg_job->request_payload.sg_cnt,
1896 		rval = EXT_STATUS_NO_MEMORY;
1900 	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1901 	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1905 		rval = EXT_STATUS_NO_MEMORY;
1906 		goto done_unmap_req_sg;
	/* Mapped counts must match the originals or the IOCB DSDs would
	 * not line up with the payload. */
1909 	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
1910 		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1911 		ql_dbg(ql_dbg_user, vha, 0x70a9,
1912 		    "Dma mapping resulted in different sg counts "
1913 		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
1914 		    "%x dma_reply_sg_cnt: %x]\n",
1915 		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
1916 		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1917 		rval = EXT_STATUS_NO_MEMORY;
	/* Bidirectional transfer requires symmetric lengths. */
1921 	req_data_len = bsg_job->request_payload.payload_len;
1922 	rsp_data_len = bsg_job->reply_payload.payload_len;
1924 	if (req_data_len != rsp_data_len) {
1925 		rval = EXT_STATUS_BUSY;
1926 		ql_log(ql_log_warn, vha, 0x70aa,
1927 		    "req_data_len != rsp_data_len\n");
1931 	/* Alloc SRB structure */
1932 	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1934 		ql_dbg(ql_dbg_user, vha, 0x70ac,
1935 		    "Alloc SRB structure failed\n");
1936 		rval = EXT_STATUS_NO_MEMORY;
1940 	/*Populate srb->ctx with bidir ctx*/
1941 	sp->u.bsg_job = bsg_job;
1942 	sp->free = qla2x00_bsg_sp_free;
1943 	sp->type = SRB_BIDI_CMD;
1944 	sp->done = qla2x00_bsg_job_done;
1946 	/* Add the read and write sg count */
1947 	tot_dsds = rsp_sg_cnt + req_sg_cnt;
1949 	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1950 	if (rval != EXT_STATUS_OK)
1952 	/* the bsg request will be completed in the interrupt handler */
	/* Error unwind: free srb, then unmap in reverse order of mapping. */
1956 	mempool_free(sp, ha->srb_mempool);
1958 	dma_unmap_sg(&ha->pdev->dev,
1959 	    bsg_job->reply_payload.sg_list,
1960 	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1962 	dma_unmap_sg(&ha->pdev->dev,
1963 	    bsg_job->request_payload.sg_list,
1964 	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1967 	/* Return an error vendor specific response
1968 	 * and complete the bsg request
1970 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1971 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1972 	bsg_reply->reply_payload_rcv_len = 0;
1973 	bsg_reply->result = (DID_OK) << 16;
1974 	bsg_job_done(bsg_job, bsg_reply->result,
1975 		       bsg_reply->reply_payload_rcv_len);
1976 	/* Always return success, vendor rsp carries correct status */
/*
 * qlafx00_mgmt_cmd() - BSG vendor command (ISPFx00): pass a management
 * IOCB (qla_mt_iocb_rqst_fx00, embedded after vendor_cmd[0]) through to
 * the firmware.
 *
 * DMA-maps the request/reply payloads only when the corresponding
 * SRB_FXDISC_{REQ,RESP}_DMA_VALID flag is set, allocates a dummy fcport
 * (IOCB build code reads port info from one), and submits an
 * SRB_FXIOCB_BCMD srb via qla2x00_start_sp(). Completion/unmap on the
 * success path happens in qla2x00_bsg_sp_free()/job_done callbacks.
 *
 * NOTE(review): listing is truncated — returns, labels and several
 * condition lines are elided.
 */
1981 qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
1983 	struct fc_bsg_request *bsg_request = bsg_job->request;
1984 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1985 	scsi_qla_host_t *vha = shost_priv(host);
1986 	struct qla_hw_data *ha = vha->hw;
1987 	int rval = (DID_ERROR << 16);
1988 	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
1990 	int req_sg_cnt = 0, rsp_sg_cnt = 0;
1991 	struct fc_port *fcport;
1992 	char  *type = "FC_BSG_HST_FX_MGMT";
1994 	/* Copy the IOCB specific information */
1995 	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
1996 	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1998 	/* Dump the vendor information */
1999 	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
2000 	    piocb_rqst, sizeof(*piocb_rqst));
2002 	if (!vha->flags.online) {
2003 		ql_log(ql_log_warn, vha, 0x70d0,
2004 		    "Host is not online.\n");
	/* Map the outbound payload only if the IOCB says it is valid. */
2009 	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
2010 		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
2011 		    bsg_job->request_payload.sg_list,
2012 		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2014 			ql_log(ql_log_warn, vha, 0x70c7,
2015 			    "dma_map_sg return %d for request\n", req_sg_cnt);
	/* Likewise for the inbound payload. */
2021 	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
2022 		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
2023 		    bsg_job->reply_payload.sg_list,
2024 		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2026 			ql_log(ql_log_warn, vha, 0x70c8,
2027 			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
2029 			goto done_unmap_req_sg;
2033 	ql_dbg(ql_dbg_user, vha, 0x70c9,
2034 	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
2035 	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
2036 	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
2038 	/* Allocate a dummy fcport structure, since functions preparing the
2039 	 * IOCB and mailbox command retrieves port specific information
2040 	 * from fcport structure. For Host based ELS commands there will be
2041 	 * no fcport structure allocated
2043 	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2045 		ql_log(ql_log_warn, vha, 0x70ca,
2046 		    "Failed to allocate fcport.\n");
2048 		goto done_unmap_rsp_sg;
2051 	/* Alloc SRB structure */
2052 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2054 		ql_log(ql_log_warn, vha, 0x70cb,
2055 		    "qla2x00_get_sp failed.\n");
2057 		goto done_free_fcport;
2060 	/* Initialize all required  fields of fcport */
2062 	fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);
2064 	sp->type = SRB_FXIOCB_BCMD;
2065 	sp->name = "bsg_fx_mgmt";
2066 	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
2067 	sp->u.bsg_job = bsg_job;
2068 	sp->free = qla2x00_bsg_sp_free;
2069 	sp->done = qla2x00_bsg_job_done;
2071 	ql_dbg(ql_dbg_user, vha, 0x70cc,
2072 	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
2073 	    type, piocb_rqst->func_type, fcport->loop_id);
2075 	rval = qla2x00_start_sp(sp);
2076 	if (rval != QLA_SUCCESS) {
2077 		ql_log(ql_log_warn, vha, 0x70cd,
2078 		    "qla2x00_start_sp failed=%d.\n", rval);
2079 		mempool_free(sp, ha->srb_mempool);
2081 		goto done_free_fcport;
	/* Error unwind labels (elided): free fcport, then unmap payloads. */
2086 	qla2x00_free_fcport(fcport);
2089 	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
2090 		dma_unmap_sg(&ha->pdev->dev,
2091 		    bsg_job->reply_payload.sg_list,
2092 		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2094 	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
2095 		dma_unmap_sg(&ha->pdev->dev,
2096 		    bsg_job->request_payload.sg_list,
2097 		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
/*
 * qla26xx_serdes_op() - BSG vendor command: read or write a single SerDes
 * register word (qla_serdes_reg: cmd/addr/val) on ISP26xx-class adapters.
 * Mailbox failure is reported as EXT_STATUS_MAILBOX in vendor_rsp[0].
 */
2104 qla26xx_serdes_op(struct bsg_job *bsg_job)
2106 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2107 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2108 	scsi_qla_host_t *vha = shost_priv(host);
2110 	struct qla_serdes_reg sr;
2112 	memset(&sr, 0, sizeof(sr));
2114 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2115 	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
	/* sr.cmd dispatch (switch header elided in this listing). */
2118 	case INT_SC_SERDES_WRITE_REG:
2119 		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2120 		bsg_reply->reply_payload_rcv_len = 0;
2122 	case INT_SC_SERDES_READ_REG:
2123 		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
		/* Return the whole struct so userspace sees the read value. */
2124 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2125 		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2126 		bsg_reply->reply_payload_rcv_len = sizeof(sr);
2129 		ql_dbg(ql_dbg_user, vha, 0x708c,
2130 		    "Unknown serdes cmd %x.\n", sr.cmd);
2135 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2136 	    rval ? EXT_STATUS_MAILBOX : 0;
2138 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2139 	bsg_reply->result = DID_OK << 16;
2140 	bsg_job_done(bsg_job, bsg_reply->result,
2141 		       bsg_reply->reply_payload_rcv_len);
/*
 * qla8044_serdes_op() - BSG vendor command: read or write a SerDes
 * register on ISP8044 adapters. Same shape as qla26xx_serdes_op() but
 * uses the extended qla_serdes_reg_ex request and the qla8044_* mailbox
 * helpers.
 */
2146 qla8044_serdes_op(struct bsg_job *bsg_job)
2148 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2149 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2150 	scsi_qla_host_t *vha = shost_priv(host);
2152 	struct qla_serdes_reg_ex sr;
2154 	memset(&sr, 0, sizeof(sr));
2156 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2157 	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
	/* sr.cmd dispatch (switch header elided in this listing). */
2160 	case INT_SC_SERDES_WRITE_REG:
2161 		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2162 		bsg_reply->reply_payload_rcv_len = 0;
2164 	case INT_SC_SERDES_READ_REG:
2165 		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2166 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2167 		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2168 		bsg_reply->reply_payload_rcv_len = sizeof(sr);
2171 		ql_dbg(ql_dbg_user, vha, 0x7020,
2172 		    "Unknown serdes cmd %x.\n", sr.cmd);
2177 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2178 	    rval ? EXT_STATUS_MAILBOX : 0;
2180 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2181 	bsg_reply->result = DID_OK << 16;
2182 	bsg_job_done(bsg_job, bsg_reply->result,
2183 		       bsg_reply->reply_payload_rcv_len);
/*
 * qla27xx_get_flash_upd_cap() - BSG vendor command (ISP27xx/28xx only):
 * return the firmware attribute words packed into a 64-bit
 * qla_flash_update_caps.capabilities value.
 */
2188 qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
2190 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2191 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2192 	scsi_qla_host_t *vha = shost_priv(host);
2193 	struct qla_hw_data *ha = vha->hw;
2194 	struct qla_flash_update_caps cap;
2196 	if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha))
2199 	memset(&cap, 0, sizeof(cap));
	/* Pack the four 16-bit fw attribute words into one 64-bit value:
	 * ext[1]:ext[0]:attributes_h:attributes (high to low). */
2200 	cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2201 	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
2202 	    (uint64_t)ha->fw_attributes_h << 16 |
2203 	    (uint64_t)ha->fw_attributes;
2205 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2206 	    bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
2207 	bsg_reply->reply_payload_rcv_len = sizeof(cap);
2209 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2212 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2213 	bsg_reply->result = DID_OK << 16;
2214 	bsg_job_done(bsg_job, bsg_reply->result,
2215 		       bsg_reply->reply_payload_rcv_len);
/*
 * qla27xx_set_flash_upd_cap() - BSG vendor command (ISP27xx/28xx only):
 * validate a requested flash-update capability set against the running
 * firmware's attributes and the allowed outage duration. Only status is
 * returned (vendor_rsp[0]); no payload data goes back.
 */
2220 qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
2222 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2223 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2224 	scsi_qla_host_t *vha = shost_priv(host);
2225 	struct qla_hw_data *ha = vha->hw;
2226 	uint64_t online_fw_attr = 0;
2227 	struct qla_flash_update_caps cap;
2229 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2232 	memset(&cap, 0, sizeof(cap));
2233 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2234 	    bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
	/* Rebuild the packed attribute word to compare with the request. */
2236 	online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2237 	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
2238 	    (uint64_t)ha->fw_attributes_h << 16 |
2239 	    (uint64_t)ha->fw_attributes;
	/* Requested capabilities must exactly match the online firmware. */
2241 	if (online_fw_attr != cap.capabilities) {
2242 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2243 		    EXT_STATUS_INVALID_PARAM;
2247 	if (cap.outage_duration < MAX_LOOP_TIMEOUT)  {
2248 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2249 		    EXT_STATUS_INVALID_PARAM;
2253 	bsg_reply->reply_payload_rcv_len = 0;
2255 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2258 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2259 	bsg_reply->result = DID_OK << 16;
2260 	bsg_job_done(bsg_job, bsg_reply->result,
2261 		       bsg_reply->reply_payload_rcv_len);
/*
 * qla27xx_get_bbcr_data() - BSG vendor command (ISP27xx/28xx only):
 * report Buffer-to-Buffer Credit Recovery (BBCR) status.
 *
 * When BBCR is enabled, queries the adapter id mailbox to refresh state,
 * then decodes vha->bbcr: bit 12 = online/offline state, bits 8-11 =
 * negotiated BBSCN, bits 0-3 = configured BBSCN. The filled
 * qla_bbcr_data is returned in the reply payload.
 */
2266 qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
2268 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2269 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2270 	scsi_qla_host_t *vha = shost_priv(host);
2271 	struct qla_hw_data *ha = vha->hw;
2272 	struct qla_bbcr_data bbcr;
2273 	uint16_t loop_id, topo, sw_cap;
2274 	uint8_t domain, area, al_pa, state;
2277 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2280 	memset(&bbcr, 0, sizeof(bbcr));
2282 	if (vha->flags.bbcr_enable)
2283 		bbcr.status = QLA_BBCR_STATUS_ENABLED;
2285 		bbcr.status = QLA_BBCR_STATUS_DISABLED;
2287 	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
2288 		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2289 			&area, &domain, &topo, &sw_cap);
		/* Mailbox failure: report unknown/offline, mbx1 for debug. */
2290 		if (rval != QLA_SUCCESS) {
2291 			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
2292 			bbcr.state = QLA_BBCR_STATE_OFFLINE;
2293 			bbcr.mbx1 = loop_id;
2297 		state = (vha->bbcr >> 12) & 0x1;
2300 			bbcr.state = QLA_BBCR_STATE_OFFLINE;
2301 			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
2303 			bbcr.state = QLA_BBCR_STATE_ONLINE;
2304 			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
2307 		bbcr.configured_bbscn = vha->bbcr & 0xf;
2311 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2312 		bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
2313 	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
2315 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2317 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2318 	bsg_reply->result = DID_OK << 16;
2319 	bsg_job_done(bsg_job, bsg_reply->result,
2320 		       bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_get_priv_stats() - BSG vendor command: fetch private link
 * statistics from the (FWI2-capable) adapter into a coherent DMA buffer
 * and return them in the reply payload.
 *
 * For QL_VND_GET_PRIV_STATS_EX, vendor_cmd[1] supplies extra options to
 * qla24xx_get_isp_stats(); otherwise options is 0. Stats are requested
 * through the base (physical) vha. The DMA buffer is always freed on
 * exit.
 */
2325 qla2x00_get_priv_stats(struct bsg_job *bsg_job)
2327 	struct fc_bsg_request *bsg_request = bsg_job->request;
2328 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2329 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2330 	scsi_qla_host_t *vha = shost_priv(host);
2331 	struct qla_hw_data *ha = vha->hw;
2332 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2333 	struct link_statistics *stats = NULL;
2334 	dma_addr_t stats_dma;
2336 	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
2337 	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
	/* Gate on driver unload, PCI channel state and active resets. */
2339 	if (test_bit(UNLOADING, &vha->dpc_flags))
2342 	if (unlikely(pci_channel_offline(ha->pdev)))
2345 	if (qla2x00_reset_active(vha))
2348 	if (!IS_FWI2_CAPABLE(ha))
2351 	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2354 		ql_log(ql_log_warn, vha, 0x70e2,
2355 		    "Failed to allocate memory for stats.\n");
2359 	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
2361 	if (rval == QLA_SUCCESS) {
2362 		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
2363 			stats, sizeof(*stats));
2364 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2365 			bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
2368 	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
2369 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2370 	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2372 	bsg_job->reply_len = sizeof(*bsg_reply);
2373 	bsg_reply->result = DID_OK << 16;
2374 	bsg_job_done(bsg_job, bsg_reply->result,
2375 	    bsg_reply->reply_payload_rcv_len);
2377 	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
/*
 * qla2x00_do_dport_diagnostics() - BSG vendor command (ISP83xx/27xx/28xx):
 * run D_Port diagnostics. The qla_dport_diag request supplies the options
 * and a result buffer that qla26xx_dport_diagnostics() fills; the whole
 * structure is echoed back in the reply payload on success.
 */
2384 qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
2386 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2387 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2388 	scsi_qla_host_t *vha = shost_priv(host);
2390 	struct qla_dport_diag *dd;
2392 	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
2393 	    !IS_QLA28XX(vha->hw))
2396 	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
2398 		ql_log(ql_log_warn, vha, 0x70db,
2399 		    "Failed to allocate memory for dport.\n");
2403 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2404 	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2406 	rval = qla26xx_dport_diagnostics(
2407 	    vha, dd->buf, sizeof(dd->buf), dd->options);
2408 	if (rval == QLA_SUCCESS) {
2409 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2410 		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2413 	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2414 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2415 	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2417 	bsg_job->reply_len = sizeof(*bsg_reply);
2418 	bsg_reply->result = DID_OK << 16;
2419 	bsg_job_done(bsg_job, bsg_reply->result,
2420 	    bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_get_flash_image_status() - BSG vendor command: report which
 * flash image regions (primary/secondary) are currently active.
 *
 * Fills a qla_active_regions from qla27xx_get_active_image() and, on
 * ISP28xx, the auxiliary regions from qla28xx_get_aux_images(), then
 * returns the structure in the reply payload with EXT_STATUS_OK.
 *
 * Fix: line 2454 contained mojibake "®ions" where the address-of
 * expression "&regions" belongs ("&reg" had been mis-decoded as the
 * registered-trademark sign); restored the correct second argument to
 * sg_copy_from_buffer().
 */
2428 qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
2430 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2431 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2432 	struct qla_hw_data *ha = vha->hw;
2433 	struct qla_active_regions regions = { };
2434 	struct active_regions active_regions = { };
2436 	qla27xx_get_active_image(vha, &active_regions);
2437 	regions.global_image = active_regions.global;
	/* ISP28xx additionally tracks auxiliary image regions. */
2439 	if (IS_QLA28XX(ha)) {
2440 		qla28xx_get_aux_images(vha, &active_regions);
2441 		regions.board_config = active_regions.aux.board_config;
2442 		regions.vpd_nvram = active_regions.aux.vpd_nvram;
2443 		regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
2444 		regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
2447 	ql_dbg(ql_dbg_user, vha, 0x70e1,
2448 	    "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
2449 	    __func__, vha->host_no, regions.global_image,
2450 	    regions.board_config, regions.vpd_nvram,
2451 	    regions.npiv_config_0_1, regions.npiv_config_2_3);
2453 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2454 	    bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));
2456 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2457 	bsg_reply->reply_payload_rcv_len = sizeof(regions);
2458 	bsg_reply->result = DID_OK << 16;
2459 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2460 	bsg_job_done(bsg_job, bsg_reply->result,
2461 	    bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_manage_host_stats() - BSG vendor command: start, stop or reset
 * host statistics collection.
 *
 * Validates that the host is online and that the request payload is
 * exactly a ql_vnd_mng_host_stats_param, copies it in, dispatches on
 * req_data->action to qla2xxx_{stop,start,reset}_stats(), and returns a
 * ql_vnd_mng_host_stats_resp carrying the helper's return code.
 */
2467 qla2x00_manage_host_stats(struct bsg_job *bsg_job)
2469 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2470 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2471 	struct ql_vnd_mng_host_stats_param *req_data;
2472 	struct ql_vnd_mng_host_stats_resp rsp_data;
2476 	if (!vha->flags.online) {
2477 		ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
	/* Reject payloads that are not exactly the expected struct size. */
2481 	req_data_len = bsg_job->request_payload.payload_len;
2483 	if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) {
2484 		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2488 	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2490 		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2494 	/* Copy the request buffer in req_data */
2495 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2496 			  bsg_job->request_payload.sg_cnt, req_data,
	/* Dispatch the requested stats action (case labels elided). */
2499 	switch (req_data->action) {
2501 		ret = qla2xxx_stop_stats(vha->host, req_data->stat_type);
2504 		ret = qla2xxx_start_stats(vha->host, req_data->stat_type);
2507 		ret = qla2xxx_reset_stats(vha->host, req_data->stat_type);
2510 		ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
2517 	/* Prepare response */
2518 	rsp_data.status = ret;
2519 	bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2521 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2522 	bsg_reply->reply_payload_rcv_len =
2523 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2524 				    bsg_job->reply_payload.sg_cnt,
2526 				    sizeof(struct ql_vnd_mng_host_stats_resp));
2528 	bsg_reply->result = DID_OK;
2529 	bsg_job_done(bsg_job, bsg_reply->result,
2530 		     bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_get_host_stats - BSG vendor handler: collect initiator (and
 * optionally per-target) statistics and copy them into the reply payload.
 *
 * NOTE(review): lines appear elided in this chunk (guards, kfree/return
 * paths, some labels); confirm control flow against full source.
 */
2536 qla2x00_get_host_stats(struct bsg_job *bsg_job)
2538 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2539 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2540 struct ql_vnd_stats_param *req_data;
2541 struct ql_vnd_host_stats_resp rsp_data;
2544 u64 ini_entry_count = 0;
2545 u64 entry_count = 0;
2547 u64 tmp_stat_type = 0;
2548 u64 response_len = 0;
/* Request payload must be exactly one ql_vnd_stats_param. */
2551 req_data_len = bsg_job->request_payload.payload_len;
2553 if (req_data_len != sizeof(struct ql_vnd_stats_param)) {
2554 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2558 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2560 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2564 /* Copy the request buffer in req_data */
2565 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2566 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
2568 /* Copy stat type to work on it */
2569 tmp_stat_type = req_data->stat_type;
2571 if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) {
2572 /* Num of tgts connected to this host */
2573 tgt_num = qla2x00_get_num_tgts(vha);
/* Clear the target-stat bit so only ini-stat bits remain counted below. */
2575 tmp_stat_type &= ~(1 << 17);
2578 /* Total ini stats */
2579 ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);
2581 /* Total number of entries */
2582 entry_count = ini_entry_count + tgt_num;
/* Response = fixed header plus one ql_vnd_stat_entry per entry. */
2584 response_len = sizeof(struct ql_vnd_host_stats_resp) +
2585 (sizeof(struct ql_vnd_stat_entry) * entry_count);
/*
 * Caller's reply buffer too small: return a status-only response.
 * NOTE(review): the copy size here uses the *mng* host-stats resp
 * struct, not ql_vnd_host_stats_resp — looks intentional (status-only
 * header) but worth confirming against the struct layouts.
 */
2587 if (response_len > bsg_job->reply_payload.payload_len) {
2588 rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL;
2589 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
2590 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2592 bsg_reply->reply_payload_rcv_len =
2593 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2594 bsg_job->reply_payload.sg_cnt, &rsp_data,
2595 sizeof(struct ql_vnd_mng_host_stats_resp));
2597 bsg_reply->result = DID_OK;
2598 bsg_job_done(bsg_job, bsg_reply->result,
2599 bsg_reply->reply_payload_rcv_len);
/* Scratch buffer for the full stats response. */
2603 data = kzalloc(response_len, GFP_KERNEL);
2609 ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
2610 data, response_len);
2612 rsp_data.status = EXT_STATUS_OK;
2613 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
/* Copy the collected stats into the reply scatterlist and complete. */
2615 bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2616 bsg_job->reply_payload.sg_cnt,
2617 data, response_len);
2618 bsg_reply->result = DID_OK;
2619 bsg_job_done(bsg_job, bsg_reply->result,
2620 bsg_reply->reply_payload_rcv_len);
/*
 * qla2xxx_find_rport - linear search of this host's fcport list for the
 * FC remote port whose rport->number matches @tgt_num.
 *
 * NOTE(review): the not-found tail (presumably "return NULL;") is not
 * visible in this chunk — confirm against full source.
 */
2628 static struct fc_rport *
2629 qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num)
2631 fc_port_t *fcport = NULL;
2633 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2634 if (fcport->rport->number == tgt_num)
2635 return fcport->rport;
/*
 * qla2x00_get_tgt_stats - BSG vendor handler: fetch statistics for a
 * single target (identified by req_data->tgt_id) and copy them into the
 * reply payload.
 *
 * NOTE(review): elided lines (error exits, kfree/return tails) are not
 * visible in this chunk; confirm against full source.
 */
2641 qla2x00_get_tgt_stats(struct bsg_job *bsg_job)
2643 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2644 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2645 struct ql_vnd_tgt_stats_param *req_data;
2648 u64 response_len = 0;
2649 struct ql_vnd_tgt_stats_resp *data = NULL;
2650 struct fc_rport *rport = NULL;
/* Reject the request while the host is offline. */
2652 if (!vha->flags.online) {
2653 ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
/*
 * Payload must be exactly one ql_vnd_stat_entry.
 * NOTE(review): size check uses ql_vnd_stat_entry while the buffer is
 * parsed as ql_vnd_tgt_stats_param — presumably same size; verify.
 */
2657 req_data_len = bsg_job->request_payload.payload_len;
2659 if (req_data_len != sizeof(struct ql_vnd_stat_entry)) {
2660 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2664 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2666 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2670 /* Copy the request buffer in req_data */
2671 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2672 bsg_job->request_payload.sg_cnt,
2673 req_data, req_data_len);
2675 response_len = sizeof(struct ql_vnd_tgt_stats_resp) +
2676 sizeof(struct ql_vnd_stat_entry);
2678 /* structure + size for one entry */
2679 data = kzalloc(response_len, GFP_KERNEL);
/* Reply buffer too small: send back a status-only response. */
2685 if (response_len > bsg_job->reply_payload.payload_len) {
2686 data->status = EXT_STATUS_BUFFER_TOO_SMALL;
2687 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
2688 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2690 bsg_reply->reply_payload_rcv_len =
2691 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2692 bsg_job->reply_payload.sg_cnt, data,
2693 sizeof(struct ql_vnd_tgt_stats_resp));
2695 bsg_reply->result = DID_OK;
2696 bsg_job_done(bsg_job, bsg_reply->result,
2697 bsg_reply->reply_payload_rcv_len);
/* Resolve the caller-supplied target id to an FC remote port. */
2701 rport = qla2xxx_find_rport(vha, req_data->tgt_id);
2703 ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id);
2704 ret = EXT_STATUS_INVALID_PARAM;
2705 data->status = EXT_STATUS_INVALID_PARAM;
2709 ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
2710 rport, (void *)data, response_len);
/* Copy the gathered stats to the reply scatterlist and complete. */
2712 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2714 bsg_reply->reply_payload_rcv_len =
2715 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2716 bsg_job->reply_payload.sg_cnt, data,
2718 bsg_reply->result = DID_OK;
2719 bsg_job_done(bsg_job, bsg_reply->result,
2720 bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_manage_host_port - BSG vendor handler: enable or disable the
 * host port, selected by req_data->action.
 *
 * NOTE(review): case labels and error-exit lines are elided in this
 * chunk; confirm against full source.
 */
2730 qla2x00_manage_host_port(struct bsg_job *bsg_job)
2732 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2733 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2734 struct ql_vnd_mng_host_port_param *req_data;
2735 struct ql_vnd_mng_host_port_resp rsp_data;
/* Payload must be exactly one host-port-param struct. */
2739 req_data_len = bsg_job->request_payload.payload_len;
2741 if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) {
2742 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2746 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2748 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2752 /* Copy the request buffer in req_data */
2753 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2754 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
/* Dispatch on the requested action (enable/disable port). */
2756 switch (req_data->action) {
2758 ret = qla2xxx_enable_port(vha->host);
2761 ret = qla2xxx_disable_port(vha->host);
2764 ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
2771 /* Prepare response */
2772 rsp_data.status = ret;
2773 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2774 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp);
/* Copy rsp_data into the reply scatterlist and complete the job. */
2776 bsg_reply->reply_payload_rcv_len =
2777 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2778 bsg_job->reply_payload.sg_cnt, &rsp_data,
2779 sizeof(struct ql_vnd_mng_host_port_resp));
2780 bsg_reply->result = DID_OK;
2781 bsg_job_done(bsg_job, bsg_reply->result,
2782 bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_process_vendor_specific - dispatch a FC_BSG_HST_VENDOR request
 * to the handler matching vendor_cmd[0]; returns that handler's result.
 *
 * NOTE(review): a few case labels (and the default arm) are elided in
 * this chunk — e.g. the label before qla24xx_iidma(); confirm against
 * full source.
 */
2788 qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job)
2790 struct fc_bsg_request *bsg_request = bsg_job->request;
2792 ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n",
2793 __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]);
/* vendor_cmd[0] selects the sub-command; each case delegates directly. */
2795 switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
2796 case QL_VND_LOOPBACK:
2797 return qla2x00_process_loopback(bsg_job);
2799 case QL_VND_A84_RESET:
2800 return qla84xx_reset(bsg_job);
2802 case QL_VND_A84_UPDATE_FW:
2803 return qla84xx_updatefw(bsg_job);
2805 case QL_VND_A84_MGMT_CMD:
2806 return qla84xx_mgmt_cmd(bsg_job);
2809 return qla24xx_iidma(bsg_job);
2811 case QL_VND_FCP_PRIO_CFG_CMD:
2812 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
2814 case QL_VND_READ_FLASH:
2815 return qla2x00_read_optrom(bsg_job);
2817 case QL_VND_UPDATE_FLASH:
2818 return qla2x00_update_optrom(bsg_job);
2820 case QL_VND_SET_FRU_VERSION:
2821 return qla2x00_update_fru_versions(bsg_job);
2823 case QL_VND_READ_FRU_STATUS:
2824 return qla2x00_read_fru_status(bsg_job);
2826 case QL_VND_WRITE_FRU_STATUS:
2827 return qla2x00_write_fru_status(bsg_job);
2829 case QL_VND_WRITE_I2C:
2830 return qla2x00_write_i2c(bsg_job);
2832 case QL_VND_READ_I2C:
2833 return qla2x00_read_i2c(bsg_job);
2835 case QL_VND_DIAG_IO_CMD:
2836 return qla24xx_process_bidir_cmd(bsg_job);
2838 case QL_VND_FX00_MGMT_CMD:
2839 return qlafx00_mgmt_cmd(bsg_job);
2841 case QL_VND_SERDES_OP:
2842 return qla26xx_serdes_op(bsg_job);
2844 case QL_VND_SERDES_OP_EX:
2845 return qla8044_serdes_op(bsg_job);
2847 case QL_VND_GET_FLASH_UPDATE_CAPS:
2848 return qla27xx_get_flash_upd_cap(bsg_job);
2850 case QL_VND_SET_FLASH_UPDATE_CAPS:
2851 return qla27xx_set_flash_upd_cap(bsg_job);
2853 case QL_VND_GET_BBCR_DATA:
2854 return qla27xx_get_bbcr_data(bsg_job);
2856 case QL_VND_GET_PRIV_STATS:
2857 case QL_VND_GET_PRIV_STATS_EX:
2858 return qla2x00_get_priv_stats(bsg_job);
2860 case QL_VND_DPORT_DIAGNOSTICS:
2861 return qla2x00_do_dport_diagnostics(bsg_job);
2863 case QL_VND_EDIF_MGMT:
2864 return qla_edif_app_mgmt(bsg_job);
2866 case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
2867 return qla2x00_get_flash_image_status(bsg_job);
2869 case QL_VND_MANAGE_HOST_STATS:
2870 return qla2x00_manage_host_stats(bsg_job);
2872 case QL_VND_GET_HOST_STATS:
2873 return qla2x00_get_host_stats(bsg_job);
2875 case QL_VND_GET_TGT_STATS:
2876 return qla2x00_get_tgt_stats(bsg_job);
2878 case QL_VND_MANAGE_HOST_PORT:
2879 return qla2x00_manage_host_port(bsg_job);
2881 case QL_VND_MBX_PASSTHRU:
2882 return qla2x00_mailbox_passthru(bsg_job);
/*
 * qla24xx_bsg_request - top-level BSG entry point for the driver.
 * Resolves the owning scsi_qla_host, performs health checks (chip down,
 * port isolated, driver removing), then routes by bsg_request->msgcode.
 *
 * NOTE(review): several lines (goto/skip_chip_chk labels, returns,
 * case labels such as FC_BSG_HST_CT) appear elided in this chunk.
 */
2890 qla24xx_bsg_request(struct bsg_job *bsg_job)
2892 struct fc_bsg_request *bsg_request = bsg_job->request;
2893 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2895 struct fc_rport *rport;
2896 struct Scsi_Host *host;
2897 scsi_qla_host_t *vha;
2899 /* In case no data transferred. */
2900 bsg_reply->reply_payload_rcv_len = 0;
/* ELS to a remote port: derive the host through the rport. */
2902 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
2903 rport = fc_bsg_to_rport(bsg_job);
2904 host = rport_to_shost(rport);
2905 vha = shost_priv(host);
2907 host = fc_bsg_to_shost(bsg_job);
2908 vha = shost_priv(host);
2911 /* Disable port will bring down the chip, allow enable command */
2912 if (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT ||
2913 bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS)
/* Refuse work while the port is isolated. */
2916 if (vha->hw->flags.port_isolated) {
2917 bsg_reply->result = DID_ERROR;
2918 /* operation not permitted */
/* Refuse work while an ISP abort is active or needed. */
2922 if (qla2x00_chip_is_down(vha)) {
2923 ql_dbg(ql_dbg_user, vha, 0x709f,
2924 "BSG: ISP abort active/needed -- cmd=%d.\n",
2925 bsg_request->msgcode);
2926 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
/* Refuse work while the driver is being removed. */
2930 if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
2931 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
2936 ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
2937 "Entered %s msgcode=0x%x. bsg ptr %px\n",
2938 __func__, bsg_request->msgcode, bsg_job);
/* Route by BSG message code. */
2940 switch (bsg_request->msgcode) {
2941 case FC_BSG_RPT_ELS:
2942 case FC_BSG_HST_ELS_NOLOGIN:
2943 ret = qla2x00_process_els(bsg_job);
2946 ret = qla2x00_process_ct(bsg_job);
2948 case FC_BSG_HST_VENDOR:
2949 ret = qla2x00_process_vendor_specific(vha, bsg_job);
2951 case FC_BSG_HST_ADD_RPORT:
2952 case FC_BSG_HST_DEL_RPORT:
2955 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
2959 ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
2960 "%s done with return %x\n", __func__, ret);
/*
 * qla24xx_bsg_timeout - BSG timeout callback. Scans every request
 * queue's outstanding-command array under the hardware lock for the SRB
 * that owns @bsg_job, removes it from the array, and asks the ISP to
 * abort it (lock dropped around the mailbox abort, re-taken after).
 *
 * NOTE(review): elided lines (null checks on req/sp, loop braces,
 * done/return labels) are not visible here; confirm against full
 * source. The final kref_put drops the SRB's command reference.
 */
2966 qla24xx_bsg_timeout(struct bsg_job *bsg_job)
2968 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2969 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2970 struct qla_hw_data *ha = vha->hw;
2973 unsigned long flags;
2974 struct req_que *req;
2976 ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
2978 /* find the bsg job from the active list of commands */
2979 spin_lock_irqsave(&ha->hardware_lock, flags);
2980 for (que = 0; que < ha->max_req_queues; que++) {
2981 req = ha->req_q_map[que];
/* Slot 0 is reserved; outstanding handles start at 1. */
2985 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
2986 sp = req->outstanding_cmds[cnt];
2988 (sp->type == SRB_CT_CMD ||
2989 sp->type == SRB_ELS_CMD_HST ||
2990 sp->type == SRB_ELS_CMD_HST_NOLOGIN ||
2991 sp->type == SRB_FXIOCB_BCMD) &&
2992 sp->u.bsg_job == bsg_job) {
/* Claim the slot, then abort outside the hardware lock. */
2993 req->outstanding_cmds[cnt] = NULL;
2994 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2995 if (ha->isp_ops->abort_command(sp)) {
2996 ql_log(ql_log_warn, vha, 0x7089,
2997 "mbx abort_command failed.\n");
2998 bsg_reply->result = -EIO;
3000 ql_dbg(ql_dbg_user, vha, 0x708a,
3001 "mbx abort_command success.\n");
3002 bsg_reply->result = 0;
3004 spin_lock_irqsave(&ha->hardware_lock, flags);
/* Fell through every queue without finding the job. */
3010 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3011 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
3012 bsg_reply->result = -ENXIO;
3016 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3018 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * qla2x00_mailbox_passthru - BSG vendor handler: copy a caller-supplied
 * qla_mbx_passthru struct in, execute the mailbox command via
 * qla_mailbox_passthru(), and copy the (now out-register-filled) struct
 * back in the reply payload.
 *
 * NOTE(review): error-exit lines and the kfree/return tail are elided
 * in this chunk; confirm against full source.
 */
3022 int qla2x00_mailbox_passthru(struct bsg_job *bsg_job)
3024 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
3025 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
3027 int ptsize = sizeof(struct qla_mbx_passthru);
3028 struct qla_mbx_passthru *req_data = NULL;
3029 uint32_t req_data_len;
/* Payload must be exactly one qla_mbx_passthru struct. */
3031 req_data_len = bsg_job->request_payload.payload_len;
3032 if (req_data_len != ptsize) {
3033 ql_log(ql_log_warn, vha, 0xf0a3, "req_data_len invalid.\n");
3036 req_data = kzalloc(ptsize, GFP_KERNEL);
3038 ql_log(ql_log_warn, vha, 0xf0a4,
3039 "req_data memory allocation failure.\n");
3043 /* Copy the request buffer in req_data */
3044 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
3045 bsg_job->request_payload.sg_cnt, req_data, ptsize);
/* mbx_in feeds the mailbox registers; mbx_out receives the results. */
3046 ret = qla_mailbox_passthru(vha, req_data->mbx_in, req_data->mbx_out);
3048 /* Copy the req_data in request buffer */
3049 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
3050 bsg_job->reply_payload.sg_cnt, req_data, ptsize);
3052 bsg_reply->reply_payload_rcv_len = ptsize;
3053 if (ret == QLA_SUCCESS)
3054 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
3056 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_ERR;
3058 bsg_job->reply_len = sizeof(*bsg_job->reply);
3059 bsg_reply->result = DID_OK << 16;
3060 bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);