GNU Linux-libre 4.14.251-gnu1
[releases.git] / drivers / scsi / qla2xxx / qla_bsg.c
1         /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2014 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <linux/bsg-lib.h>
13
14 /* BSG support for ELS/CT pass through */
/*
 * Common completion callback for BSG pass-through SRBs.
 *
 * @ptr: the completed srb_t (cast from void * by the done-path caller)
 * @res: result code to report back through the bsg reply
 *
 * Releases the SRB via its type-specific ->free() hook, then completes
 * the bsg request.  bsg_job is captured before the SRB is freed; the job
 * itself is owned by the bsg layer and stays valid until bsg_job_done().
 */
void
qla2x00_bsg_job_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	/* Free the SRB first; bsg_job/bsg_reply remain valid afterwards. */
	sp->free(sp);

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
}
28
/*
 * Type-aware teardown for a BSG pass-through SRB.
 *
 * Unmaps the request/reply scatterlists, frees the dummy fcport for SRB
 * types that allocated one on submission, and returns the SRB to its pool.
 */
void
qla2x00_bsg_sp_free(void *ptr)
{
	srb_t *sp = ptr;
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		/* ISPFx00 pass-through: the embedded IOCB request records
		 * which directions were actually DMA-mapped, so only those
		 * are unmapped here. */
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {
		/* All other SRB types map both directions unconditionally. */
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	/* These SRB types allocate a dummy fcport at submit time just to
	 * carry addressing info; release it along with the SRB. */
	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST)
		kfree(sp->fcport);
	qla2x00_rel_sp(sp);
}
65
66 int
67 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
68         struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
69 {
70         int i, ret, num_valid;
71         uint8_t *bcode;
72         struct qla_fcp_prio_entry *pri_entry;
73         uint32_t *bcode_val_ptr, bcode_val;
74
75         ret = 1;
76         num_valid = 0;
77         bcode = (uint8_t *)pri_cfg;
78         bcode_val_ptr = (uint32_t *)pri_cfg;
79         bcode_val = (uint32_t)(*bcode_val_ptr);
80
81         if (bcode_val == 0xFFFFFFFF) {
82                 /* No FCP Priority config data in flash */
83                 ql_dbg(ql_dbg_user, vha, 0x7051,
84                     "No FCP Priority config data.\n");
85                 return 0;
86         }
87
88         if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
89                         bcode[3] != 'S') {
90                 /* Invalid FCP priority data header*/
91                 ql_dbg(ql_dbg_user, vha, 0x7052,
92                     "Invalid FCP Priority data header. bcode=0x%x.\n",
93                     bcode_val);
94                 return 0;
95         }
96         if (flag != 1)
97                 return ret;
98
99         pri_entry = &pri_cfg->entry[0];
100         for (i = 0; i < pri_cfg->num_entries; i++) {
101                 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
102                         num_valid++;
103                 pri_entry++;
104         }
105
106         if (num_valid == 0) {
107                 /* No valid FCP priority data entries */
108                 ql_dbg(ql_dbg_user, vha, 0x7053,
109                     "No valid FCP Priority data entries.\n");
110                 ret = 0;
111         } else {
112                 /* FCP priority data is valid */
113                 ql_dbg(ql_dbg_user, vha, 0x7054,
114                     "Valid FCP priority data. num entries = %d.\n",
115                     num_valid);
116         }
117
118         return ret;
119 }
120
/*
 * Handle the FCP priority vendor sub-commands issued through BSG:
 * enable/disable the feature, and get/set the priority configuration
 * cached in ha->fcp_prio_cfg.
 *
 * Returns 0 on success (the bsg request is completed here) or a negative
 * errno; on error the request is left for the bsg layer to fail.
 */
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	/* FCP priority is only supported on ISP24xx/25xx/P3P adapters. */
	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		/* Disabling when already disabled is treated as an error. */
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
				~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		/* NOTE(review): enabling when already enabled falls through
		 * without setting bsg_reply->result — presumably treated as
		 * a benign no-op; confirm against the caller's expectations. */
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		/* Copy up to FCP_PRIO_CFG_SIZE bytes of the cached config
		 * into the caller's reply payload. */
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		/* Config buffer is allocated lazily on the first set. */
		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		/* Zero-fill first so a short payload leaves the tail clean. */
		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
			FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */

		if (!qla24xx_fcp_prio_cfg_valid(vha,
		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid,
			 * fcp_prio_cfg is of no use
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		/* Mirror the enable attribute from the new config and push
		 * the priority data to all logged-in ports. */
		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	/* Complete the request only on success; on error the negative
	 * return propagates through the bsg layer instead. */
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return ret;
}
249
250 static int
251 qla2x00_process_els(struct bsg_job *bsg_job)
252 {
253         struct fc_bsg_request *bsg_request = bsg_job->request;
254         struct fc_rport *rport;
255         fc_port_t *fcport = NULL;
256         struct Scsi_Host *host;
257         scsi_qla_host_t *vha;
258         struct qla_hw_data *ha;
259         srb_t *sp;
260         const char *type;
261         int req_sg_cnt, rsp_sg_cnt;
262         int rval =  (DID_ERROR << 16);
263         uint16_t nextlid = 0;
264
265         if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
266                 rport = fc_bsg_to_rport(bsg_job);
267                 fcport = *(fc_port_t **) rport->dd_data;
268                 host = rport_to_shost(rport);
269                 vha = shost_priv(host);
270                 ha = vha->hw;
271                 type = "FC_BSG_RPT_ELS";
272         } else {
273                 host = fc_bsg_to_shost(bsg_job);
274                 vha = shost_priv(host);
275                 ha = vha->hw;
276                 type = "FC_BSG_HST_ELS_NOLOGIN";
277         }
278
279         if (!vha->flags.online) {
280                 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
281                 rval = -EIO;
282                 goto done;
283         }
284
285         /* pass through is supported only for ISP 4Gb or higher */
286         if (!IS_FWI2_CAPABLE(ha)) {
287                 ql_dbg(ql_dbg_user, vha, 0x7001,
288                     "ELS passthru not supported for ISP23xx based adapters.\n");
289                 rval = -EPERM;
290                 goto done;
291         }
292
293         /*  Multiple SG's are not supported for ELS requests */
294         if (bsg_job->request_payload.sg_cnt > 1 ||
295                 bsg_job->reply_payload.sg_cnt > 1) {
296                 ql_dbg(ql_dbg_user, vha, 0x7002,
297                     "Multiple SG's are not supported for ELS requests, "
298                     "request_sg_cnt=%x reply_sg_cnt=%x.\n",
299                     bsg_job->request_payload.sg_cnt,
300                     bsg_job->reply_payload.sg_cnt);
301                 rval = -EPERM;
302                 goto done;
303         }
304
305         /* ELS request for rport */
306         if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
307                 /* make sure the rport is logged in,
308                  * if not perform fabric login
309                  */
310                 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
311                         ql_dbg(ql_dbg_user, vha, 0x7003,
312                             "Failed to login port %06X for ELS passthru.\n",
313                             fcport->d_id.b24);
314                         rval = -EIO;
315                         goto done;
316                 }
317         } else {
318                 /* Allocate a dummy fcport structure, since functions
319                  * preparing the IOCB and mailbox command retrieves port
320                  * specific information from fcport structure. For Host based
321                  * ELS commands there will be no fcport structure allocated
322                  */
323                 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
324                 if (!fcport) {
325                         rval = -ENOMEM;
326                         goto done;
327                 }
328
329                 /* Initialize all required  fields of fcport */
330                 fcport->vha = vha;
331                 fcport->d_id.b.al_pa =
332                         bsg_request->rqst_data.h_els.port_id[0];
333                 fcport->d_id.b.area =
334                         bsg_request->rqst_data.h_els.port_id[1];
335                 fcport->d_id.b.domain =
336                         bsg_request->rqst_data.h_els.port_id[2];
337                 fcport->loop_id =
338                         (fcport->d_id.b.al_pa == 0xFD) ?
339                         NPH_FABRIC_CONTROLLER : NPH_F_PORT;
340         }
341
342         req_sg_cnt =
343                 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
344                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
345         if (!req_sg_cnt) {
346                 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
347                     bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
348                 rval = -ENOMEM;
349                 goto done_free_fcport;
350         }
351
352         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
353                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
354         if (!rsp_sg_cnt) {
355                 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
356                     bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
357                 rval = -ENOMEM;
358                 goto done_free_fcport;
359         }
360
361         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
362                 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
363                 ql_log(ql_log_warn, vha, 0x7008,
364                     "dma mapping resulted in different sg counts, "
365                     "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
366                     "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
367                     req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
368                 rval = -EAGAIN;
369                 goto done_unmap_sg;
370         }
371
372         /* Alloc SRB structure */
373         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
374         if (!sp) {
375                 rval = -ENOMEM;
376                 goto done_unmap_sg;
377         }
378
379         sp->type =
380                 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
381                  SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
382         sp->name =
383                 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
384                  "bsg_els_rpt" : "bsg_els_hst");
385         sp->u.bsg_job = bsg_job;
386         sp->free = qla2x00_bsg_sp_free;
387         sp->done = qla2x00_bsg_job_done;
388
389         ql_dbg(ql_dbg_user, vha, 0x700a,
390             "bsg rqst type: %s els type: %x - loop-id=%x "
391             "portid=%-2x%02x%02x.\n", type,
392             bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
393             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
394
395         rval = qla2x00_start_sp(sp);
396         if (rval != QLA_SUCCESS) {
397                 ql_log(ql_log_warn, vha, 0x700e,
398                     "qla2x00_start_sp failed = %d\n", rval);
399                 qla2x00_rel_sp(sp);
400                 rval = -EIO;
401                 goto done_unmap_sg;
402         }
403         return rval;
404
405 done_unmap_sg:
406         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
407                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
408         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
409                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
410         goto done_free_fcport;
411
412 done_free_fcport:
413         if (bsg_request->msgcode == FC_BSG_RPT_ELS)
414                 kfree(fcport);
415 done:
416         return rval;
417 }
418
/*
 * Number of IOCBs needed to carry @dsds data-segment descriptors for a
 * CT command: the command IOCB holds the first 2 DSDs and each
 * continuation IOCB holds up to 5 more.
 */
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	if (dsds <= 2)
		return 1;

	/* 1 command IOCB + ceil((dsds - 2) / 5) continuation IOCBs. */
	return 1 + (dsds - 2 + 4) / 5;
}
432
433 static int
434 qla2x00_process_ct(struct bsg_job *bsg_job)
435 {
436         srb_t *sp;
437         struct fc_bsg_request *bsg_request = bsg_job->request;
438         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
439         scsi_qla_host_t *vha = shost_priv(host);
440         struct qla_hw_data *ha = vha->hw;
441         int rval = (DID_ERROR << 16);
442         int req_sg_cnt, rsp_sg_cnt;
443         uint16_t loop_id;
444         struct fc_port *fcport;
445         char  *type = "FC_BSG_HST_CT";
446
447         req_sg_cnt =
448                 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
449                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
450         if (!req_sg_cnt) {
451                 ql_log(ql_log_warn, vha, 0x700f,
452                     "dma_map_sg return %d for request\n", req_sg_cnt);
453                 rval = -ENOMEM;
454                 goto done;
455         }
456
457         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
458                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
459         if (!rsp_sg_cnt) {
460                 ql_log(ql_log_warn, vha, 0x7010,
461                     "dma_map_sg return %d for reply\n", rsp_sg_cnt);
462                 rval = -ENOMEM;
463                 goto done;
464         }
465
466         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
467             (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
468                 ql_log(ql_log_warn, vha, 0x7011,
469                     "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
470                     "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
471                     req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
472                 rval = -EAGAIN;
473                 goto done_unmap_sg;
474         }
475
476         if (!vha->flags.online) {
477                 ql_log(ql_log_warn, vha, 0x7012,
478                     "Host is not online.\n");
479                 rval = -EIO;
480                 goto done_unmap_sg;
481         }
482
483         loop_id =
484                 (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
485                         >> 24;
486         switch (loop_id) {
487         case 0xFC:
488                 loop_id = cpu_to_le16(NPH_SNS);
489                 break;
490         case 0xFA:
491                 loop_id = vha->mgmt_svr_loop_id;
492                 break;
493         default:
494                 ql_dbg(ql_dbg_user, vha, 0x7013,
495                     "Unknown loop id: %x.\n", loop_id);
496                 rval = -EINVAL;
497                 goto done_unmap_sg;
498         }
499
500         /* Allocate a dummy fcport structure, since functions preparing the
501          * IOCB and mailbox command retrieves port specific information
502          * from fcport structure. For Host based ELS commands there will be
503          * no fcport structure allocated
504          */
505         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
506         if (!fcport) {
507                 ql_log(ql_log_warn, vha, 0x7014,
508                     "Failed to allocate fcport.\n");
509                 rval = -ENOMEM;
510                 goto done_unmap_sg;
511         }
512
513         /* Initialize all required  fields of fcport */
514         fcport->vha = vha;
515         fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
516         fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
517         fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
518         fcport->loop_id = loop_id;
519
520         /* Alloc SRB structure */
521         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
522         if (!sp) {
523                 ql_log(ql_log_warn, vha, 0x7015,
524                     "qla2x00_get_sp failed.\n");
525                 rval = -ENOMEM;
526                 goto done_free_fcport;
527         }
528
529         sp->type = SRB_CT_CMD;
530         sp->name = "bsg_ct";
531         sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
532         sp->u.bsg_job = bsg_job;
533         sp->free = qla2x00_bsg_sp_free;
534         sp->done = qla2x00_bsg_job_done;
535
536         ql_dbg(ql_dbg_user, vha, 0x7016,
537             "bsg rqst type: %s else type: %x - "
538             "loop-id=%x portid=%02x%02x%02x.\n", type,
539             (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
540             fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
541             fcport->d_id.b.al_pa);
542
543         rval = qla2x00_start_sp(sp);
544         if (rval != QLA_SUCCESS) {
545                 ql_log(ql_log_warn, vha, 0x7017,
546                     "qla2x00_start_sp failed=%d.\n", rval);
547                 qla2x00_rel_sp(sp);
548                 rval = -EIO;
549                 goto done_free_fcport;
550         }
551         return rval;
552
553 done_free_fcport:
554         kfree(fcport);
555 done_unmap_sg:
556         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
557                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
558         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
559                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
560 done:
561         return rval;
562 }
563
564 /* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
			    int wait, int wait2)
{
	/*
	 * Clear any internal/external loopback bits from the current port
	 * config and push the result to the firmware.
	 *
	 * @config: 4-word port configuration previously read from firmware
	 * @wait:   non-zero to block for the DCBX-complete async event
	 * @wait2:  non-zero to also block for the port-up async event
	 *
	 * Returns 0 on success or -EINVAL on any set/wait failure.
	 */
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	/* Port-config loopback control exists only on 81xx/83xx/8044. */
	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0 , sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;

		/* Arm the completion notifications before issuing the
		 * mailbox command so the AEN handlers signal us. */
		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
			(DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		/* Optionally also wait for the link to report up again. */
		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		/* Disarm the notifications now that both waits are done. */
		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}
631
632 /*
633  * Set the port configuration to enable the internal or external loopback
634  * depending on the loopback mode.
635  */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
	uint16_t *new_config, uint16_t mode)
{
	/*
	 * Program the requested loopback mode into the port configuration
	 * and wait for the resulting DCBX completion.
	 *
	 * @config:     current 4-word port configuration
	 * @new_config: output — configuration actually written (caller uses
	 *              it later to restore the original mode)
	 * @mode:       INTERNAL_LOOPBACK or EXTERNAL_LOOPBACK
	 *
	 * Returns 0 on success, -EINVAL on failure.  On a DCBX timeout the
	 * loopback change is rolled back; if that rollback also fails, a
	 * firmware dump is taken and an ISP abort is scheduled.
	 */
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	/* Port-config loopback control exists only on 81xx/83xx/8044. */
	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	     "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	/* Arm the DCBX notification before issuing the mailbox command. */
	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		/* The IDC AEN handler may ask for more time by setting
		 * ha->idc_extend_tmo; keep re-waiting until the completion
		 * fires or no further extension is requested. */
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			ha->isp_ops->fw_dump(vha, 0);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}
710
711 static int
712 qla2x00_process_loopback(struct bsg_job *bsg_job)
713 {
714         struct fc_bsg_request *bsg_request = bsg_job->request;
715         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
716         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
717         scsi_qla_host_t *vha = shost_priv(host);
718         struct qla_hw_data *ha = vha->hw;
719         int rval;
720         uint8_t command_sent;
721         char *type;
722         struct msg_echo_lb elreq;
723         uint16_t response[MAILBOX_REGISTER_COUNT];
724         uint16_t config[4], new_config[4];
725         uint8_t *fw_sts_ptr;
726         uint8_t *req_data = NULL;
727         dma_addr_t req_data_dma;
728         uint32_t req_data_len;
729         uint8_t *rsp_data = NULL;
730         dma_addr_t rsp_data_dma;
731         uint32_t rsp_data_len;
732
733         if (!vha->flags.online) {
734                 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
735                 return -EIO;
736         }
737
738         memset(&elreq, 0, sizeof(elreq));
739
740         elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
741                 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
742                 DMA_TO_DEVICE);
743
744         if (!elreq.req_sg_cnt) {
745                 ql_log(ql_log_warn, vha, 0x701a,
746                     "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
747                 return -ENOMEM;
748         }
749
750         elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
751                 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
752                 DMA_FROM_DEVICE);
753
754         if (!elreq.rsp_sg_cnt) {
755                 ql_log(ql_log_warn, vha, 0x701b,
756                     "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
757                 rval = -ENOMEM;
758                 goto done_unmap_req_sg;
759         }
760
761         if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
762                 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
763                 ql_log(ql_log_warn, vha, 0x701c,
764                     "dma mapping resulted in different sg counts, "
765                     "request_sg_cnt: %x dma_request_sg_cnt: %x "
766                     "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
767                     bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
768                     bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
769                 rval = -EAGAIN;
770                 goto done_unmap_sg;
771         }
772         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
773         req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
774                 &req_data_dma, GFP_KERNEL);
775         if (!req_data) {
776                 ql_log(ql_log_warn, vha, 0x701d,
777                     "dma alloc failed for req_data.\n");
778                 rval = -ENOMEM;
779                 goto done_unmap_sg;
780         }
781
782         rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
783                 &rsp_data_dma, GFP_KERNEL);
784         if (!rsp_data) {
785                 ql_log(ql_log_warn, vha, 0x7004,
786                     "dma alloc failed for rsp_data.\n");
787                 rval = -ENOMEM;
788                 goto done_free_dma_req;
789         }
790
791         /* Copy the request buffer in req_data now */
792         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
793                 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
794
795         elreq.send_dma = req_data_dma;
796         elreq.rcv_dma = rsp_data_dma;
797         elreq.transfer_size = req_data_len;
798
799         elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
800         elreq.iteration_count =
801             bsg_request->rqst_data.h_vendor.vendor_cmd[2];
802
803         if (atomic_read(&vha->loop_state) == LOOP_READY &&
804             (ha->current_topology == ISP_CFG_F ||
805             (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
806              req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
807             elreq.options == EXTERNAL_LOOPBACK) {
808                 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
809                 ql_dbg(ql_dbg_user, vha, 0x701e,
810                     "BSG request type: %s.\n", type);
811                 command_sent = INT_DEF_LB_ECHO_CMD;
812                 rval = qla2x00_echo_test(vha, &elreq, response);
813         } else {
814                 if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
815                         memset(config, 0, sizeof(config));
816                         memset(new_config, 0, sizeof(new_config));
817
818                         if (qla81xx_get_port_config(vha, config)) {
819                                 ql_log(ql_log_warn, vha, 0x701f,
820                                     "Get port config failed.\n");
821                                 rval = -EPERM;
822                                 goto done_free_dma_rsp;
823                         }
824
825                         if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
826                                 ql_dbg(ql_dbg_user, vha, 0x70c4,
827                                     "Loopback operation already in "
828                                     "progress.\n");
829                                 rval = -EAGAIN;
830                                 goto done_free_dma_rsp;
831                         }
832
833                         ql_dbg(ql_dbg_user, vha, 0x70c0,
834                             "elreq.options=%04x\n", elreq.options);
835
836                         if (elreq.options == EXTERNAL_LOOPBACK)
837                                 if (IS_QLA8031(ha) || IS_QLA8044(ha))
838                                         rval = qla81xx_set_loopback_mode(vha,
839                                             config, new_config, elreq.options);
840                                 else
841                                         rval = qla81xx_reset_loopback_mode(vha,
842                                             config, 1, 0);
843                         else
844                                 rval = qla81xx_set_loopback_mode(vha, config,
845                                     new_config, elreq.options);
846
847                         if (rval) {
848                                 rval = -EPERM;
849                                 goto done_free_dma_rsp;
850                         }
851
852                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
853                         ql_dbg(ql_dbg_user, vha, 0x7028,
854                             "BSG request type: %s.\n", type);
855
856                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
857                         rval = qla2x00_loopback_test(vha, &elreq, response);
858
859                         if (response[0] == MBS_COMMAND_ERROR &&
860                                         response[1] == MBS_LB_RESET) {
861                                 ql_log(ql_log_warn, vha, 0x7029,
862                                     "MBX command error, Aborting ISP.\n");
863                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
864                                 qla2xxx_wake_dpc(vha);
865                                 qla2x00_wait_for_chip_reset(vha);
866                                 /* Also reset the MPI */
867                                 if (IS_QLA81XX(ha)) {
868                                         if (qla81xx_restart_mpi_firmware(vha) !=
869                                             QLA_SUCCESS) {
870                                                 ql_log(ql_log_warn, vha, 0x702a,
871                                                     "MPI reset failed.\n");
872                                         }
873                                 }
874
875                                 rval = -EIO;
876                                 goto done_free_dma_rsp;
877                         }
878
879                         if (new_config[0]) {
880                                 int ret;
881
882                                 /* Revert back to original port config
883                                  * Also clear internal loopback
884                                  */
885                                 ret = qla81xx_reset_loopback_mode(vha,
886                                     new_config, 0, 1);
887                                 if (ret) {
888                                         /*
889                                          * If the reset of the loopback mode
890                                          * doesn't work take FCoE dump and then
891                                          * reset the chip.
892                                          */
893                                         ha->isp_ops->fw_dump(vha, 0);
894                                         set_bit(ISP_ABORT_NEEDED,
895                                             &vha->dpc_flags);
896                                 }
897
898                         }
899
900                 } else {
901                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
902                         ql_dbg(ql_dbg_user, vha, 0x702b,
903                             "BSG request type: %s.\n", type);
904                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
905                         rval = qla2x00_loopback_test(vha, &elreq, response);
906                 }
907         }
908
909         if (rval) {
910                 ql_log(ql_log_warn, vha, 0x702c,
911                     "Vendor request %s failed.\n", type);
912
913                 rval = 0;
914                 bsg_reply->result = (DID_ERROR << 16);
915                 bsg_reply->reply_payload_rcv_len = 0;
916         } else {
917                 ql_dbg(ql_dbg_user, vha, 0x702d,
918                     "Vendor request %s completed.\n", type);
919                 bsg_reply->result = (DID_OK << 16);
920                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
921                         bsg_job->reply_payload.sg_cnt, rsp_data,
922                         rsp_data_len);
923         }
924
925         bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
926             sizeof(response) + sizeof(uint8_t);
927         fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
928             sizeof(struct fc_bsg_reply);
929         memcpy(fw_sts_ptr, response, sizeof(response));
930         fw_sts_ptr += sizeof(response);
931         *fw_sts_ptr = command_sent;
932
933 done_free_dma_rsp:
934         dma_free_coherent(&ha->pdev->dev, rsp_data_len,
935                 rsp_data, rsp_data_dma);
936 done_free_dma_req:
937         dma_free_coherent(&ha->pdev->dev, req_data_len,
938                 req_data, req_data_dma);
939 done_unmap_sg:
940         dma_unmap_sg(&ha->pdev->dev,
941             bsg_job->reply_payload.sg_list,
942             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
943 done_unmap_req_sg:
944         dma_unmap_sg(&ha->pdev->dev,
945             bsg_job->request_payload.sg_list,
946             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
947         if (!rval)
948                 bsg_job_done(bsg_job, bsg_reply->result,
949                                bsg_reply->reply_payload_rcv_len);
950         return rval;
951 }
952
953 static int
954 qla84xx_reset(struct bsg_job *bsg_job)
955 {
956         struct fc_bsg_request *bsg_request = bsg_job->request;
957         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
958         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
959         scsi_qla_host_t *vha = shost_priv(host);
960         struct qla_hw_data *ha = vha->hw;
961         int rval = 0;
962         uint32_t flag;
963
964         if (!IS_QLA84XX(ha)) {
965                 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
966                 return -EINVAL;
967         }
968
969         flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
970
971         rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
972
973         if (rval) {
974                 ql_log(ql_log_warn, vha, 0x7030,
975                     "Vendor request 84xx reset failed.\n");
976                 rval = (DID_ERROR << 16);
977
978         } else {
979                 ql_dbg(ql_dbg_user, vha, 0x7031,
980                     "Vendor request 84xx reset completed.\n");
981                 bsg_reply->result = DID_OK;
982                 bsg_job_done(bsg_job, bsg_reply->result,
983                                bsg_reply->reply_payload_rcv_len);
984         }
985
986         return rval;
987 }
988
989 static int
990 qla84xx_updatefw(struct bsg_job *bsg_job)
991 {
992         struct fc_bsg_request *bsg_request = bsg_job->request;
993         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
994         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
995         scsi_qla_host_t *vha = shost_priv(host);
996         struct qla_hw_data *ha = vha->hw;
997         struct verify_chip_entry_84xx *mn = NULL;
998         dma_addr_t mn_dma, fw_dma;
999         void *fw_buf = NULL;
1000         int rval = 0;
1001         uint32_t sg_cnt;
1002         uint32_t data_len;
1003         uint16_t options;
1004         uint32_t flag;
1005         uint32_t fw_ver;
1006
1007         if (!IS_QLA84XX(ha)) {
1008                 ql_dbg(ql_dbg_user, vha, 0x7032,
1009                     "Not 84xx, exiting.\n");
1010                 return -EINVAL;
1011         }
1012
1013         sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1014                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1015         if (!sg_cnt) {
1016                 ql_log(ql_log_warn, vha, 0x7033,
1017                     "dma_map_sg returned %d for request.\n", sg_cnt);
1018                 return -ENOMEM;
1019         }
1020
1021         if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1022                 ql_log(ql_log_warn, vha, 0x7034,
1023                     "DMA mapping resulted in different sg counts, "
1024                     "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1025                     bsg_job->request_payload.sg_cnt, sg_cnt);
1026                 rval = -EAGAIN;
1027                 goto done_unmap_sg;
1028         }
1029
1030         data_len = bsg_job->request_payload.payload_len;
1031         fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
1032                 &fw_dma, GFP_KERNEL);
1033         if (!fw_buf) {
1034                 ql_log(ql_log_warn, vha, 0x7035,
1035                     "DMA alloc failed for fw_buf.\n");
1036                 rval = -ENOMEM;
1037                 goto done_unmap_sg;
1038         }
1039
1040         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1041                 bsg_job->request_payload.sg_cnt, fw_buf, data_len);
1042
1043         mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1044         if (!mn) {
1045                 ql_log(ql_log_warn, vha, 0x7036,
1046                     "DMA alloc failed for fw buffer.\n");
1047                 rval = -ENOMEM;
1048                 goto done_free_fw_buf;
1049         }
1050
1051         flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1052         fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
1053
1054         memset(mn, 0, sizeof(struct access_chip_84xx));
1055         mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
1056         mn->entry_count = 1;
1057
1058         options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
1059         if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
1060                 options |= VCO_DIAG_FW;
1061
1062         mn->options = cpu_to_le16(options);
1063         mn->fw_ver =  cpu_to_le32(fw_ver);
1064         mn->fw_size =  cpu_to_le32(data_len);
1065         mn->fw_seq_size =  cpu_to_le32(data_len);
1066         mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
1067         mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
1068         mn->dseg_length = cpu_to_le32(data_len);
1069         mn->data_seg_cnt = cpu_to_le16(1);
1070
1071         rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
1072
1073         if (rval) {
1074                 ql_log(ql_log_warn, vha, 0x7037,
1075                     "Vendor request 84xx updatefw failed.\n");
1076
1077                 rval = (DID_ERROR << 16);
1078         } else {
1079                 ql_dbg(ql_dbg_user, vha, 0x7038,
1080                     "Vendor request 84xx updatefw completed.\n");
1081
1082                 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1083                 bsg_reply->result = DID_OK;
1084         }
1085
1086         dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1087
1088 done_free_fw_buf:
1089         dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
1090
1091 done_unmap_sg:
1092         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1093                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1094
1095         if (!rval)
1096                 bsg_job_done(bsg_job, bsg_reply->result,
1097                                bsg_reply->reply_payload_rcv_len);
1098         return rval;
1099 }
1100
/*
 * qla84xx_mgmt_cmd - BSG vendor command: ISP84xx management pass-through.
 *
 * Builds an ACCESS_CHIP IOCB from the qla_bsg_a84_mgmt structure that
 * immediately follows the generic fc_bsg_request, issues it, and for
 * read-type commands copies the returned data into the reply payload.
 *
 * Sub-commands (ql84_mgmt->mgmt.cmd):
 *   QLA84_MGMT_READ_MEM / QLA84_MGMT_GET_INFO - data flows chip -> host
 *   QLA84_MGMT_WRITE_MEM                      - data flows host -> chip
 *   QLA84_MGMT_CHNG_CONFIG                    - parameters only, no payload
 *
 * Cleanup is driven by dma_direction (which payload mapping to undo) and
 * mgmt_b (whether the bounce buffer was allocated).
 *
 * Returns 0 on success, a negative errno on setup failure, or
 * (DID_ERROR << 16) if the IOCB itself fails.
 */
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	/* Records which payload mapping (if any) must be undone on exit. */
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	/* Vendor-specific parameters follow the generic BSG request. */
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		/* Chip-to-host transfer: map the reply payload. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		/* Bounce buffer the chip writes into. */
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		/* Host-to-chip transfer: map the request payload. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
			&mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		/* Stage the outgoing data in the bounce buffer. */
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		/* Parameter-only command: no data buffer or mapping. */
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	/* All data-bearing commands describe a single segment. */
	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		/* Read-type commands return data via the reply payload. */
		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
			(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
				bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
				bsg_job->reply_payload.sg_cnt, mgmt_b,
				data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	/* Undo whichever payload mapping was established above. */
	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}
1298
1299 static int
1300 qla24xx_iidma(struct bsg_job *bsg_job)
1301 {
1302         struct fc_bsg_request *bsg_request = bsg_job->request;
1303         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1304         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1305         scsi_qla_host_t *vha = shost_priv(host);
1306         int rval = 0;
1307         struct qla_port_param *port_param = NULL;
1308         fc_port_t *fcport = NULL;
1309         int found = 0;
1310         uint16_t mb[MAILBOX_REGISTER_COUNT];
1311         uint8_t *rsp_ptr = NULL;
1312
1313         if (!IS_IIDMA_CAPABLE(vha->hw)) {
1314                 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1315                 return -EINVAL;
1316         }
1317
1318         port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
1319         if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1320                 ql_log(ql_log_warn, vha, 0x7048,
1321                     "Invalid destination type.\n");
1322                 return -EINVAL;
1323         }
1324
1325         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1326                 if (fcport->port_type != FCT_TARGET)
1327                         continue;
1328
1329                 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1330                         fcport->port_name, sizeof(fcport->port_name)))
1331                         continue;
1332
1333                 found = 1;
1334                 break;
1335         }
1336
1337         if (!found) {
1338                 ql_log(ql_log_warn, vha, 0x7049,
1339                     "Failed to find port.\n");
1340                 return -EINVAL;
1341         }
1342
1343         if (atomic_read(&fcport->state) != FCS_ONLINE) {
1344                 ql_log(ql_log_warn, vha, 0x704a,
1345                     "Port is not online.\n");
1346                 return -EINVAL;
1347         }
1348
1349         if (fcport->flags & FCF_LOGIN_NEEDED) {
1350                 ql_log(ql_log_warn, vha, 0x704b,
1351                     "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1352                 return -EINVAL;
1353         }
1354
1355         if (port_param->mode)
1356                 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1357                         port_param->speed, mb);
1358         else
1359                 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1360                         &port_param->speed, mb);
1361
1362         if (rval) {
1363                 ql_log(ql_log_warn, vha, 0x704c,
1364                     "iIDMA cmd failed for %8phN -- "
1365                     "%04x %x %04x %04x.\n", fcport->port_name,
1366                     rval, fcport->fp_speed, mb[0], mb[1]);
1367                 rval = (DID_ERROR << 16);
1368         } else {
1369                 if (!port_param->mode) {
1370                         bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1371                                 sizeof(struct qla_port_param);
1372
1373                         rsp_ptr = ((uint8_t *)bsg_reply) +
1374                                 sizeof(struct fc_bsg_reply);
1375
1376                         memcpy(rsp_ptr, port_param,
1377                                 sizeof(struct qla_port_param));
1378                 }
1379
1380                 bsg_reply->result = DID_OK;
1381                 bsg_job_done(bsg_job, bsg_reply->result,
1382                                bsg_reply->reply_payload_rcv_len);
1383         }
1384
1385         return rval;
1386 }
1387
1388 static int
1389 qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
1390         uint8_t is_update)
1391 {
1392         struct fc_bsg_request *bsg_request = bsg_job->request;
1393         uint32_t start = 0;
1394         int valid = 0;
1395         struct qla_hw_data *ha = vha->hw;
1396
1397         if (unlikely(pci_channel_offline(ha->pdev)))
1398                 return -EINVAL;
1399
1400         start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1401         if (start > ha->optrom_size) {
1402                 ql_log(ql_log_warn, vha, 0x7055,
1403                     "start %d > optrom_size %d.\n", start, ha->optrom_size);
1404                 return -EINVAL;
1405         }
1406
1407         if (ha->optrom_state != QLA_SWAITING) {
1408                 ql_log(ql_log_info, vha, 0x7056,
1409                     "optrom_state %d.\n", ha->optrom_state);
1410                 return -EBUSY;
1411         }
1412
1413         ha->optrom_region_start = start;
1414         ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1415         if (is_update) {
1416                 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1417                         valid = 1;
1418                 else if (start == (ha->flt_region_boot * 4) ||
1419                     start == (ha->flt_region_fw * 4))
1420                         valid = 1;
1421                 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1422                     IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
1423                         valid = 1;
1424                 if (!valid) {
1425                         ql_log(ql_log_warn, vha, 0x7058,
1426                             "Invalid start region 0x%x/0x%x.\n", start,
1427                             bsg_job->request_payload.payload_len);
1428                         return -EINVAL;
1429                 }
1430
1431                 ha->optrom_region_size = start +
1432                     bsg_job->request_payload.payload_len > ha->optrom_size ?
1433                     ha->optrom_size - start :
1434                     bsg_job->request_payload.payload_len;
1435                 ha->optrom_state = QLA_SWRITING;
1436         } else {
1437                 ha->optrom_region_size = start +
1438                     bsg_job->reply_payload.payload_len > ha->optrom_size ?
1439                     ha->optrom_size - start :
1440                     bsg_job->reply_payload.payload_len;
1441                 ha->optrom_state = QLA_SREADING;
1442         }
1443
1444         ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1445         if (!ha->optrom_buffer) {
1446                 ql_log(ql_log_warn, vha, 0x7059,
1447                     "Read: Unable to allocate memory for optrom retrieval "
1448                     "(%x)\n", ha->optrom_region_size);
1449
1450                 ha->optrom_state = QLA_SWAITING;
1451                 return -ENOMEM;
1452         }
1453
1454         memset(ha->optrom_buffer, 0, ha->optrom_region_size);
1455         return 0;
1456 }
1457
1458 static int
1459 qla2x00_read_optrom(struct bsg_job *bsg_job)
1460 {
1461         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1462         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1463         scsi_qla_host_t *vha = shost_priv(host);
1464         struct qla_hw_data *ha = vha->hw;
1465         int rval = 0;
1466
1467         if (ha->flags.nic_core_reset_hdlr_active)
1468                 return -EBUSY;
1469
1470         mutex_lock(&ha->optrom_mutex);
1471         rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1472         if (rval) {
1473                 mutex_unlock(&ha->optrom_mutex);
1474                 return rval;
1475         }
1476
1477         ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1478             ha->optrom_region_start, ha->optrom_region_size);
1479
1480         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1481             bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1482             ha->optrom_region_size);
1483
1484         bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
1485         bsg_reply->result = DID_OK;
1486         vfree(ha->optrom_buffer);
1487         ha->optrom_buffer = NULL;
1488         ha->optrom_state = QLA_SWAITING;
1489         mutex_unlock(&ha->optrom_mutex);
1490         bsg_job_done(bsg_job, bsg_reply->result,
1491                        bsg_reply->reply_payload_rcv_len);
1492         return rval;
1493 }
1494
1495 static int
1496 qla2x00_update_optrom(struct bsg_job *bsg_job)
1497 {
1498         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1499         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1500         scsi_qla_host_t *vha = shost_priv(host);
1501         struct qla_hw_data *ha = vha->hw;
1502         int rval = 0;
1503
1504         mutex_lock(&ha->optrom_mutex);
1505         rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1506         if (rval) {
1507                 mutex_unlock(&ha->optrom_mutex);
1508                 return rval;
1509         }
1510
1511         /* Set the isp82xx_no_md_cap not to capture minidump */
1512         ha->flags.isp82xx_no_md_cap = 1;
1513
1514         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1515             bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1516             ha->optrom_region_size);
1517
1518         ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1519             ha->optrom_region_start, ha->optrom_region_size);
1520
1521         bsg_reply->result = DID_OK;
1522         vfree(ha->optrom_buffer);
1523         ha->optrom_buffer = NULL;
1524         ha->optrom_state = QLA_SWAITING;
1525         mutex_unlock(&ha->optrom_mutex);
1526         bsg_job_done(bsg_job, bsg_reply->result,
1527                        bsg_reply->reply_payload_rcv_len);
1528         return rval;
1529 }
1530
1531 static int
1532 qla2x00_update_fru_versions(struct bsg_job *bsg_job)
1533 {
1534         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1535         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1536         scsi_qla_host_t *vha = shost_priv(host);
1537         struct qla_hw_data *ha = vha->hw;
1538         int rval = 0;
1539         uint8_t bsg[DMA_POOL_SIZE];
1540         struct qla_image_version_list *list = (void *)bsg;
1541         struct qla_image_version *image;
1542         uint32_t count;
1543         dma_addr_t sfp_dma;
1544         void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1545         if (!sfp) {
1546                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1547                     EXT_STATUS_NO_MEMORY;
1548                 goto done;
1549         }
1550
1551         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1552             bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1553
1554         image = list->version;
1555         count = list->count;
1556         while (count--) {
1557                 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1558                 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1559                     image->field_address.device, image->field_address.offset,
1560                     sizeof(image->field_info), image->field_address.option);
1561                 if (rval) {
1562                         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1563                             EXT_STATUS_MAILBOX;
1564                         goto dealloc;
1565                 }
1566                 image++;
1567         }
1568
1569         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1570
1571 dealloc:
1572         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1573
1574 done:
1575         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1576         bsg_reply->result = DID_OK << 16;
1577         bsg_job_done(bsg_job, bsg_reply->result,
1578                        bsg_reply->reply_payload_rcv_len);
1579
1580         return 0;
1581 }
1582
1583 static int
1584 qla2x00_read_fru_status(struct bsg_job *bsg_job)
1585 {
1586         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1587         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1588         scsi_qla_host_t *vha = shost_priv(host);
1589         struct qla_hw_data *ha = vha->hw;
1590         int rval = 0;
1591         uint8_t bsg[DMA_POOL_SIZE];
1592         struct qla_status_reg *sr = (void *)bsg;
1593         dma_addr_t sfp_dma;
1594         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1595         if (!sfp) {
1596                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1597                     EXT_STATUS_NO_MEMORY;
1598                 goto done;
1599         }
1600
1601         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1602             bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1603
1604         rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1605             sr->field_address.device, sr->field_address.offset,
1606             sizeof(sr->status_reg), sr->field_address.option);
1607         sr->status_reg = *sfp;
1608
1609         if (rval) {
1610                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1611                     EXT_STATUS_MAILBOX;
1612                 goto dealloc;
1613         }
1614
1615         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1616             bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1617
1618         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1619
1620 dealloc:
1621         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1622
1623 done:
1624         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1625         bsg_reply->reply_payload_rcv_len = sizeof(*sr);
1626         bsg_reply->result = DID_OK << 16;
1627         bsg_job_done(bsg_job, bsg_reply->result,
1628                        bsg_reply->reply_payload_rcv_len);
1629
1630         return 0;
1631 }
1632
1633 static int
1634 qla2x00_write_fru_status(struct bsg_job *bsg_job)
1635 {
1636         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1637         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1638         scsi_qla_host_t *vha = shost_priv(host);
1639         struct qla_hw_data *ha = vha->hw;
1640         int rval = 0;
1641         uint8_t bsg[DMA_POOL_SIZE];
1642         struct qla_status_reg *sr = (void *)bsg;
1643         dma_addr_t sfp_dma;
1644         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1645         if (!sfp) {
1646                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1647                     EXT_STATUS_NO_MEMORY;
1648                 goto done;
1649         }
1650
1651         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1652             bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1653
1654         *sfp = sr->status_reg;
1655         rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1656             sr->field_address.device, sr->field_address.offset,
1657             sizeof(sr->status_reg), sr->field_address.option);
1658
1659         if (rval) {
1660                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1661                     EXT_STATUS_MAILBOX;
1662                 goto dealloc;
1663         }
1664
1665         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1666
1667 dealloc:
1668         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1669
1670 done:
1671         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1672         bsg_reply->result = DID_OK << 16;
1673         bsg_job_done(bsg_job, bsg_reply->result,
1674                        bsg_reply->reply_payload_rcv_len);
1675
1676         return 0;
1677 }
1678
1679 static int
1680 qla2x00_write_i2c(struct bsg_job *bsg_job)
1681 {
1682         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1683         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1684         scsi_qla_host_t *vha = shost_priv(host);
1685         struct qla_hw_data *ha = vha->hw;
1686         int rval = 0;
1687         uint8_t bsg[DMA_POOL_SIZE];
1688         struct qla_i2c_access *i2c = (void *)bsg;
1689         dma_addr_t sfp_dma;
1690         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1691         if (!sfp) {
1692                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1693                     EXT_STATUS_NO_MEMORY;
1694                 goto done;
1695         }
1696
1697         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1698             bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1699
1700         memcpy(sfp, i2c->buffer, i2c->length);
1701         rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1702             i2c->device, i2c->offset, i2c->length, i2c->option);
1703
1704         if (rval) {
1705                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1706                     EXT_STATUS_MAILBOX;
1707                 goto dealloc;
1708         }
1709
1710         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1711
1712 dealloc:
1713         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1714
1715 done:
1716         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1717         bsg_reply->result = DID_OK << 16;
1718         bsg_job_done(bsg_job, bsg_reply->result,
1719                        bsg_reply->reply_payload_rcv_len);
1720
1721         return 0;
1722 }
1723
1724 static int
1725 qla2x00_read_i2c(struct bsg_job *bsg_job)
1726 {
1727         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1728         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1729         scsi_qla_host_t *vha = shost_priv(host);
1730         struct qla_hw_data *ha = vha->hw;
1731         int rval = 0;
1732         uint8_t bsg[DMA_POOL_SIZE];
1733         struct qla_i2c_access *i2c = (void *)bsg;
1734         dma_addr_t sfp_dma;
1735         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1736         if (!sfp) {
1737                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1738                     EXT_STATUS_NO_MEMORY;
1739                 goto done;
1740         }
1741
1742         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1743             bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1744
1745         rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1746                 i2c->device, i2c->offset, i2c->length, i2c->option);
1747
1748         if (rval) {
1749                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1750                     EXT_STATUS_MAILBOX;
1751                 goto dealloc;
1752         }
1753
1754         memcpy(i2c->buffer, sfp, i2c->length);
1755         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1756             bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1757
1758         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1759
1760 dealloc:
1761         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1762
1763 done:
1764         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1765         bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
1766         bsg_reply->result = DID_OK << 16;
1767         bsg_job_done(bsg_job, bsg_reply->result,
1768                        bsg_reply->reply_payload_rcv_len);
1769
1770         return 0;
1771 }
1772
/*
 * Issue a bidirectional (loopback-style) IOCB through the switch.
 *
 * Validates adapter capability and link state, performs a one-time
 * self-login to obtain a loop id, DMA-maps both the request and reply
 * payloads, and hands an SRB to the firmware.  On the success path the
 * job is completed later from the interrupt handler via sp->done;
 * on every error path the job is completed here with DID_OK and the
 * vendor response word carrying the EXT_STATUS_* error.
 */
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len;
	uint32_t rsp_data_len;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
			"This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	/* Refuse while any ISP abort/reset is pending or in progress. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval =  EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
			"Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
			"Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
			"Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host operating mode is not P2p\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/*
	 * First bidir command on this vha: log in to our own port so the
	 * firmware has a loop id to address the loopback frames to.
	 * selflogin_lock serializes the one-time setup.
	 */
	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required  fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;

	}
	/* Assign the self login loop id to fcport */
	mutex_unlock(&ha->selflogin_lock);

	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

	/* Map both directions; each must be unwound on later failure. */
	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt,
		DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
		DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	/* The driver cannot handle IOMMU-coalesced SG lists here. */
	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "Dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
		    "%x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	/* Loopback semantics: what goes out must equal what comes back. */
	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
		    "req_data_len != rsp_data_len\n");
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x70ac,
		    "Alloc SRB structure failed\n");
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/*Populate srb->ctx with bidir ctx*/
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->type = SRB_BIDI_CMD;
	sp->done = qla2x00_bsg_job_done;

	/* Add the read and write sg count */
	tot_dsds = rsp_sg_cnt + req_sg_cnt;

	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
	if (rval != EXT_STATUS_OK)
		goto done_free_srb;
	/* the bsg request  will be completed in the interrupt handler */
	return rval;

done_free_srb:
	mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

	/* Return an error vendor specific response
	 * and complete the bsg request
	 */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = 0;
	bsg_reply->result = (DID_OK) << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	/* Always return success, vendor rsp carries correct status */
	return 0;
}
1950
/*
 * Issue an ISPFX00 management pass-through IOCB built from the vendor
 * command payload.
 *
 * DMA-maps the request/reply payloads only when the corresponding
 * SRB_FXDISC_*_DMA_VALID flags are set, allocates a throwaway fcport
 * (these host-based commands have no real remote port) plus an SRB,
 * and starts it.  On success the job completes later via sp->done and
 * sp->free undoes the mappings; on failure this function unwinds in
 * reverse order and returns a negative errno.
 */
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char  *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
	    (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	/* Map the request payload only if the IOCB says it carries data. */
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg return %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	/* Likewise for the reply payload. */
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required  fields of fcport */
	fcport->vha = vha;
	fcport->loop_id = piocb_rqst->dataword;

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	/* Completion (and cleanup via sp->free) happens in IRQ context. */
	return rval;

done_free_fcport:
	kfree(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}
2073
2074 static int
2075 qla26xx_serdes_op(struct bsg_job *bsg_job)
2076 {
2077         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2078         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2079         scsi_qla_host_t *vha = shost_priv(host);
2080         int rval = 0;
2081         struct qla_serdes_reg sr;
2082
2083         memset(&sr, 0, sizeof(sr));
2084
2085         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2086             bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2087
2088         switch (sr.cmd) {
2089         case INT_SC_SERDES_WRITE_REG:
2090                 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2091                 bsg_reply->reply_payload_rcv_len = 0;
2092                 break;
2093         case INT_SC_SERDES_READ_REG:
2094                 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2095                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2096                     bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2097                 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2098                 break;
2099         default:
2100                 ql_dbg(ql_dbg_user, vha, 0x708c,
2101                     "Unknown serdes cmd %x.\n", sr.cmd);
2102                 rval = -EINVAL;
2103                 break;
2104         }
2105
2106         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2107             rval ? EXT_STATUS_MAILBOX : 0;
2108
2109         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2110         bsg_reply->result = DID_OK << 16;
2111         bsg_job_done(bsg_job, bsg_reply->result,
2112                        bsg_reply->reply_payload_rcv_len);
2113         return 0;
2114 }
2115
2116 static int
2117 qla8044_serdes_op(struct bsg_job *bsg_job)
2118 {
2119         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2120         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2121         scsi_qla_host_t *vha = shost_priv(host);
2122         int rval = 0;
2123         struct qla_serdes_reg_ex sr;
2124
2125         memset(&sr, 0, sizeof(sr));
2126
2127         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2128             bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2129
2130         switch (sr.cmd) {
2131         case INT_SC_SERDES_WRITE_REG:
2132                 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2133                 bsg_reply->reply_payload_rcv_len = 0;
2134                 break;
2135         case INT_SC_SERDES_READ_REG:
2136                 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2137                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2138                     bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2139                 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2140                 break;
2141         default:
2142                 ql_dbg(ql_dbg_user, vha, 0x7020,
2143                     "Unknown serdes cmd %x.\n", sr.cmd);
2144                 rval = -EINVAL;
2145                 break;
2146         }
2147
2148         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2149             rval ? EXT_STATUS_MAILBOX : 0;
2150
2151         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2152         bsg_reply->result = DID_OK << 16;
2153         bsg_job_done(bsg_job, bsg_reply->result,
2154                        bsg_reply->reply_payload_rcv_len);
2155         return 0;
2156 }
2157
2158 static int
2159 qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
2160 {
2161         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2162         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2163         scsi_qla_host_t *vha = shost_priv(host);
2164         struct qla_hw_data *ha = vha->hw;
2165         struct qla_flash_update_caps cap;
2166
2167         if (!(IS_QLA27XX(ha)))
2168                 return -EPERM;
2169
2170         memset(&cap, 0, sizeof(cap));
2171         cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2172                            (uint64_t)ha->fw_attributes_ext[0] << 32 |
2173                            (uint64_t)ha->fw_attributes_h << 16 |
2174                            (uint64_t)ha->fw_attributes;
2175
2176         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2177             bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
2178         bsg_reply->reply_payload_rcv_len = sizeof(cap);
2179
2180         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2181             EXT_STATUS_OK;
2182
2183         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2184         bsg_reply->result = DID_OK << 16;
2185         bsg_job_done(bsg_job, bsg_reply->result,
2186                        bsg_reply->reply_payload_rcv_len);
2187         return 0;
2188 }
2189
2190 static int
2191 qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
2192 {
2193         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2194         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2195         scsi_qla_host_t *vha = shost_priv(host);
2196         struct qla_hw_data *ha = vha->hw;
2197         uint64_t online_fw_attr = 0;
2198         struct qla_flash_update_caps cap;
2199
2200         if (!(IS_QLA27XX(ha)))
2201                 return -EPERM;
2202
2203         memset(&cap, 0, sizeof(cap));
2204         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2205             bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
2206
2207         online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2208                          (uint64_t)ha->fw_attributes_ext[0] << 32 |
2209                          (uint64_t)ha->fw_attributes_h << 16 |
2210                          (uint64_t)ha->fw_attributes;
2211
2212         if (online_fw_attr != cap.capabilities) {
2213                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2214                     EXT_STATUS_INVALID_PARAM;
2215                 return -EINVAL;
2216         }
2217
2218         if (cap.outage_duration < MAX_LOOP_TIMEOUT)  {
2219                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2220                     EXT_STATUS_INVALID_PARAM;
2221                 return -EINVAL;
2222         }
2223
2224         bsg_reply->reply_payload_rcv_len = 0;
2225
2226         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2227             EXT_STATUS_OK;
2228
2229         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2230         bsg_reply->result = DID_OK << 16;
2231         bsg_job_done(bsg_job, bsg_reply->result,
2232                        bsg_reply->reply_payload_rcv_len);
2233         return 0;
2234 }
2235
/*
 * Return buffer-to-buffer credit recovery (BBCR) state in the reply
 * payload as a struct qla_bbcr_data.  27xx adapters only; returns
 * -EPERM otherwise.  The job is always completed with DID_OK.
 */
static int
qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_bbcr_data bbcr;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa, state;
	int rval;

	if (!(IS_QLA27XX(ha)))
		return -EPERM;

	memset(&bbcr, 0, sizeof(bbcr));

	if (vha->flags.bbcr_enable)
		bbcr.status = QLA_BBCR_STATUS_ENABLED;
	else
		bbcr.status = QLA_BBCR_STATUS_DISABLED;

	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
			&area, &domain, &topo, &sw_cap);
		if (rval != QLA_SUCCESS) {
			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			/*
			 * NOTE(review): presumably loop_id carries the
			 * mailbox-1 error detail here, but nothing in view
			 * shows qla2x00_get_adapter_id() writing it on
			 * failure — confirm it cannot be left
			 * uninitialized.
			 */
			bbcr.mbx1 = loop_id;
			goto done;
		}

		/* vha->bbcr bit 12: 1 = offline (login rejected). */
		state = (vha->bbcr >> 12) & 0x1;

		if (state) {
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
		} else {
			bbcr.state = QLA_BBCR_STATE_ONLINE;
			/* Bits 11:8 hold the negotiated BB_SC_N value. */
			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
		}

		bbcr.configured_bbscn = vha->bbcr & 0xf;
	}

done:
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return 0;
}
2294
2295 static int
2296 qla2x00_get_priv_stats(struct bsg_job *bsg_job)
2297 {
2298         struct fc_bsg_request *bsg_request = bsg_job->request;
2299         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2300         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2301         scsi_qla_host_t *vha = shost_priv(host);
2302         struct qla_hw_data *ha = vha->hw;
2303         struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2304         struct link_statistics *stats = NULL;
2305         dma_addr_t stats_dma;
2306         int rval;
2307         uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
2308         uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
2309
2310         if (test_bit(UNLOADING, &vha->dpc_flags))
2311                 return -ENODEV;
2312
2313         if (unlikely(pci_channel_offline(ha->pdev)))
2314                 return -ENODEV;
2315
2316         if (qla2x00_reset_active(vha))
2317                 return -EBUSY;
2318
2319         if (!IS_FWI2_CAPABLE(ha))
2320                 return -EPERM;
2321
2322         stats = dma_alloc_coherent(&ha->pdev->dev,
2323                 sizeof(*stats), &stats_dma, GFP_KERNEL);
2324         if (!stats) {
2325                 ql_log(ql_log_warn, vha, 0x70e2,
2326                     "Failed to allocate memory for stats.\n");
2327                 return -ENOMEM;
2328         }
2329
2330         memset(stats, 0, sizeof(*stats));
2331
2332         rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
2333
2334         if (rval == QLA_SUCCESS) {
2335                 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
2336                     (uint8_t *)stats, sizeof(*stats));
2337                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2338                         bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
2339         }
2340
2341         bsg_reply->reply_payload_rcv_len = sizeof(*stats);
2342         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2343             rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2344
2345         bsg_job->reply_len = sizeof(*bsg_reply);
2346         bsg_reply->result = DID_OK << 16;
2347         bsg_job_done(bsg_job, bsg_reply->result,
2348                        bsg_reply->reply_payload_rcv_len);
2349
2350         dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2351                 stats, stats_dma);
2352
2353         return 0;
2354 }
2355
2356 static int
2357 qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
2358 {
2359         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2360         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2361         scsi_qla_host_t *vha = shost_priv(host);
2362         int rval;
2363         struct qla_dport_diag *dd;
2364
2365         if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
2366                 return -EPERM;
2367
2368         dd = kmalloc(sizeof(*dd), GFP_KERNEL);
2369         if (!dd) {
2370                 ql_log(ql_log_warn, vha, 0x70db,
2371                     "Failed to allocate memory for dport.\n");
2372                 return -ENOMEM;
2373         }
2374
2375         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2376             bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2377
2378         rval = qla26xx_dport_diagnostics(
2379             vha, dd->buf, sizeof(dd->buf), dd->options);
2380         if (rval == QLA_SUCCESS) {
2381                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2382                     bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2383         }
2384
2385         bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2386         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2387             rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2388
2389         bsg_job->reply_len = sizeof(*bsg_reply);
2390         bsg_reply->result = DID_OK << 16;
2391         bsg_job_done(bsg_job, bsg_reply->result,
2392                        bsg_reply->reply_payload_rcv_len);
2393
2394         kfree(dd);
2395
2396         return 0;
2397 }
2398
/*
 * qla2x00_process_vendor_specific() - dispatch an FC_BSG_HST_VENDOR
 * request to its handler based on the first vendor command word.
 *
 * Each handler completes the bsg job itself; this function only routes.
 * Returns the handler's result, or -ENOSYS for unknown vendor commands.
 */
static int
qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;

	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	case QL_VND_FX00_MGMT_CMD:
		return qlafx00_mgmt_cmd(bsg_job);

	case QL_VND_SERDES_OP:
		return qla26xx_serdes_op(bsg_job);

	case QL_VND_SERDES_OP_EX:
		return qla8044_serdes_op(bsg_job);

	case QL_VND_GET_FLASH_UPDATE_CAPS:
		return qla27xx_get_flash_upd_cap(bsg_job);

	case QL_VND_SET_FLASH_UPDATE_CAPS:
		return qla27xx_set_flash_upd_cap(bsg_job);

	case QL_VND_GET_BBCR_DATA:
		return qla27xx_get_bbcr_data(bsg_job);

	/* Both stats variants share one handler; it checks cmd[0] itself. */
	case QL_VND_GET_PRIV_STATS:
	case QL_VND_GET_PRIV_STATS_EX:
		return qla2x00_get_priv_stats(bsg_job);

	case QL_VND_DPORT_DIAGNOSTICS:
		return qla2x00_do_dport_diagnostics(bsg_job);

	default:
		return -ENOSYS;
	}
}
2476
2477 int
2478 qla24xx_bsg_request(struct bsg_job *bsg_job)
2479 {
2480         struct fc_bsg_request *bsg_request = bsg_job->request;
2481         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2482         int ret = -EINVAL;
2483         struct fc_rport *rport;
2484         struct Scsi_Host *host;
2485         scsi_qla_host_t *vha;
2486
2487         /* In case no data transferred. */
2488         bsg_reply->reply_payload_rcv_len = 0;
2489
2490         if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
2491                 rport = fc_bsg_to_rport(bsg_job);
2492                 host = rport_to_shost(rport);
2493                 vha = shost_priv(host);
2494         } else {
2495                 host = fc_bsg_to_shost(bsg_job);
2496                 vha = shost_priv(host);
2497         }
2498
2499         if (qla2x00_reset_active(vha)) {
2500                 ql_dbg(ql_dbg_user, vha, 0x709f,
2501                     "BSG: ISP abort active/needed -- cmd=%d.\n",
2502                     bsg_request->msgcode);
2503                 return -EBUSY;
2504         }
2505
2506         ql_dbg(ql_dbg_user, vha, 0x7000,
2507             "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
2508
2509         switch (bsg_request->msgcode) {
2510         case FC_BSG_RPT_ELS:
2511         case FC_BSG_HST_ELS_NOLOGIN:
2512                 ret = qla2x00_process_els(bsg_job);
2513                 break;
2514         case FC_BSG_HST_CT:
2515                 ret = qla2x00_process_ct(bsg_job);
2516                 break;
2517         case FC_BSG_HST_VENDOR:
2518                 ret = qla2x00_process_vendor_specific(bsg_job);
2519                 break;
2520         case FC_BSG_HST_ADD_RPORT:
2521         case FC_BSG_HST_DEL_RPORT:
2522         case FC_BSG_RPT_CT:
2523         default:
2524                 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
2525                 break;
2526         }
2527         return ret;
2528 }
2529
/*
 * qla24xx_bsg_timeout() - BSG timeout handler: locate the srb backing a
 * timed-out bsg job among the outstanding commands and abort it.
 *
 * Scans every request queue's outstanding_cmds array under hardware_lock
 * for an srb whose type is a BSG pass-through (CT, host ELS, or FX00
 * IOCB) and whose u.bsg_job matches.  Always returns 0; the outcome is
 * reported through bsg_reply->result / scsi_req()->result.
 */
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		/* Slot 0 is skipped; outstanding handles start at 1. */
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				if (((sp->type == SRB_CT_CMD) ||
					(sp->type == SRB_ELS_CMD_HST) ||
					(sp->type == SRB_FXIOCB_BCMD))
					&& (sp->u.bsg_job == bsg_job)) {
					/*
					 * Claim the srb by clearing its slot,
					 * then drop hardware_lock: the abort
					 * is a mailbox command and must not
					 * run with the lock held.
					 */
					req->outstanding_cmds[cnt] = NULL;
					spin_unlock_irqrestore(&ha->hardware_lock, flags);
					if (ha->isp_ops->abort_command(sp)) {
						ql_log(ql_log_warn, vha, 0x7089,
						    "mbx abort_command "
						    "failed.\n");
						scsi_req(bsg_job->req)->result =
						bsg_reply->result = -EIO;
					} else {
						ql_dbg(ql_dbg_user, vha, 0x708a,
						    "mbx abort_command "
						    "success.\n");
						scsi_req(bsg_job->req)->result =
						bsg_reply->result = 0;
					}
					/* Re-acquire for the unlock at done. */
					spin_lock_irqsave(&ha->hardware_lock, flags);
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	scsi_req(bsg_job->req)->result = bsg_reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/* Slot was cleared above, so this path owns the srb; release it. */
	sp->free(sp);
	return 0;
}